Diffstat (limited to 'fs')
-rw-r--r-- | fs/cifs/Makefile         |   4
-rw-r--r-- | fs/cifs/cifs_debug.c     |  72
-rw-r--r-- | fs/cifs/cifsacl.c        |   2
-rw-r--r-- | fs/cifs/cifsencrypt.c    |   6
-rw-r--r-- | fs/cifs/cifsfs.c         |  53
-rw-r--r-- | fs/cifs/cifsfs.h         |   4
-rw-r--r-- | fs/cifs/cifsglob.h       | 135
-rw-r--r-- | fs/cifs/cifsproto.h      |  10
-rw-r--r-- | fs/cifs/cifssmb.c        | 477
-rw-r--r-- | fs/cifs/connect.c        | 299
-rw-r--r-- | fs/cifs/dfs_cache.c      |   8
-rw-r--r-- | fs/cifs/dir.c            |   8
-rw-r--r-- | fs/cifs/file.c           | 283
-rw-r--r-- | fs/cifs/inode.c          |  64
-rw-r--r-- | fs/cifs/ioctl.c          |   2
-rw-r--r-- | fs/cifs/link.c           |   8
-rw-r--r-- | fs/cifs/misc.c           |  33
-rw-r--r-- | fs/cifs/netmisc.c        |   2
-rw-r--r-- | fs/cifs/sess.c           |   5
-rw-r--r-- | fs/cifs/smb1ops.c        |  10
-rw-r--r-- | fs/cifs/smb2misc.c       |  49
-rw-r--r-- | fs/cifs/smb2ops.c        |  61
-rw-r--r-- | fs/cifs/smb2pdu.c        |  32
-rw-r--r-- | fs/cifs/smb2transport.c  |  38
-rw-r--r-- | fs/cifs/transport.c      | 334
-rw-r--r-- | fs/cifs/xattr.c          |   5
26 files changed, 1099 insertions, 905 deletions
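The diff below touches 26 files under fs/cifs, and most of its hunks follow a few recurring patterns: new per-object spinlocks (TCP_Server_Info->srv_lock and ->mid_lock, cifs_ses->ses_lock, cifs_tcon->tc_lock) replace many uses of the global cifs_tcp_ses_lock and GlobalMid_Lock; the global counters are renamed to lower-case (buf_alloc_count, small_buf_alloc_count, mid_count, ...) and defined in cifsfs.c instead of via GLOBAL_EXTERN; DeleteMidQEntry()/cifs_mid_q_entry_release() are consolidated into release_mid(); open-coded list_for_each() + list_entry() walks become list_for_each_entry(); SMB1-only code such as cifssmb.c moves under CONFIG_CIFS_ALLOW_INSECURE_LEGACY; and CIFS_VERSION is bumped to 2.38. As an orientation aid, here is a minimal user-space analogy of the first pattern, checking a connection's status under its own lock rather than a single global one. It uses pthread mutexes instead of kernel spinlocks and invented struct/enum names (server_info, conn_status); it is a sketch of the locking idea, not the kernel code itself.

/* Build with: gcc -pthread demo_srv_lock.c -o demo_srv_lock
 * Illustration only: made-up types, user-space mutexes in place of spinlocks. */
#include <pthread.h>
#include <stdio.h>

enum conn_status { CONN_GOOD, CONN_NEED_RECONNECT, CONN_EXITING };

/* stand-in for struct TCP_Server_Info; the real struct gains ->srv_lock */
struct server_info {
	pthread_mutex_t srv_lock;   /* protects status below */
	enum conn_status status;
};

/* Old pattern: one global lock guarded every server's status field. */
static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

static int needs_reconnect_old(struct server_info *srv)
{
	int ret;

	pthread_mutex_lock(&global_lock);
	ret = (srv->status == CONN_NEED_RECONNECT);
	pthread_mutex_unlock(&global_lock);
	return ret;
}

/* New pattern seen throughout the diff: take the per-server lock instead. */
static int needs_reconnect_new(struct server_info *srv)
{
	int ret;

	pthread_mutex_lock(&srv->srv_lock);
	ret = (srv->status == CONN_NEED_RECONNECT);
	pthread_mutex_unlock(&srv->srv_lock);
	return ret;
}

int main(void)
{
	struct server_info srv = {
		.srv_lock = PTHREAD_MUTEX_INITIALIZER,
		.status = CONN_NEED_RECONNECT,
	};

	printf("old: %d, new: %d\n",
	       needs_reconnect_old(&srv), needs_reconnect_new(&srv));
	return 0;
}

Contention-wise the point of the change is that unrelated connections no longer serialize on one lock; the lock ordering table added to cifsglob.h in this diff documents how the new locks nest.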
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 8c9f2c00be72..e882e912a517 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile @@ -5,7 +5,7 @@ ccflags-y += -I$(src) # needed for trace events obj-$(CONFIG_CIFS) += cifs.o -cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \ +cifs-y := trace.o cifsfs.o cifs_debug.o connect.o dir.o file.o \ inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \ cifs_unicode.o nterr.o cifsencrypt.o \ readdir.o ioctl.o sess.o export.o unc.o winucase.o \ @@ -31,4 +31,4 @@ cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o -cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o +cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 2cfbac8bb965..11fd85de7217 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -36,7 +36,7 @@ cifs_dump_mem(char *label, void *data, int length) void cifs_dump_detail(void *buf, struct TCP_Server_Info *server) { #ifdef CONFIG_CIFS_DEBUG2 - struct smb_hdr *smb = (struct smb_hdr *)buf; + struct smb_hdr *smb = buf; cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n", smb->Command, smb->Status.CifsError, @@ -55,7 +55,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server) return; cifs_dbg(VFS, "Dump pending requests:\n"); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n", mid_entry->mid_state, @@ -78,7 +78,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server) mid_entry->resp_buf, 62); } } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); #endif /* CONFIG_CIFS_DEBUG2 */ } @@ -168,7 +168,6 @@ cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface) static int cifs_debug_files_proc_show(struct seq_file *m, void *v) { - struct list_head *tmp, *tmp1, *tmp2; struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; @@ -184,14 +183,10 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v) #endif /* CIFS_DEBUG2 */ spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - list_for_each(tmp1, &ses->tcon_list) { - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { spin_lock(&tcon->open_file_lock); - list_for_each(tmp2, &tcon->openFileList) { - cfile = list_entry(tmp2, struct cifsFileInfo, - tlist); + list_for_each_entry(cfile, &tcon->openFileList, tlist) { seq_printf(m, "0x%x 0x%llx 0x%x %d %d %d %pd", tcon->tid, @@ -218,7 +213,6 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v) static int cifs_debug_data_proc_show(struct seq_file *m, void *v) { - struct list_head *tmp2, *tmp3; struct mid_q_entry *mid_entry; struct TCP_Server_Info *server; struct cifs_ses *ses; @@ -381,9 +375,7 @@ skip_rdma: seq_printf(m, "\n\n\tSessions: "); i = 0; - list_for_each(tmp2, &server->smb_ses_list) { - ses = list_entry(tmp2, struct cifs_ses, - smb_ses_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { i++; if ((ses->serverDomain == NULL) || (ses->serverOS == NULL) || @@ -447,9 +439,7 @@ skip_rdma: else seq_puts(m, "none\n"); - list_for_each(tmp3, &ses->tcon_list) { - tcon = list_entry(tmp3, 
struct cifs_tcon, - tcon_list); + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { ++j; seq_printf(m, "\n\t%d) ", j); cifs_debug_tcon(m, tcon); @@ -473,10 +463,8 @@ skip_rdma: seq_printf(m, "\n\t\t[NONE]"); seq_puts(m, "\n\n\tMIDs: "); - spin_lock(&GlobalMid_Lock); - list_for_each(tmp3, &server->pending_mid_q) { - mid_entry = list_entry(tmp3, struct mid_q_entry, - qhead); + spin_lock(&server->mid_lock); + list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { seq_printf(m, "\n\tState: %d com: %d pid:" " %d cbdata: %p mid %llu\n", mid_entry->mid_state, @@ -485,7 +473,7 @@ skip_rdma: mid_entry->callback_data, mid_entry->mid); } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); seq_printf(m, "\n--\n"); } if (c == 0) @@ -504,7 +492,6 @@ static ssize_t cifs_stats_proc_write(struct file *file, { bool bv; int rc; - struct list_head *tmp1, *tmp2, *tmp3; struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; @@ -514,8 +501,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, #ifdef CONFIG_CIFS_STATS2 int i; - atomic_set(&totBufAllocCount, 0); - atomic_set(&totSmBufAllocCount, 0); + atomic_set(&total_buf_alloc_count, 0); + atomic_set(&total_small_buf_alloc_count, 0); #endif /* CONFIG_CIFS_STATS2 */ atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0); @@ -525,9 +512,7 @@ static ssize_t cifs_stats_proc_write(struct file *file, GlobalCurrentXid = 0; spin_unlock(&GlobalMid_Lock); spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp1, &cifs_tcp_ses_list) { - server = list_entry(tmp1, struct TCP_Server_Info, - tcp_ses_list); + list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { server->max_in_flight = 0; #ifdef CONFIG_CIFS_STATS2 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) { @@ -538,13 +523,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, server->fastest_cmd[0] = 0; } #endif /* CONFIG_CIFS_STATS2 */ - list_for_each(tmp2, &server->smb_ses_list) { - ses = list_entry(tmp2, struct cifs_ses, - smb_ses_list); - list_for_each(tmp3, &ses->tcon_list) { - tcon = list_entry(tmp3, - struct cifs_tcon, - tcon_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { atomic_set(&tcon->num_smbs_sent, 0); spin_lock(&tcon->stat_lock); tcon->bytes_read = 0; @@ -569,7 +549,6 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) #ifdef CONFIG_CIFS_STATS2 int j; #endif /* STATS2 */ - struct list_head *tmp2, *tmp3; struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; @@ -579,17 +558,17 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) seq_printf(m, "Share (unique mount targets): %d\n", tconInfoAllocCount.counter); seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n", - bufAllocCount.counter, + buf_alloc_count.counter, cifs_min_rcv + tcpSesAllocCount.counter); seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n", - smBufAllocCount.counter, cifs_min_small); + small_buf_alloc_count.counter, cifs_min_small); #ifdef CONFIG_CIFS_STATS2 seq_printf(m, "Total Large %d Small %d Allocations\n", - atomic_read(&totBufAllocCount), - atomic_read(&totSmBufAllocCount)); + atomic_read(&total_buf_alloc_count), + atomic_read(&total_small_buf_alloc_count)); #endif /* CONFIG_CIFS_STATS2 */ - seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount)); + seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&mid_count)); seq_printf(m, "\n%d session %d share reconnects\n", 
tcpSesReconnectCount.counter, tconInfoReconnectCount.counter); @@ -619,13 +598,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) atomic_read(&server->smb2slowcmd[j]), server->hostname, j); #endif /* STATS2 */ - list_for_each(tmp2, &server->smb_ses_list) { - ses = list_entry(tmp2, struct cifs_ses, - smb_ses_list); - list_for_each(tmp3, &ses->tcon_list) { - tcon = list_entry(tmp3, - struct cifs_tcon, - tcon_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { i++; seq_printf(m, "\n%d) %s", i, tcon->treeName); if (tcon->need_reconnect) diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index bf861fef2f0c..fa480d62f313 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -1379,6 +1379,7 @@ chown_chgrp_exit: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, const struct cifs_fid *cifsfid, u32 *pacllen, u32 __maybe_unused unused) @@ -1512,6 +1513,7 @@ out: cifs_put_tlink(tlink); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ int diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 663cb9db4908..8f7835ccbca1 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -141,13 +141,13 @@ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server, if ((cifs_pdu == NULL) || (server == NULL)) return -EINVAL; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) || server->tcpStatus == CifsNeedNegotiate) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return rc; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (!server->session_estab) { memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8); diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 8f2e003e0590..8849f0852110 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -68,6 +68,34 @@ bool enable_negotiate_signing; /* false by default */ unsigned int global_secflags = CIFSSEC_DEF; /* unsigned int ntlmv2_support = 0; */ unsigned int sign_CIFS_PDUs = 1; + +/* + * Global transaction id (XID) information + */ +unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ +unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ +unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ +spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ + +/* + * Global counters, updated atomically + */ +atomic_t sesInfoAllocCount; +atomic_t tconInfoAllocCount; +atomic_t tcpSesNextId; +atomic_t tcpSesAllocCount; +atomic_t tcpSesReconnectCount; +atomic_t tconInfoReconnectCount; + +atomic_t mid_count; +atomic_t buf_alloc_count; +atomic_t small_buf_alloc_count; +#ifdef CONFIG_CIFS_STATS2 +atomic_t total_buf_alloc_count; +atomic_t total_small_buf_alloc_count; +#endif/* STATS2 */ +struct list_head cifs_tcp_ses_list; +spinlock_t cifs_tcp_ses_lock; static const struct super_operations cifs_super_ops; unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; module_param(CIFSMaxBufSize, uint, 0444); @@ -703,14 +731,17 @@ static void cifs_umount_begin(struct super_block *sb) tcon = cifs_sb_master_tcon(cifs_sb); spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) { /* we have other mounts to same share or we have already tried to force umount this and woken up all waiting 
network requests, nothing to do */ + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return; } else if (tcon->tc_count == 1) tcon->status = TID_EXITING; + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ @@ -1537,8 +1568,7 @@ cifs_destroy_request_bufs(void) kmem_cache_destroy(cifs_sm_req_cachep); } -static int -cifs_init_mids(void) +static int init_mids(void) { cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids", sizeof(struct mid_q_entry), 0, @@ -1556,8 +1586,7 @@ cifs_init_mids(void) return 0; } -static void -cifs_destroy_mids(void) +static void destroy_mids(void) { mempool_destroy(cifs_mid_poolp); kmem_cache_destroy(cifs_mid_cachep); @@ -1579,11 +1608,11 @@ init_cifs(void) atomic_set(&tcpSesReconnectCount, 0); atomic_set(&tconInfoReconnectCount, 0); - atomic_set(&bufAllocCount, 0); - atomic_set(&smBufAllocCount, 0); + atomic_set(&buf_alloc_count, 0); + atomic_set(&small_buf_alloc_count, 0); #ifdef CONFIG_CIFS_STATS2 - atomic_set(&totBufAllocCount, 0); - atomic_set(&totSmBufAllocCount, 0); + atomic_set(&total_buf_alloc_count, 0); + atomic_set(&total_small_buf_alloc_count, 0); if (slow_rsp_threshold < 1) cifs_dbg(FYI, "slow_response_threshold msgs disabled\n"); else if (slow_rsp_threshold > 32767) @@ -1591,7 +1620,7 @@ init_cifs(void) "slow response threshold set higher than recommended (0 to 32767)\n"); #endif /* CONFIG_CIFS_STATS2 */ - atomic_set(&midCount, 0); + atomic_set(&mid_count, 0); GlobalCurrentXid = 0; GlobalTotalActiveXid = 0; GlobalMaxActiveXid = 0; @@ -1654,7 +1683,7 @@ init_cifs(void) if (rc) goto out_destroy_deferredclose_wq; - rc = cifs_init_mids(); + rc = init_mids(); if (rc) goto out_destroy_inodecache; @@ -1711,7 +1740,7 @@ out_destroy_request_bufs: #endif cifs_destroy_request_bufs(); out_destroy_mids: - cifs_destroy_mids(); + destroy_mids(); out_destroy_inodecache: cifs_destroy_inodecache(); out_destroy_deferredclose_wq: @@ -1747,7 +1776,7 @@ exit_cifs(void) dfs_cache_destroy(); #endif cifs_destroy_request_bufs(); - cifs_destroy_mids(); + destroy_mids(); cifs_destroy_inodecache(); destroy_workqueue(deferredclose_wq); destroy_workqueue(cifsoplockd_wq); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index b17be47a8e59..81f4c15936d0 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -153,6 +153,6 @@ extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ /* when changing internal version - update following two lines at same time */ -#define SMB3_PRODUCT_BUILD 37 -#define CIFS_VERSION "2.37" +#define SMB3_PRODUCT_BUILD 38 +#define CIFS_VERSION "2.38" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index a643c84ff1e9..3070407cafa7 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -605,6 +605,7 @@ inc_rfc1001_len(void *buf, int count) struct TCP_Server_Info { struct list_head tcp_ses_list; struct list_head smb_ses_list; + spinlock_t srv_lock; /* protect anything here that is not protected */ __u64 conn_id; /* connection identifier (useful for debugging) */ int srv_count; /* reference counter */ /* 15 character server name + 0x20 16th byte indicating type = srv */ @@ -622,6 +623,7 @@ struct TCP_Server_Info { #endif wait_queue_head_t response_q; wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ + spinlock_t mid_lock; /* protect mid queue and it's entries */ struct list_head pending_mid_q; bool noblocksnd; /* use blocking sendmsg */ bool noautotune; /* do not autotune send buf 
sizes */ @@ -1008,6 +1010,7 @@ struct cifs_ses { struct list_head rlist; /* reconnect list */ struct list_head tcon_list; struct cifs_tcon *tcon_ipc; + spinlock_t ses_lock; /* protect anything here that is not protected */ struct mutex session_mutex; struct TCP_Server_Info *server; /* pointer to server info */ int ses_count; /* reference counter */ @@ -1169,6 +1172,7 @@ struct cifs_tcon { struct list_head tcon_list; int tc_count; struct list_head rlist; /* reconnect list */ + spinlock_t tc_lock; /* protect anything here that is not protected */ atomic_t num_local_opens; /* num of all opens including disconnected */ atomic_t num_remote_opens; /* num of all network opens on server */ struct list_head openFileList; @@ -1899,33 +1903,78 @@ require use of the stronger protocol */ */ /**************************************************************************** - * Locking notes. All updates to global variables and lists should be - * protected by spinlocks or semaphores. + * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged according + * to the locking order. i.e. if two locks are to be held together, the lock that + * appears higher in this list needs to be taken before the other. * - * Spinlocks - * --------- - * GlobalMid_Lock protects: - * list operations on pending_mid_q and oplockQ - * updates to XID counters, multiplex id and SMB sequence numbers - * list operations on global DnotifyReqList - * updates to ses->status and TCP_Server_Info->tcpStatus - * updates to server->CurrentMid - * tcp_ses_lock protects: - * list operations on tcp and SMB session lists - * tcon->open_file_lock protects the list of open files hanging off the tcon - * inode->open_file_lock protects the openFileList hanging off the inode - * cfile->file_info_lock protects counters and fields in cifs file struct - * f_owner.lock protects certain per file struct operations - * mapping->page_lock protects certain per page operations + * If you hold a lock that is lower in this list, and you need to take a higher lock + * (or if you think that one of the functions that you're calling may need to), first + * drop the lock you hold, pick up the higher lock, then the lower one. This will + * ensure that locks are picked up only in one direction in the below table + * (top to bottom). * - * Note that the cifs_tcon.open_file_lock should be taken before - * not after the cifsInodeInfo.open_file_lock + * Also, if you expect a function to be called with a lock held, explicitly document + * this in the comments on top of your function definition. * - * Semaphores - * ---------- - * cifsInodeInfo->lock_sem protects: - * the list of locks held by the inode + * And also, try to keep the critical sections (lock hold time) to be as minimal as + * possible. Blocking / calling other functions with a lock held always increase + * the risk of a possible deadlock. * + * Following this rule will avoid unnecessary deadlocks, which can get really hard to + * debug. Also, any new lock that you introduce, please add to this list in the correct + * order. + * + * Please populate this list whenever you introduce new locks in your changes. Or in + * case I've missed some existing locks. Please ensure that it's added in the list + * based on the locking order expected. 
+ * + * ===================================================================================== + * Lock Protects Initialization fn + * ===================================================================================== + * vol_list_lock + * vol_info->ctx_lock vol_info->ctx + * cifs_sb_info->tlink_tree_lock cifs_sb_info->tlink_tree cifs_setup_cifs_sb + * TCP_Server_Info-> TCP_Server_Info cifs_get_tcp_session + * reconnect_mutex + * TCP_Server_Info->srv_mutex TCP_Server_Info cifs_get_tcp_session + * cifs_ses->session_mutex cifs_ses sesInfoAlloc + * cifs_tcon + * cifs_tcon->open_file_lock cifs_tcon->openFileList tconInfoAlloc + * cifs_tcon->pending_opens + * cifs_tcon->stat_lock cifs_tcon->bytes_read tconInfoAlloc + * cifs_tcon->bytes_written + * cifs_tcp_ses_lock cifs_tcp_ses_list sesInfoAlloc + * GlobalMid_Lock GlobalMaxActiveXid init_cifs + * GlobalCurrentXid + * GlobalTotalActiveXid + * TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change) + * TCP_Server_Info->mid_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session + * ->CurrentMid + * (any changes in mid_q_entry fields) + * TCP_Server_Info->req_lock TCP_Server_Info->in_flight cifs_get_tcp_session + * ->credits + * ->echo_credits + * ->oplock_credits + * ->reconnect_instance + * cifs_ses->ses_lock (anything that is not protected by another lock and can change) + * cifs_ses->iface_lock cifs_ses->iface_list sesInfoAlloc + * ->iface_count + * ->iface_last_update + * cifs_ses->chan_lock cifs_ses->chans + * ->chans_need_reconnect + * ->chans_in_reconnect + * cifs_tcon->tc_lock (anything that is not protected by another lock and can change) + * cifsInodeInfo->open_file_lock cifsInodeInfo->openFileList cifs_alloc_inode + * cifsInodeInfo->writers_lock cifsInodeInfo->writers cifsInodeInfo_alloc + * cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once + * ->can_cache_brlcks + * cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc + * cached_fid->fid_mutex cifs_tcon->crfid tconInfoAlloc + * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo + * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo + * ->invalidHandle initiate_cifs_search + * ->oplock_break_cancelled + * cifs_aio_ctx->aio_mutex cifs_aio_ctx cifs_aio_ctx_alloc ****************************************************************************/ #ifdef DECLARE_GLOBALS_HERE @@ -1941,47 +1990,44 @@ require use of the stronger protocol */ * sessions (and from that the tree connections) can be found * by iterating over cifs_tcp_ses_list */ -GLOBAL_EXTERN struct list_head cifs_tcp_ses_list; +extern struct list_head cifs_tcp_ses_list; /* * This lock protects the cifs_tcp_ses_list, the list of smb sessions per * tcp session, and the list of tcon's per smb session. It also protects - * the reference counters for the server, smb session, and tcon. It also - * protects some fields in the TCP_Server_Info struct such as dstaddr. Finally, - * changes to the tcon->tidStatus should be done while holding this lock. + * the reference counters for the server, smb session, and tcon. 
* generally the locks should be taken in order tcp_ses_lock before * tcon->open_file_lock and that before file->file_info_lock since the * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file */ -GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock; +extern spinlock_t cifs_tcp_ses_lock; /* * Global transaction id (XID) information */ -GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ -GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ -GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ -GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */ - /* on midQ entries */ +extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ +extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ +extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ +extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ + /* * Global counters, updated atomically */ -GLOBAL_EXTERN atomic_t sesInfoAllocCount; -GLOBAL_EXTERN atomic_t tconInfoAllocCount; -GLOBAL_EXTERN atomic_t tcpSesNextId; -GLOBAL_EXTERN atomic_t tcpSesAllocCount; -GLOBAL_EXTERN atomic_t tcpSesReconnectCount; -GLOBAL_EXTERN atomic_t tconInfoReconnectCount; +extern atomic_t sesInfoAllocCount; +extern atomic_t tconInfoAllocCount; +extern atomic_t tcpSesNextId; +extern atomic_t tcpSesAllocCount; +extern atomic_t tcpSesReconnectCount; +extern atomic_t tconInfoReconnectCount; /* Various Debug counters */ -GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ +extern atomic_t buf_alloc_count; /* current number allocated */ +extern atomic_t small_buf_alloc_count; #ifdef CONFIG_CIFS_STATS2 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ -GLOBAL_EXTERN atomic_t totSmBufAllocCount; +extern atomic_t total_buf_alloc_count; /* total allocated over all time */ +extern atomic_t total_small_buf_alloc_count; extern unsigned int slow_rsp_threshold; /* number of secs before logging */ #endif -GLOBAL_EXTERN atomic_t smBufAllocCount; -GLOBAL_EXTERN atomic_t midCount; /* Misc globals */ extern bool enable_oplocks; /* enable or disable oplocks */ @@ -1998,6 +2044,7 @@ extern unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */ extern unsigned int cifs_min_small; /* min size of small buf pool */ extern unsigned int cifs_max_pending; /* MAX requests at once to server*/ extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */ +extern atomic_t mid_count; void cifs_oplock_break(struct work_struct *work); void cifs_queue_oplock_break(struct cifsFileInfo *cfile); diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index d59aebefa71c..daaadffa2b88 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -78,12 +78,8 @@ extern char *build_wildcard_path_from_dentry(struct dentry *direntry); extern char *cifs_compose_mount_options(const char *sb_mountdata, const char *fullpath, const struct dfs_info3_param *ref, char **devname); -/* extern void renew_parental_timestamps(struct dentry *direntry);*/ -extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, - struct TCP_Server_Info *server); -extern void DeleteMidQEntry(struct mid_q_entry *midEntry); -extern void cifs_delete_mid(struct mid_q_entry *mid); -extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry); +extern void delete_mid(struct mid_q_entry *mid); +extern void release_mid(struct mid_q_entry *mid); extern void cifs_wake_up_task(struct 
mid_q_entry *mid); extern int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid); @@ -521,6 +517,7 @@ extern int generate_smb30signingkey(struct cifs_ses *ses, extern int generate_smb311signingkey(struct cifs_ses *ses, struct TCP_Server_Info *server); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY extern int CIFSSMBCopy(unsigned int xid, struct cifs_tcon *source_tcon, const char *fromName, @@ -551,6 +548,7 @@ extern int CIFSSMBSetPosixACL(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nls_codepage, int remap_special_chars); extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, const int netfid, __u64 *pExtAttrBits, __u64 *pMask); +#endif /* CIFS_ALLOW_INSECURE_LEGACY */ extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); extern bool couldbe_mf_symlink(const struct cifs_fattr *fattr); extern int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 6371b9eebdad..7aa91e272027 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -29,7 +29,6 @@ #include "cifsproto.h" #include "cifs_unicode.h" #include "cifs_debug.h" -#include "smb2proto.h" #include "fscache.h" #include "smbdirect.h" #ifdef CONFIG_CIFS_DFS_UPCALL @@ -62,52 +61,6 @@ static struct { #define CIFS_NUM_PROT 1 #endif /* CIFS_POSIX */ -/* - * Mark as invalid, all open files on tree connections since they - * were closed when session to server was lost. - */ -void -cifs_mark_open_files_invalid(struct cifs_tcon *tcon) -{ - struct cifsFileInfo *open_file = NULL; - struct list_head *tmp; - struct list_head *tmp1; - - /* only send once per connect */ - spin_lock(&cifs_tcp_ses_lock); - if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) { - spin_unlock(&cifs_tcp_ses_lock); - return; - } - tcon->status = TID_IN_FILES_INVALIDATE; - spin_unlock(&cifs_tcp_ses_lock); - - /* list all files open on tree connection and mark them invalid */ - spin_lock(&tcon->open_file_lock); - list_for_each_safe(tmp, tmp1, &tcon->openFileList) { - open_file = list_entry(tmp, struct cifsFileInfo, tlist); - open_file->invalidHandle = true; - open_file->oplock_break_cancelled = true; - } - spin_unlock(&tcon->open_file_lock); - - mutex_lock(&tcon->crfid.fid_mutex); - tcon->crfid.is_valid = false; - /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ - close_cached_dir_lease_locked(&tcon->crfid); - memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid)); - mutex_unlock(&tcon->crfid.fid_mutex); - - spin_lock(&cifs_tcp_ses_lock); - if (tcon->status == TID_IN_FILES_INVALIDATE) - tcon->status = TID_NEED_TCON; - spin_unlock(&cifs_tcp_ses_lock); - - /* - * BB Add call to invalidate_inodes(sb) for all superblocks mounted - * to this tcon. 
- */ -} /* reconnect the socket, tcon, and smb session if needed */ static int @@ -134,18 +87,18 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) * only tree disconnect, open, and write, (and ulogoff which does not * have tcon) are allowed as we start force umount */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_EXITING) { if (smb_command != SMB_COM_WRITE_ANDX && smb_command != SMB_COM_OPEN_ANDX && smb_command != SMB_COM_TREE_DISCONNECT) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); cifs_dbg(FYI, "can not send cmd %d while umounting\n", smb_command); return -ENODEV; } } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); retries = server->nr_targets; @@ -165,12 +118,12 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) } /* are we still trying to reconnect? */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus != CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); break; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (retries && --retries) continue; @@ -201,13 +154,13 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) * and the server never sends an answer the socket will be closed * and tcpStatus set to reconnect. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = -EHOSTDOWN; goto out; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* * need to prevent multiple threads trying to simultaneously @@ -457,52 +410,6 @@ decode_ext_sec_blob(struct cifs_ses *ses, NEGOTIATE_RSP *pSMBr) return 0; } -int -cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) -{ - bool srv_sign_required = server->sec_mode & server->vals->signing_required; - bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; - bool mnt_sign_enabled = global_secflags & CIFSSEC_MAY_SIGN; - - /* - * Is signing required by mnt options? If not then check - * global_secflags to see if it is there. - */ - if (!mnt_sign_required) - mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == - CIFSSEC_MUST_SIGN); - - /* - * If signing is required then it's automatically enabled too, - * otherwise, check to see if the secflags allow it. - */ - mnt_sign_enabled = mnt_sign_required ? mnt_sign_required : - (global_secflags & CIFSSEC_MAY_SIGN); - - /* If server requires signing, does client allow it? */ - if (srv_sign_required) { - if (!mnt_sign_enabled) { - cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n"); - return -ENOTSUPP; - } - server->sign = true; - } - - /* If client requires signing, does server allow it? 
*/ - if (mnt_sign_required) { - if (!srv_sign_enabled) { - cifs_dbg(VFS, "Server does not support signing!\n"); - return -ENOTSUPP; - } - server->sign = true; - } - - if (cifs_rdma_enabled(server) && server->sign) - cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n"); - - return 0; -} - static bool should_set_ext_sec_flag(enum securityEnum sectype) { @@ -684,7 +591,7 @@ cifs_echo_callback(struct mid_q_entry *mid) struct TCP_Server_Info *server = mid->callback_data; struct cifs_credits credits = { .value = 1, .instance = 0 }; - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, CIFS_ECHO_OP); } @@ -1379,184 +1286,6 @@ openRetry: return rc; } -/* - * Discard any remaining data in the current SMB. To do this, we borrow the - * current bigbuf. - */ -int -cifs_discard_remaining_data(struct TCP_Server_Info *server) -{ - unsigned int rfclen = server->pdu_size; - int remaining = rfclen + server->vals->header_preamble_size - - server->total_read; - - while (remaining > 0) { - int length; - - length = cifs_discard_from_socket(server, - min_t(size_t, remaining, - CIFSMaxBufSize + MAX_HEADER_SIZE(server))); - if (length < 0) - return length; - server->total_read += length; - remaining -= length; - } - - return 0; -} - -static int -__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, - bool malformed) -{ - int length; - - length = cifs_discard_remaining_data(server); - dequeue_mid(mid, malformed); - mid->resp_buf = server->smallbuf; - server->smallbuf = NULL; - return length; -} - -static int -cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) -{ - struct cifs_readdata *rdata = mid->callback_data; - - return __cifs_readv_discard(server, mid, rdata->result); -} - -int -cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) -{ - int length, len; - unsigned int data_offset, data_len; - struct cifs_readdata *rdata = mid->callback_data; - char *buf = server->smallbuf; - unsigned int buflen = server->pdu_size + - server->vals->header_preamble_size; - bool use_rdma_mr = false; - - cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", - __func__, mid->mid, rdata->offset, rdata->bytes); - - /* - * read the rest of READ_RSP header (sans Data array), or whatever we - * can if there's not enough data. At this point, we've read down to - * the Mid. - */ - len = min_t(unsigned int, buflen, server->vals->read_rsp_size) - - HEADER_SIZE(server) + 1; - - length = cifs_read_from_socket(server, - buf + HEADER_SIZE(server) - 1, len); - if (length < 0) - return length; - server->total_read += length; - - if (server->ops->is_session_expired && - server->ops->is_session_expired(buf)) { - cifs_reconnect(server, true); - return -1; - } - - if (server->ops->is_status_pending && - server->ops->is_status_pending(buf, server)) { - cifs_discard_remaining_data(server); - return -1; - } - - /* set up first two iov for signature check and to get credits */ - rdata->iov[0].iov_base = buf; - rdata->iov[0].iov_len = server->vals->header_preamble_size; - rdata->iov[1].iov_base = buf + server->vals->header_preamble_size; - rdata->iov[1].iov_len = - server->total_read - server->vals->header_preamble_size; - cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", - rdata->iov[0].iov_base, rdata->iov[0].iov_len); - cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", - rdata->iov[1].iov_base, rdata->iov[1].iov_len); - - /* Was the SMB read successful? 
*/ - rdata->result = server->ops->map_error(buf, false); - if (rdata->result != 0) { - cifs_dbg(FYI, "%s: server returned error %d\n", - __func__, rdata->result); - /* normal error on read response */ - return __cifs_readv_discard(server, mid, false); - } - - /* Is there enough to get to the rest of the READ_RSP header? */ - if (server->total_read < server->vals->read_rsp_size) { - cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n", - __func__, server->total_read, - server->vals->read_rsp_size); - rdata->result = -EIO; - return cifs_readv_discard(server, mid); - } - - data_offset = server->ops->read_data_offset(buf) + - server->vals->header_preamble_size; - if (data_offset < server->total_read) { - /* - * win2k8 sometimes sends an offset of 0 when the read - * is beyond the EOF. Treat it as if the data starts just after - * the header. - */ - cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", - __func__, data_offset); - data_offset = server->total_read; - } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { - /* data_offset is beyond the end of smallbuf */ - cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", - __func__, data_offset); - rdata->result = -EIO; - return cifs_readv_discard(server, mid); - } - - cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n", - __func__, server->total_read, data_offset); - - len = data_offset - server->total_read; - if (len > 0) { - /* read any junk before data into the rest of smallbuf */ - length = cifs_read_from_socket(server, - buf + server->total_read, len); - if (length < 0) - return length; - server->total_read += length; - } - - /* how much data is in the response? */ -#ifdef CONFIG_CIFS_SMB_DIRECT - use_rdma_mr = rdata->mr; -#endif - data_len = server->ops->read_data_length(buf, use_rdma_mr); - if (!use_rdma_mr && (data_offset + data_len > buflen)) { - /* data_len is corrupt -- discard frame */ - rdata->result = -EIO; - return cifs_readv_discard(server, mid); - } - - length = rdata->read_into_pages(server, rdata, data_len); - if (length < 0) - return length; - - server->total_read += length; - - cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", - server->total_read, buflen, data_len); - - /* discard anything left over */ - if (server->total_read < buflen) - return cifs_readv_discard(server, mid); - - dequeue_mid(mid, false); - mid->resp_buf = server->smallbuf; - server->smallbuf = NULL; - return length; -} - static void cifs_readv_callback(struct mid_q_entry *mid) { @@ -1607,7 +1336,7 @@ cifs_readv_callback(struct mid_q_entry *mid) } queue_work(cifsiod_wq, &rdata->work); - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, 0); } @@ -1909,183 +1638,6 @@ CIFSSMBWrite(const unsigned int xid, struct cifs_io_parms *io_parms, return rc; } -void -cifs_writedata_release(struct kref *refcount) -{ - struct cifs_writedata *wdata = container_of(refcount, - struct cifs_writedata, refcount); -#ifdef CONFIG_CIFS_SMB_DIRECT - if (wdata->mr) { - smbd_deregister_mr(wdata->mr); - wdata->mr = NULL; - } -#endif - - if (wdata->cfile) - cifsFileInfo_put(wdata->cfile); - - kvfree(wdata->pages); - kfree(wdata); -} - -/* - * Write failed with a retryable error. Resend the write request. It's also - * possible that the page was redirtied so re-clean the page. 
- */ -static void -cifs_writev_requeue(struct cifs_writedata *wdata) -{ - int i, rc = 0; - struct inode *inode = d_inode(wdata->cfile->dentry); - struct TCP_Server_Info *server; - unsigned int rest_len; - - server = tlink_tcon(wdata->cfile->tlink)->ses->server; - i = 0; - rest_len = wdata->bytes; - do { - struct cifs_writedata *wdata2; - unsigned int j, nr_pages, wsize, tailsz, cur_len; - - wsize = server->ops->wp_retry_size(inode); - if (wsize < rest_len) { - nr_pages = wsize / PAGE_SIZE; - if (!nr_pages) { - rc = -ENOTSUPP; - break; - } - cur_len = nr_pages * PAGE_SIZE; - tailsz = PAGE_SIZE; - } else { - nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE); - cur_len = rest_len; - tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; - } - - wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); - if (!wdata2) { - rc = -ENOMEM; - break; - } - - for (j = 0; j < nr_pages; j++) { - wdata2->pages[j] = wdata->pages[i + j]; - lock_page(wdata2->pages[j]); - clear_page_dirty_for_io(wdata2->pages[j]); - } - - wdata2->sync_mode = wdata->sync_mode; - wdata2->nr_pages = nr_pages; - wdata2->offset = page_offset(wdata2->pages[0]); - wdata2->pagesz = PAGE_SIZE; - wdata2->tailsz = tailsz; - wdata2->bytes = cur_len; - - rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, - &wdata2->cfile); - if (!wdata2->cfile) { - cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n", - rc); - if (!is_retryable_error(rc)) - rc = -EBADF; - } else { - wdata2->pid = wdata2->cfile->pid; - rc = server->ops->async_writev(wdata2, - cifs_writedata_release); - } - - for (j = 0; j < nr_pages; j++) { - unlock_page(wdata2->pages[j]); - if (rc != 0 && !is_retryable_error(rc)) { - SetPageError(wdata2->pages[j]); - end_page_writeback(wdata2->pages[j]); - put_page(wdata2->pages[j]); - } - } - - kref_put(&wdata2->refcount, cifs_writedata_release); - if (rc) { - if (is_retryable_error(rc)) - continue; - i += nr_pages; - break; - } - - rest_len -= cur_len; - i += nr_pages; - } while (i < wdata->nr_pages); - - /* cleanup remaining pages from the original wdata */ - for (; i < wdata->nr_pages; i++) { - SetPageError(wdata->pages[i]); - end_page_writeback(wdata->pages[i]); - put_page(wdata->pages[i]); - } - - if (rc != 0 && !is_retryable_error(rc)) - mapping_set_error(inode->i_mapping, rc); - kref_put(&wdata->refcount, cifs_writedata_release); -} - -void -cifs_writev_complete(struct work_struct *work) -{ - struct cifs_writedata *wdata = container_of(work, - struct cifs_writedata, work); - struct inode *inode = d_inode(wdata->cfile->dentry); - int i = 0; - - if (wdata->result == 0) { - spin_lock(&inode->i_lock); - cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); - spin_unlock(&inode->i_lock); - cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), - wdata->bytes); - } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) - return cifs_writev_requeue(wdata); - - for (i = 0; i < wdata->nr_pages; i++) { - struct page *page = wdata->pages[i]; - if (wdata->result == -EAGAIN) - __set_page_dirty_nobuffers(page); - else if (wdata->result < 0) - SetPageError(page); - end_page_writeback(page); - cifs_readpage_to_fscache(inode, page); - put_page(page); - } - if (wdata->result != -EAGAIN) - mapping_set_error(inode->i_mapping, wdata->result); - kref_put(&wdata->refcount, cifs_writedata_release); -} - -struct cifs_writedata * -cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete) -{ - struct page **pages = - kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); - if (pages) - return 
cifs_writedata_direct_alloc(pages, complete); - - return NULL; -} - -struct cifs_writedata * -cifs_writedata_direct_alloc(struct page **pages, work_func_t complete) -{ - struct cifs_writedata *wdata; - - wdata = kzalloc(sizeof(*wdata), GFP_NOFS); - if (wdata != NULL) { - wdata->pages = pages; - kref_init(&wdata->refcount); - INIT_LIST_HEAD(&wdata->list); - init_completion(&wdata->done); - INIT_WORK(&wdata->work, complete); - } - return wdata; -} - /* * Check the mid_state and signature on received buffer (if any), and queue the * workqueue completion task. @@ -2132,7 +1684,7 @@ cifs_writev_callback(struct mid_q_entry *mid) } queue_work(cifsiod_wq, &wdata->work); - DeleteMidQEntry(mid); + release_mid(mid); add_credits(tcon->ses->server, &credits, 0); } @@ -3660,7 +3212,6 @@ setACLerrorExit: return rc; } -/* BB fix tabs in this function FIXME BB */ int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, const int netfid, __u64 *pExtAttrBits, __u64 *pMask) @@ -3677,7 +3228,7 @@ CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, GetExtAttrRetry: rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, - (void **) &pSMBr); + (void **) &pSMBr); if (rc) return rc; @@ -3723,7 +3274,7 @@ GetExtAttrRetry: __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); __u16 count = le16_to_cpu(pSMBr->t2.DataCount); struct file_chattr_info *pfinfo; - /* BB Do we need a cast or hash here ? */ + if (count != 16) { cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n"); rc = -EIO; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 386bb523c69e..7f205a9a2de4 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -119,10 +119,10 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server) goto requeue_resolve; } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr, strlen(ipaddr)); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); kfree(ipaddr); /* rc == 1 means success here */ @@ -205,17 +205,22 @@ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server, /* If server is a channel, select the primary channel */ pserver = CIFS_SERVER_IS_CHAN(server) ? 
server->primary_server : server; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&pserver->srv_lock); if (!all_channels) { pserver->tcpStatus = CifsNeedReconnect; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&pserver->srv_lock); return; } + spin_unlock(&pserver->srv_lock); + spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { spin_lock(&ses->chan_lock); - for (i = 0; i < ses->chan_count; i++) + for (i = 0; i < ses->chan_count; i++) { + spin_lock(&ses->chans[i].server->srv_lock); ses->chans[i].server->tcpStatus = CifsNeedReconnect; + spin_unlock(&ses->chans[i].server->srv_lock); + } spin_unlock(&ses->chan_lock); } spin_unlock(&cifs_tcp_ses_lock); @@ -252,17 +257,8 @@ cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server, spin_lock(&cifs_tcp_ses_lock); list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) { /* check if iface is still active */ - if (!cifs_chan_is_iface_active(ses, server)) { - /* - * HACK: drop the lock before calling - * cifs_chan_update_iface to avoid deadlock - */ - ses->ses_count++; - spin_unlock(&cifs_tcp_ses_lock); + if (!cifs_chan_is_iface_active(ses, server)) cifs_chan_update_iface(ses, server); - spin_lock(&cifs_tcp_ses_lock); - ses->ses_count--; - } spin_lock(&ses->chan_lock); if (!mark_smb_session && cifs_chan_needs_reconnect(ses, server)) @@ -323,7 +319,7 @@ cifs_abort_connection(struct TCP_Server_Info *server) /* mark submitted MIDs for retry and issue callback */ INIT_LIST_HEAD(&retry_list); cifs_dbg(FYI, "%s: moving mids to private list\n", __func__); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) { kref_get(&mid->refcount); if (mid->mid_state == MID_REQUEST_SUBMITTED) @@ -331,14 +327,14 @@ cifs_abort_connection(struct TCP_Server_Info *server) list_move(&mid->qhead, &retry_list); mid->mid_flags |= MID_DELETED; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); cifs_server_unlock(server); cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); list_for_each_entry_safe(mid, nmid, &retry_list, qhead) { list_del_init(&mid->qhead); mid->callback(mid); - cifs_mid_q_entry_release(mid); + release_mid(mid); } if (cifs_rdma_enabled(server)) { @@ -350,11 +346,11 @@ cifs_abort_connection(struct TCP_Server_Info *server) static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); server->nr_targets = num_targets; if (server->tcpStatus == CifsExiting) { /* the demux thread will exit normally next time through the loop */ - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); wake_up(&server->response_q); return false; } @@ -364,7 +360,7 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num server->hostname); server->tcpStatus = CifsNeedReconnect; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return true; } @@ -414,20 +410,20 @@ static int __cifs_reconnect(struct TCP_Server_Info *server, } else { atomic_inc(&tcpSesReconnectCount); set_credits(server, 1); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_swn_reset_server_dstaddr(server); cifs_server_unlock(server); mod_delayed_work(cifsiod_wq, &server->reconnect, 0); } } while (server->tcpStatus == CifsNeedReconnect); - 
spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate) mod_delayed_work(cifsiod_wq, &server->echo, 0); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); wake_up(&server->response_q); return rc; @@ -541,10 +537,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) */ atomic_inc(&tcpSesReconnectCount); set_credits(server, 1); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus != CifsExiting) server->tcpStatus = CifsNeedNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_swn_reset_server_dstaddr(server); cifs_server_unlock(server); mod_delayed_work(cifsiod_wq, &server->reconnect, 0); @@ -556,11 +552,10 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) dfs_cache_free_tgts(&tl); /* Need to set up echo worker again once connection has been established */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate) mod_delayed_work(cifsiod_wq, &server->echo, 0); - - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); wake_up(&server->response_q); return rc; @@ -569,12 +564,12 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server) int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) { /* If tcp session is not an dfs connection, then reconnect to last target server */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (!server->is_dfs_conn) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return __cifs_reconnect(server, mark_smb_session); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); mutex_lock(&server->refpath_lock); if (!server->origin_fullpath || !server->leaf_fullpath) { @@ -670,18 +665,18 @@ server_unresponsive(struct TCP_Server_Info *server) * 65s kernel_recvmsg times out, and we see that we haven't gotten * a response in >60s. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if ((server->tcpStatus == CifsGood || server->tcpStatus == CifsNeedNegotiate) && (!server->ops->can_echo || server->ops->can_echo(server)) && time_after(jiffies, server->lstrp + 3 * server->echo_interval)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_server_dbg(VFS, "has not responded in %lu seconds. 
Reconnecting...\n", (3 * server->echo_interval) / HZ); cifs_reconnect(server, false); return true; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return false; } @@ -726,18 +721,18 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) else length = sock_recvmsg(server->ssocket, smb_msg, 0); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ESHUTDOWN; } if (server->tcpStatus == CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_reconnect(server, false); return -ECONNABORTED; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (length == -ERESTARTSYS || length == -EAGAIN || @@ -849,7 +844,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed) #ifdef CONFIG_CIFS_STATS2 mid->when_received = jiffies; #endif - spin_lock(&GlobalMid_Lock); + spin_lock(&mid->server->mid_lock); if (!malformed) mid->mid_state = MID_RESPONSE_RECEIVED; else @@ -859,12 +854,12 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed) * function has finished processing it is a bug. */ if (mid->mid_flags & MID_DELETED) { - spin_unlock(&GlobalMid_Lock); + spin_unlock(&mid->server->mid_lock); pr_warn_once("trying to dequeue a deleted mid\n"); } else { list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; - spin_unlock(&GlobalMid_Lock); + spin_unlock(&mid->server->mid_lock); } } @@ -903,21 +898,68 @@ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server, dequeue_mid(mid, malformed); } +int +cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) +{ + bool srv_sign_required = server->sec_mode & server->vals->signing_required; + bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; + bool mnt_sign_enabled; + + /* + * Is signing required by mnt options? If not then check + * global_secflags to see if it is there. + */ + if (!mnt_sign_required) + mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == + CIFSSEC_MUST_SIGN); + + /* + * If signing is required then it's automatically enabled too, + * otherwise, check to see if the secflags allow it. + */ + mnt_sign_enabled = mnt_sign_required ? mnt_sign_required : + (global_secflags & CIFSSEC_MAY_SIGN); + + /* If server requires signing, does client allow it? */ + if (srv_sign_required) { + if (!mnt_sign_enabled) { + cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n"); + return -EOPNOTSUPP; + } + server->sign = true; + } + + /* If client requires signing, does server allow it? 
*/ + if (mnt_sign_required) { + if (!srv_sign_enabled) { + cifs_dbg(VFS, "Server does not support signing!\n"); + return -EOPNOTSUPP; + } + server->sign = true; + } + + if (cifs_rdma_enabled(server) && server->sign) + cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n"); + + return 0; +} + + static void clean_demultiplex_info(struct TCP_Server_Info *server) { int length; /* take it off the list, if it's not already */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); list_del_init(&server->tcp_ses_list); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cancel_delayed_work_sync(&server->echo); cancel_delayed_work_sync(&server->resolve); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); server->tcpStatus = CifsExiting; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); wake_up_all(&server->response_q); /* check if we have blocked requests that need to free */ @@ -948,7 +990,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) struct list_head *tmp, *tmp2; INIT_LIST_HEAD(&dispose_list); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { mid_entry = list_entry(tmp, struct mid_q_entry, qhead); cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid); @@ -957,7 +999,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) list_move(&mid_entry->qhead, &dispose_list); mid_entry->mid_flags |= MID_DELETED; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); /* now walk dispose list and issue callbacks */ list_for_each_safe(tmp, tmp2, &dispose_list) { @@ -965,7 +1007,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid); list_del_init(&mid_entry->qhead); mid_entry->callback(mid_entry); - cifs_mid_q_entry_release(mid_entry); + release_mid(mid_entry); } /* 1/8th of sec is more than enough time for them to exit */ msleep(125); @@ -1039,19 +1081,18 @@ int cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) { char *buf = server->large_buf ? server->bigbuf : server->smallbuf; - int length; + int rc; /* * We know that we received enough to get to the MID as we * checked the pdu_length earlier. Now check to see - * if the rest of the header is OK. We borrow the length - * var for the rest of the loop to avoid a new stack var. + * if the rest of the header is OK. * * 48 bytes is enough to display the header and a little bit * into the payload for debugging purposes. 
*/ - length = server->ops->check_message(buf, server->total_read, server); - if (length != 0) + rc = server->ops->check_message(buf, server->total_read, server); + if (rc) cifs_dump_mem("Bad SMB: ", buf, min_t(unsigned int, server->total_read, 48)); @@ -1066,9 +1107,9 @@ cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) return -1; if (!mid) - return length; + return rc; - handle_mid(mid, server, buf, length); + handle_mid(mid, server, buf, rc); return 0; } @@ -1205,7 +1246,7 @@ next_pdu: if (length < 0) { for (i = 0; i < num_mids; i++) if (mids[i]) - cifs_mid_q_entry_release(mids[i]); + release_mid(mids[i]); continue; } @@ -1232,7 +1273,7 @@ next_pdu: if (!mids[i]->multiRsp || mids[i]->multiEnd) mids[i]->callback(mids[i]); - cifs_mid_q_entry_release(mids[i]); + release_mid(mids[i]); } else if (server->ops->is_oplock_break && server->ops->is_oplock_break(bufs[i], server)) { @@ -1240,7 +1281,7 @@ next_pdu: cifs_dbg(FYI, "Received oplock break\n"); } else { cifs_server_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n", - atomic_read(&midCount)); + atomic_read(&mid_count)); cifs_dump_mem("Received Data is: ", bufs[i], HEADER_SIZE(server)); smb2_add_credits_from_hdr(bufs[i], server); @@ -1411,6 +1452,7 @@ match_security(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) return true; } +/* this function must be called with srv_lock held */ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) { struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr; @@ -1471,6 +1513,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { + spin_lock(&server->srv_lock); #ifdef CONFIG_CIFS_DFS_UPCALL /* * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for @@ -1478,15 +1521,20 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx) * shares or even links that may connect to same server but having completely * different failover targets. */ - if (server->is_dfs_conn) + if (server->is_dfs_conn) { + spin_unlock(&server->srv_lock); continue; + } #endif /* * Skip ses channels since they're only handled in lower layers * (e.g. cifs_send_recv). 
*/ - if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) + if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) { + spin_unlock(&server->srv_lock); continue; + } + spin_unlock(&server->srv_lock); ++server->srv_count; spin_unlock(&cifs_tcp_ses_lock); @@ -1534,9 +1582,9 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect) else cancel_delayed_work_sync(&server->reconnect); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); server->tcpStatus = CifsExiting; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_crypto_secmech_release(server); @@ -1595,8 +1643,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, if (primary_server) { spin_lock(&cifs_tcp_ses_lock); ++primary_server->srv_count; - tcp_ses->primary_server = primary_server; spin_unlock(&cifs_tcp_ses_lock); + tcp_ses->primary_server = primary_server; } init_waitqueue_head(&tcp_ses->response_q); init_waitqueue_head(&tcp_ses->request_q); @@ -1612,6 +1660,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx, tcp_ses->lstrp = jiffies; tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression); spin_lock_init(&tcp_ses->req_lock); + spin_lock_init(&tcp_ses->srv_lock); + spin_lock_init(&tcp_ses->mid_lock); INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); INIT_LIST_HEAD(&tcp_ses->smb_ses_list); INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); @@ -1685,9 +1735,9 @@ smbd_connected: * to the struct since the kernel thread not created yet * no need to spinlock this update of tcpStatus */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcp_ses->srv_lock); tcp_ses->tcpStatus = CifsNeedNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcp_ses->srv_lock); if ((ctx->max_credits < 20) || (ctx->max_credits > 60000)) tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE; @@ -1729,6 +1779,7 @@ out_err: return ERR_PTR(rc); } +/* this function must be called with ses_lock held */ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx) { if (ctx->sectype != Unspecified && @@ -1864,10 +1915,17 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { - if (ses->ses_status == SES_EXITING) + spin_lock(&ses->ses_lock); + if (ses->ses_status == SES_EXITING) { + spin_unlock(&ses->ses_lock); continue; - if (!match_session(ses, ctx)) + } + if (!match_session(ses, ctx)) { + spin_unlock(&ses->ses_lock); continue; + } + spin_unlock(&ses->ses_lock); + ++ses->ses_count; spin_unlock(&cifs_tcp_ses_lock); return ses; @@ -1882,26 +1940,28 @@ void cifs_put_smb_ses(struct cifs_ses *ses) unsigned int chan_count; struct TCP_Server_Info *server = ses->server; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_EXITING) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return; } + spin_unlock(&ses->ses_lock); cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? 
ses->tcon_ipc->treeName : "NONE"); + spin_lock(&cifs_tcp_ses_lock); if (--ses->ses_count > 0) { spin_unlock(&cifs_tcp_ses_lock); return; } + spin_unlock(&cifs_tcp_ses_lock); /* ses_count can never go negative */ WARN_ON(ses->ses_count < 0); if (ses->ses_status == SES_GOOD) ses->ses_status = SES_EXITING; - spin_unlock(&cifs_tcp_ses_lock); cifs_free_ipc(ses); @@ -2236,6 +2296,7 @@ get_ses_fail: return ERR_PTR(rc); } +/* this function must be called with tc_lock held */ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) { if (tcon->status == TID_EXITING) @@ -2258,16 +2319,17 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) static struct cifs_tcon * cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) { - struct list_head *tmp; struct cifs_tcon *tcon; spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &ses->tcon_list) { - tcon = list_entry(tmp, struct cifs_tcon, tcon_list); - - if (!match_tcon(tcon, ctx)) + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { + spin_lock(&tcon->tc_lock); + if (!match_tcon(tcon, ctx)) { + spin_unlock(&tcon->tc_lock); continue; + } ++tcon->tc_count; + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); return tcon; } @@ -2644,7 +2706,7 @@ match_prepath(struct super_block *sb, struct cifs_mnt_data *mnt_data) int cifs_match_super(struct super_block *sb, void *data) { - struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data; + struct cifs_mnt_data *mnt_data = data; struct smb3_fs_context *ctx; struct cifs_sb_info *cifs_sb; struct TCP_Server_Info *tcp_srv; @@ -2667,6 +2729,9 @@ cifs_match_super(struct super_block *sb, void *data) ctx = mnt_data->ctx; + spin_lock(&tcp_srv->srv_lock); + spin_lock(&ses->ses_lock); + spin_lock(&tcon->tc_lock); if (!match_server(tcp_srv, ctx) || !match_session(ses, ctx) || !match_tcon(tcon, ctx) || @@ -2677,6 +2742,10 @@ cifs_match_super(struct super_block *sb, void *data) rc = compare_mount_options(sb, mnt_data); out: + spin_unlock(&tcon->tc_lock); + spin_unlock(&ses->ses_lock); + spin_unlock(&tcp_srv->srv_lock); + spin_unlock(&cifs_tcp_ses_lock); cifs_put_tlink(tlink); return rc; @@ -2954,6 +3023,7 @@ ip_connect(struct TCP_Server_Info *server) return generic_ip_connect(server); } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) { @@ -3059,6 +3129,7 @@ void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, } } } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb) { @@ -3175,6 +3246,7 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx) if (tcon->posix_extensions) cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* tell server which Unix caps we support */ if (cap_unix(tcon->ses)) { /* @@ -3182,16 +3254,17 @@ static int mount_get_conns(struct mount_ctx *mnt_ctx) * for just this mount. 
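/*
 * Editor's aside -- not part of the patch above.  A large share of this
 * series wraps SMB1/unix-extensions code in CONFIG_CIFS_ALLOW_INSECURE_LEGACY
 * and supplies stub fallbacks so callers still compile when the option is
 * off.  A minimal sketch of that pattern; the function name here is
 * illustrative, not one of the real cifs helpers:
 */
#include <linux/errno.h>

#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
int demo_legacy_query(const char *path)
{
	/* the full SMB1/unix-extensions implementation would live here */
	return 0;
}
#else
int demo_legacy_query(const char *path)
{
	/* legacy dialects compiled out: report "not supported" to the caller */
	return -EOPNOTSUPP;
}
#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */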
*/ reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->ses->server->srv_lock); if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && (le64_to_cpu(tcon->fsUnixInfo.Capability) & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->ses->server->srv_lock); rc = -EACCES; goto out; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->ses->server->srv_lock); } else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ tcon->unix_ext = 0; /* server does not support them */ /* do not care if a following call succeed - informational */ @@ -3273,9 +3346,9 @@ static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx) rc = mount_get_conns(mnt_ctx); if (mnt_ctx->server) { cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&mnt_ctx->server->srv_lock); mnt_ctx->server->is_dfs_conn = true; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&mnt_ctx->server->srv_lock); } return rc; } @@ -3990,28 +4063,28 @@ cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses, return -ENOSYS; /* only send once per connect */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (!server->ops->need_neg(server) || server->tcpStatus != CifsNeedNegotiate) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return 0; } server->tcpStatus = CifsInNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = server->ops->negotiate(xid, ses, server); if (rc == 0) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsInNegotiate) server->tcpStatus = CifsGood; else rc = -EHOSTDOWN; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); } else { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsInNegotiate) server->tcpStatus = CifsNeedNegotiate; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); } return rc; @@ -4027,7 +4100,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; bool is_binding = false; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (server->dstaddr.ss_family == AF_INET6) scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr); else @@ -4036,7 +4109,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, if (ses->ses_status != SES_GOOD && ses->ses_status != SES_NEW && ses->ses_status != SES_NEED_RECON) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return 0; } @@ -4045,7 +4118,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, if (CIFS_ALL_CHANS_GOOD(ses) || cifs_chan_in_reconnect(ses, server)) { spin_unlock(&ses->chan_lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return 0; } is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses); @@ -4054,7 +4127,7 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, if (!is_binding) ses->ses_status = SES_IN_SETUP; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); if (!is_binding) { ses->capabilities = server->capabilities; @@ -4078,22 +4151,22 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, if (rc) { cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_IN_SETUP) ses->ses_status = SES_NEED_RECON; 
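/*
 * Editor's aside -- not part of the patch above.  The recurring change in
 * connect.c is "check and transition a status field under that object's own
 * spinlock" (srv_lock, ses_lock, tc_lock) instead of the global
 * cifs_tcp_ses_lock.  A minimal, self-contained sketch of the pattern with
 * made-up names (the real fields live in cifsglob.h):
 */
#include <linux/spinlock.h>
#include <linux/types.h>

enum demo_status { DEMO_NEW, DEMO_IN_SETUP, DEMO_GOOD, DEMO_NEED_RECON };

struct demo_conn {
	spinlock_t lock;		/* protects @status only */
	enum demo_status status;
};

/* returns true if this caller won the right to run the setup step */
static bool demo_begin_setup(struct demo_conn *c)
{
	bool run = false;

	spin_lock(&c->lock);
	if (c->status == DEMO_NEW || c->status == DEMO_NEED_RECON) {
		c->status = DEMO_IN_SETUP;
		run = true;
	}
	spin_unlock(&c->lock);
	return run;
}

/* record the outcome, but only if nobody else changed the state meanwhile */
static void demo_end_setup(struct demo_conn *c, int rc)
{
	spin_lock(&c->lock);
	if (c->status == DEMO_IN_SETUP)
		c->status = rc ? DEMO_NEED_RECON : DEMO_GOOD;
	spin_unlock(&c->lock);
}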
spin_lock(&ses->chan_lock); cifs_chan_clear_in_reconnect(ses, server); spin_unlock(&ses->chan_lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); } else { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_IN_SETUP) ses->ses_status = SES_GOOD; spin_lock(&ses->chan_lock); cifs_chan_clear_in_reconnect(ses, server); cifs_chan_clear_need_reconnect(ses, server); spin_unlock(&ses->chan_lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); } return rc; @@ -4167,8 +4240,10 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) goto out; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(ses)) reset_cifs_unix_caps(0, tcon, NULL, ctx); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ out: kfree(ctx->username); @@ -4557,15 +4632,15 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru struct dfs_info3_param ref = {0}; /* only send once per connect */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->ses->ses_status != SES_GOOD || (tcon->status != TID_NEW && tcon->status != TID_NEED_TCON)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); return 0; } tcon->status = TID_IN_TCON; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); if (!tree) { @@ -4604,15 +4679,15 @@ out: cifs_put_tcp_super(sb); if (rc) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_NEED_TCON; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); } else { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_GOOD; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); tcon->need_reconnect = false; } @@ -4625,28 +4700,28 @@ int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const stru const struct smb_version_operations *ops = tcon->ses->server->ops; /* only send once per connect */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->ses->ses_status != SES_GOOD || (tcon->status != TID_NEW && tcon->status != TID_NEED_TCON)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); return 0; } tcon->status = TID_IN_TCON; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc); if (rc) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_NEED_TCON; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); } else { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_IN_TCON) tcon->status = TID_GOOD; - spin_unlock(&cifs_tcp_ses_lock); tcon->need_reconnect = false; + spin_unlock(&tcon->tc_lock); } return rc; diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index 34a8f3baed5e..a9b6c3eba6de 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -1526,15 +1526,21 @@ static void refresh_mounts(struct cifs_ses **sessions) spin_lock(&cifs_tcp_ses_lock); list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { - if (!server->is_dfs_conn) + spin_lock(&server->srv_lock); + if (!server->is_dfs_conn) { + spin_unlock(&server->srv_lock); continue; + } + spin_unlock(&server->srv_lock); list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { + spin_lock(&tcon->tc_lock); if 
(!tcon->ipc && !tcon->need_reconnect) { tcon->tc_count++; list_add_tail(&tcon->ulist, &tcons); } + spin_unlock(&tcon->tc_lock); } } } diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index ce9b22aecfba..08f7392716e2 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -193,6 +193,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, return PTR_ERR(full_path); } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { @@ -261,6 +262,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, * rare for path not covered on files) */ } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ desired_access = 0; if (OPEN_FMODE(oflags) & FMODE_READ) @@ -316,6 +318,7 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, goto out; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * If Open reported that we actually created a file then we now have to * set the mode if possible. @@ -357,6 +360,9 @@ cifs_create_get_file_info: rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, xid); else { +#else + { +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* TODO: Add support for calling POSIX query info here, but passing in fid */ rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, xid, fid); @@ -377,7 +383,9 @@ cifs_create_get_file_info: } } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY cifs_create_set_dentry: +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (rc != 0) { cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n", rc); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e64cda7a7610..85f2abcb2795 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -26,6 +26,7 @@ #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" +#include "smb2proto.h" #include "cifs_unicode.h" #include "cifs_debug.h" #include "cifs_fs_sb.h" @@ -34,6 +35,53 @@ #include "fs_context.h" #include "cifs_ioctl.h" +/* + * Mark as invalid, all open files on tree connections since they + * were closed when session to server was lost. + */ +void +cifs_mark_open_files_invalid(struct cifs_tcon *tcon) +{ + struct cifsFileInfo *open_file = NULL; + struct list_head *tmp; + struct list_head *tmp1; + + /* only send once per connect */ + spin_lock(&tcon->ses->ses_lock); + if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) { + spin_unlock(&tcon->ses->ses_lock); + return; + } + tcon->status = TID_IN_FILES_INVALIDATE; + spin_unlock(&tcon->ses->ses_lock); + + /* list all files open on tree connection and mark them invalid */ + spin_lock(&tcon->open_file_lock); + list_for_each_safe(tmp, tmp1, &tcon->openFileList) { + open_file = list_entry(tmp, struct cifsFileInfo, tlist); + open_file->invalidHandle = true; + open_file->oplock_break_cancelled = true; + } + spin_unlock(&tcon->open_file_lock); + + mutex_lock(&tcon->crfid.fid_mutex); + tcon->crfid.is_valid = false; + /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ + close_cached_dir_lease_locked(&tcon->crfid); + memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid)); + mutex_unlock(&tcon->crfid.fid_mutex); + + spin_lock(&tcon->tc_lock); + if (tcon->status == TID_IN_FILES_INVALIDATE) + tcon->status = TID_NEED_TCON; + spin_unlock(&tcon->tc_lock); + + /* + * BB Add call to invalidate_inodes(sb) for all superblocks mounted + * to this tcon. 
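/*
 * Editor's aside -- not part of the patch above.  cifs_mark_open_files_invalid()
 * just above follows the common pattern in this series: walk a list under its
 * spinlock and flag every entry, using list_for_each_entry() rather than the
 * older list_for_each()/list_entry() pair being converted elsewhere.  A
 * minimal sketch with simplified, made-up types:
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_handle {
	struct list_head tlist;		/* linked on demo_tcon.open_files */
	bool invalid;
};

struct demo_tcon {
	spinlock_t open_file_lock;	/* protects @open_files */
	struct list_head open_files;
};

static void demo_mark_handles_invalid(struct demo_tcon *tcon)
{
	struct demo_handle *h;

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(h, &tcon->open_files, tlist)
		h->invalid = true;	/* handle will be reopened lazily on next use */
	spin_unlock(&tcon->open_file_lock);
}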
+ */ +} + static inline int cifs_convert_flags(unsigned int flags) { if ((flags & O_ACCMODE) == O_RDONLY) @@ -52,6 +100,7 @@ static inline int cifs_convert_flags(unsigned int flags) FILE_READ_DATA); } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static u32 cifs_posix_convert_flags(unsigned int flags) { u32 posix_flags = 0; @@ -85,6 +134,7 @@ static u32 cifs_posix_convert_flags(unsigned int flags) return posix_flags; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static inline int cifs_get_disposition(unsigned int flags) { @@ -100,6 +150,7 @@ static inline int cifs_get_disposition(unsigned int flags) return FILE_OPEN; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY int cifs_posix_open(const char *full_path, struct inode **pinode, struct super_block *sb, int mode, unsigned int f_flags, __u32 *poplock, __u16 *pnetfid, unsigned int xid) @@ -161,6 +212,7 @@ posix_open_ret: kfree(presp_data); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, @@ -579,6 +631,7 @@ int cifs_open(struct inode *inode, struct file *file) else oplock = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (!tcon->broken_posix_open && tcon->unix_ext && cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { @@ -603,6 +656,7 @@ int cifs_open(struct inode *inode, struct file *file) * or DFS errors. */ } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (server->ops->get_lease_key) server->ops->get_lease_key(inode, &fid); @@ -630,6 +684,7 @@ int cifs_open(struct inode *inode, struct file *file) goto out; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { /* * Time to set mode which we can not set earlier due to @@ -647,6 +702,7 @@ int cifs_open(struct inode *inode, struct file *file) CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid, cfile->pid); } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ use_cache: fscache_use_cookie(cifs_inode_cookie(file_inode(file)), @@ -664,7 +720,9 @@ out: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_push_posix_locks(struct cifsFileInfo *cfile); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* * Try to reacquire byte range locks that were released when session @@ -673,10 +731,12 @@ static int cifs_push_posix_locks(struct cifsFileInfo *cfile); static int cifs_relock_file(struct cifsFileInfo *cfile) { - struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); if (cinode->can_cache_brlcks) { @@ -685,11 +745,13 @@ cifs_relock_file(struct cifsFileInfo *cfile) return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = tcon->ses->server->ops->push_mand_locks(cfile); up_read(&cinode->lock_sem); @@ -750,6 +812,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) else oplock = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext && cap_unix(tcon->ses) && 
(CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { @@ -773,6 +836,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) * in the reconnect path it is important to retry hard */ } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ desired_access = cifs_convert_flags(cfile->f_flags); @@ -817,7 +881,9 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) goto reopen_error_exit; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY reopen_success: +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ cfile->invalidHandle = false; mutex_unlock(&cfile->fh_mutex); cinode = CIFS_I(inode); @@ -928,9 +994,7 @@ int cifs_close(struct inode *inode, struct file *file) void cifs_reopen_persistent_handles(struct cifs_tcon *tcon) { - struct cifsFileInfo *open_file; - struct list_head *tmp; - struct list_head *tmp1; + struct cifsFileInfo *open_file, *tmp; struct list_head tmp_list; if (!tcon->use_persistent || !tcon->need_reopen_files) @@ -943,8 +1007,7 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon) /* list all files open on tree connection, reopen resilient handles */ spin_lock(&tcon->open_file_lock); - list_for_each(tmp, &tcon->openFileList) { - open_file = list_entry(tmp, struct cifsFileInfo, tlist); + list_for_each_entry(open_file, &tcon->openFileList, tlist) { if (!open_file->invalidHandle) continue; cifsFileInfo_get(open_file); @@ -952,8 +1015,7 @@ cifs_reopen_persistent_handles(struct cifs_tcon *tcon) } spin_unlock(&tcon->open_file_lock); - list_for_each_safe(tmp, tmp1, &tmp_list) { - open_file = list_entry(tmp, struct cifsFileInfo, rlist); + list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) { if (cifs_reopen_file(open_file, false /* do not flush */)) tcon->need_reopen_files = true; list_del_init(&open_file->rlist); @@ -1196,6 +1258,7 @@ try_again: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * Check if there is another lock that prevents us to set the lock (posix * style). 
If such a lock exists, update the flock structure with its @@ -1334,6 +1397,7 @@ hash_lockowner(fl_owner_t owner) { return cifs_lock_secret ^ hash32_ptr((const void *)owner); } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ struct lock_to_push { struct list_head llist; @@ -1344,6 +1408,7 @@ struct lock_to_push { __u8 type; }; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_push_posix_locks(struct cifsFileInfo *cfile) { @@ -1431,14 +1496,17 @@ err_out: } goto out; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_push_locks(struct cifsFileInfo *cfile) { - struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); int rc = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* we are going to update can_cache_brlcks here - need a write access */ cifs_down_write(&cinode->lock_sem); @@ -1447,11 +1515,13 @@ cifs_push_locks(struct cifsFileInfo *cfile) return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) rc = cifs_push_posix_locks(cfile); else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = tcon->ses->server->ops->push_mand_locks(cfile); cinode->can_cache_brlcks = false; @@ -1515,6 +1585,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct TCP_Server_Info *server = tcon->ses->server; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY __u16 netfid = cfile->fid.netfid; if (posix_lck) { @@ -1534,6 +1605,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u32 type, posix_lock_type, wait_flag); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock); if (!rc) @@ -1594,6 +1666,7 @@ cifs_free_llist(struct list_head *llist) } } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY int cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, unsigned int xid) @@ -1706,6 +1779,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, kfree(buf); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, @@ -1719,6 +1793,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, struct TCP_Server_Info *server = tcon->ses->server; struct inode *inode = d_inode(cfile->dentry); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (posix_lck) { int posix_lock_type; @@ -1740,7 +1815,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, NULL, posix_lock_type, wait_flag); goto out; } - +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (lock) { struct cifsLockInfo *lock; @@ -2204,6 +2279,185 @@ cifs_get_readable_path(struct cifs_tcon *tcon, const char *name, return -ENOENT; } +void +cifs_writedata_release(struct kref *refcount) +{ + struct cifs_writedata *wdata = container_of(refcount, + struct cifs_writedata, refcount); +#ifdef CONFIG_CIFS_SMB_DIRECT + if (wdata->mr) { + smbd_deregister_mr(wdata->mr); + wdata->mr = NULL; + } +#endif + + if (wdata->cfile) + cifsFileInfo_put(wdata->cfile); + + kvfree(wdata->pages); + kfree(wdata); +} + +/* + * Write failed with a retryable error. 
Resend the write request. It's also + * possible that the page was redirtied so re-clean the page. + */ +static void +cifs_writev_requeue(struct cifs_writedata *wdata) +{ + int i, rc = 0; + struct inode *inode = d_inode(wdata->cfile->dentry); + struct TCP_Server_Info *server; + unsigned int rest_len; + + server = tlink_tcon(wdata->cfile->tlink)->ses->server; + i = 0; + rest_len = wdata->bytes; + do { + struct cifs_writedata *wdata2; + unsigned int j, nr_pages, wsize, tailsz, cur_len; + + wsize = server->ops->wp_retry_size(inode); + if (wsize < rest_len) { + nr_pages = wsize / PAGE_SIZE; + if (!nr_pages) { + rc = -EOPNOTSUPP; + break; + } + cur_len = nr_pages * PAGE_SIZE; + tailsz = PAGE_SIZE; + } else { + nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE); + cur_len = rest_len; + tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; + } + + wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); + if (!wdata2) { + rc = -ENOMEM; + break; + } + + for (j = 0; j < nr_pages; j++) { + wdata2->pages[j] = wdata->pages[i + j]; + lock_page(wdata2->pages[j]); + clear_page_dirty_for_io(wdata2->pages[j]); + } + + wdata2->sync_mode = wdata->sync_mode; + wdata2->nr_pages = nr_pages; + wdata2->offset = page_offset(wdata2->pages[0]); + wdata2->pagesz = PAGE_SIZE; + wdata2->tailsz = tailsz; + wdata2->bytes = cur_len; + + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, + &wdata2->cfile); + if (!wdata2->cfile) { + cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n", + rc); + if (!is_retryable_error(rc)) + rc = -EBADF; + } else { + wdata2->pid = wdata2->cfile->pid; + rc = server->ops->async_writev(wdata2, + cifs_writedata_release); + } + + for (j = 0; j < nr_pages; j++) { + unlock_page(wdata2->pages[j]); + if (rc != 0 && !is_retryable_error(rc)) { + SetPageError(wdata2->pages[j]); + end_page_writeback(wdata2->pages[j]); + put_page(wdata2->pages[j]); + } + } + + kref_put(&wdata2->refcount, cifs_writedata_release); + if (rc) { + if (is_retryable_error(rc)) + continue; + i += nr_pages; + break; + } + + rest_len -= cur_len; + i += nr_pages; + } while (i < wdata->nr_pages); + + /* cleanup remaining pages from the original wdata */ + for (; i < wdata->nr_pages; i++) { + SetPageError(wdata->pages[i]); + end_page_writeback(wdata->pages[i]); + put_page(wdata->pages[i]); + } + + if (rc != 0 && !is_retryable_error(rc)) + mapping_set_error(inode->i_mapping, rc); + kref_put(&wdata->refcount, cifs_writedata_release); +} + +void +cifs_writev_complete(struct work_struct *work) +{ + struct cifs_writedata *wdata = container_of(work, + struct cifs_writedata, work); + struct inode *inode = d_inode(wdata->cfile->dentry); + int i = 0; + + if (wdata->result == 0) { + spin_lock(&inode->i_lock); + cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); + spin_unlock(&inode->i_lock); + cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), + wdata->bytes); + } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) + return cifs_writev_requeue(wdata); + + for (i = 0; i < wdata->nr_pages; i++) { + struct page *page = wdata->pages[i]; + + if (wdata->result == -EAGAIN) + __set_page_dirty_nobuffers(page); + else if (wdata->result < 0) + SetPageError(page); + end_page_writeback(page); + cifs_readpage_to_fscache(inode, page); + put_page(page); + } + if (wdata->result != -EAGAIN) + mapping_set_error(inode->i_mapping, wdata->result); + kref_put(&wdata->refcount, cifs_writedata_release); +} + +struct cifs_writedata * +cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete) +{ + struct page **pages = 
+ kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); + if (pages) + return cifs_writedata_direct_alloc(pages, complete); + + return NULL; +} + +struct cifs_writedata * +cifs_writedata_direct_alloc(struct page **pages, work_func_t complete) +{ + struct cifs_writedata *wdata; + + wdata = kzalloc(sizeof(*wdata), GFP_NOFS); + if (wdata != NULL) { + wdata->pages = pages; + kref_init(&wdata->refcount); + INIT_LIST_HEAD(&wdata->list); + init_completion(&wdata->done); + INIT_WORK(&wdata->work, complete); + } + return wdata; +} + + static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) { struct address_space *mapping = page->mapping; @@ -4459,10 +4713,11 @@ static void cifs_readahead(struct readahead_control *ractl) * TODO: Send a whole batch of pages to be read * by the cache. */ - page = readahead_page(ractl); - last_batch_size = 1 << thp_order(page); + struct folio *folio = readahead_folio(ractl); + + last_batch_size = folio_nr_pages(folio); if (cifs_readpage_from_fscache(ractl->mapping->host, - page) < 0) { + &folio->page) < 0) { /* * TODO: Deal with cache read failure * here, but for the moment, delegate @@ -4470,7 +4725,7 @@ static void cifs_readahead(struct readahead_control *ractl) */ caching = false; } - unlock_page(page); + folio_unlock(folio); next_cached++; cache_nr_pages--; if (cache_nr_pages == 0) diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 81da81e18553..eeeaba3dec05 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -339,6 +339,7 @@ cifs_create_dfs_fattr(struct cifs_fattr *fattr, struct super_block *sb) fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_get_file_info_unix(struct file *filp) { @@ -432,6 +433,14 @@ int cifs_get_inode_info_unix(struct inode **pinode, cgiiu_exit: return rc; } +#else +int cifs_get_inode_info_unix(struct inode **pinode, + const unsigned char *full_path, + struct super_block *sb, unsigned int xid) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_sfu_type(struct cifs_fattr *fattr, const char *path, @@ -795,6 +804,7 @@ static __u64 simple_hashstr(const char *str) return hash; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /** * cifs_backup_query_path_info - SMB1 fallback code to get ino * @@ -847,6 +857,7 @@ cifs_backup_query_path_info(int xid, *data = (FILE_ALL_INFO *)info.srch_entries_start; return 0; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static void cifs_set_fattr_ino(int xid, @@ -991,6 +1002,7 @@ cifs_get_inode_info(struct inode **inode, rc = 0; break; case -EACCES: +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * perm errors, try again with backup flags if possible * @@ -1022,6 +1034,9 @@ cifs_get_inode_info(struct inode **inode, /* nothing we can do, bail out */ goto out; } +#else + goto out; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ break; default: cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc); @@ -1037,8 +1052,9 @@ cifs_get_inode_info(struct inode **inode, /* * 4. 
Tweak fattr based on mount options */ - +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY handle_mnt_opt: +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* query for SFU type info if supported and needed */ if (fattr.cf_cifsattrs & ATTR_SYSTEM && cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { @@ -1223,7 +1239,7 @@ static const struct inode_operations cifs_ipc_inode_ops = { static int cifs_find_inode(struct inode *inode, void *opaque) { - struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; + struct cifs_fattr *fattr = opaque; /* don't match inode with different uniqueid */ if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) @@ -1247,7 +1263,7 @@ cifs_find_inode(struct inode *inode, void *opaque) static int cifs_init_inode(struct inode *inode, void *opaque) { - struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; + struct cifs_fattr *fattr = opaque; CIFS_I(inode)->uniqueid = fattr->cf_uniqueid; CIFS_I(inode)->createtime = fattr->cf_createtime; @@ -1435,6 +1451,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, unsigned int xid, return server->ops->set_file_info(inode, full_path, &info_buf, xid); } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * Open the given file (if it isn't already), set the DELETE_ON_CLOSE bit * and rename it to a random name that hopefully won't conflict with @@ -1565,6 +1582,7 @@ undo_setattr: goto out_close; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* copied from fs/nfs/dir.c with small changes */ static void @@ -1627,6 +1645,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) } cifs_close_deferred_file_under_dentry(tcon, full_path); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = CIFSPOSIXDelFile(xid, tcon, full_path, @@ -1636,6 +1655,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) if ((rc == 0) || (rc == -ENOENT)) goto psx_del_no_retry; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ retry_std_delete: if (!server->ops->unlink) { @@ -1714,9 +1734,11 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, if (tcon->posix_extensions) rc = smb311_posix_get_inode_info(&inode, full_path, parent->i_sb, xid); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY else if (tcon->unix_ext) rc = cifs_get_inode_info_unix(&inode, full_path, parent->i_sb, xid); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ else rc = cifs_get_inode_info(&inode, full_path, NULL, parent->i_sb, xid, NULL); @@ -1746,6 +1768,7 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, if (parent->i_mode & S_ISGID) mode |= S_ISGID; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext) { struct cifs_unix_set_info_args args = { .mode = mode, @@ -1768,6 +1791,9 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, cifs_sb->local_nls, cifs_remap(cifs_sb)); } else { +#else + { +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ struct TCP_Server_Info *server = tcon->ses->server; if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo) @@ -1788,6 +1814,7 @@ cifs_mkdir_qinfo(struct inode *parent, struct dentry *dentry, umode_t mode, return 0; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode, const char *full_path, struct cifs_sb_info *cifs_sb, @@ -1850,6 +1877,7 @@ posix_mkdir_get_info: xid); goto posix_mkdir_out; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int 
cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode, struct dentry *direntry, umode_t mode) @@ -1892,6 +1920,7 @@ int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode, goto mkdir_out; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))) { rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb, @@ -1899,6 +1928,7 @@ int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode, if (rc != -EOPNOTSUPP) goto mkdir_out; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (!server->ops->mkdir) { rc = -ENOSYS; @@ -2015,9 +2045,12 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, struct tcon_link *tlink; struct cifs_tcon *tcon; struct TCP_Server_Info *server; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifs_fid fid; struct cifs_open_parms oparms; - int oplock, rc; + int oplock; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ + int rc; tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) @@ -2043,6 +2076,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, if (server->vals->protocol_id != 0) goto do_rename_exit; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* open-file renames don't work across directories */ if (to_dentry->d_parent != from_dentry->d_parent) goto do_rename_exit; @@ -2064,6 +2098,7 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry, cifs_sb->local_nls, cifs_remap(cifs_sb)); CIFSSMBClose(xid, tcon, fid.netfid); } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ do_rename_exit: if (rc == 0) d_move(from_dentry, to_dentry); @@ -2081,11 +2116,13 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, struct cifs_sb_info *cifs_sb; struct tcon_link *tlink; struct cifs_tcon *tcon; - FILE_UNIX_BASIC_INFO *info_buf_source = NULL; - FILE_UNIX_BASIC_INFO *info_buf_target; unsigned int xid; int rc, tmprc; int retry_count = 0; + FILE_UNIX_BASIC_INFO *info_buf_source = NULL; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + FILE_UNIX_BASIC_INFO *info_buf_target; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (flags & ~RENAME_NOREPLACE) return -EINVAL; @@ -2139,6 +2176,7 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, if (flags & RENAME_NOREPLACE) goto cifs_rename_exit; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (rc == -EEXIST && tcon->unix_ext) { /* * Are src and dst hardlinks of same inode? 
We can only tell @@ -2178,6 +2216,8 @@ cifs_rename2(struct user_namespace *mnt_userns, struct inode *source_dir, */ unlink_target: +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ + /* Try unlinking the target dentry if it's not negative */ if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) { if (d_is_dir(target_dentry)) @@ -2337,14 +2377,18 @@ int cifs_revalidate_file_attr(struct file *filp) { int rc = 0; struct dentry *dentry = file_dentry(filp); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (!cifs_dentry_needs_reval(dentry)) return rc; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tlink_tcon(cfile->tlink)->unix_ext) rc = cifs_get_file_info_unix(filp); else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_get_file_info(filp); return rc; @@ -2653,6 +2697,7 @@ set_size_out: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) { @@ -2800,6 +2845,7 @@ out: free_xid(xid); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ static int cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) @@ -2995,16 +3041,20 @@ cifs_setattr(struct user_namespace *mnt_userns, struct dentry *direntry, struct iattr *attrs) { struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); - struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); int rc, retries = 0; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY + struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ if (unlikely(cifs_forced_shutdown(cifs_sb))) return -EIO; do { +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (pTcon->unix_ext) rc = cifs_setattr_unix(direntry, attrs); else +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ rc = cifs_setattr_nounix(direntry, attrs); retries++; } while (is_retryable_error(rc) && retries < 2); diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c index 0359b604bdbc..b6e6e5d6c8dd 100644 --- a/fs/cifs/ioctl.c +++ b/fs/cifs/ioctl.c @@ -333,6 +333,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) tcon = tlink_tcon(pSMBFile->tlink); caps = le64_to_cpu(tcon->fsUnixInfo.Capability); #ifdef CONFIG_CIFS_POSIX +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (CIFS_UNIX_EXTATTR_CAP & caps) { __u64 ExtAttrMask = 0; rc = CIFSGetExtAttr(xid, tcon, @@ -345,6 +346,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) if (rc != EOPNOTSUPP) break; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ #endif /* CONFIG_CIFS_POSIX */ rc = 0; if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) { diff --git a/fs/cifs/link.c b/fs/cifs/link.c index bbdf3281559c..6803cb27eecc 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c @@ -286,6 +286,7 @@ out: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY /* * SMB 1.0 Protocol specific functions */ @@ -368,6 +369,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, CIFSSMBClose(xid, tcon, fid.netfid); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* * SMB 2.1/SMB3 Protocol specific functions @@ -532,11 +534,15 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, goto cifs_hl_exit; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY if (tcon->unix_ext) rc = CIFSUnixCreateHardLink(xid, tcon, from_name, to_name, cifs_sb->local_nls, cifs_remap(cifs_sb)); else { +#else + { +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ server = tcon->ses->server; if 
(!server->ops->create_hardlink) { rc = -ENOSYS; @@ -704,10 +710,12 @@ cifs_symlink(struct user_namespace *mnt_userns, struct inode *inode, /* BB what if DFS and this volume is on different share? BB */ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY else if (pTcon->unix_ext) rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, cifs_sb->local_nls, cifs_remap(cifs_sb)); +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ /* else rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName, cifs_sb_target->local_nls); */ diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index 0e84e6fcf8ab..7a906067db04 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -69,6 +69,7 @@ sesInfoAlloc(void) ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL); if (ret_buf) { atomic_inc(&sesInfoAllocCount); + spin_lock_init(&ret_buf->ses_lock); ret_buf->ses_status = SES_NEW; ++ret_buf->ses_count; INIT_LIST_HEAD(&ret_buf->smb_ses_list); @@ -126,6 +127,7 @@ tconInfoAlloc(void) atomic_inc(&tconInfoAllocCount); ret_buf->status = TID_NEW; ++ret_buf->tc_count; + spin_lock_init(&ret_buf->tc_lock); INIT_LIST_HEAD(&ret_buf->openFileList); INIT_LIST_HEAD(&ret_buf->tcon_list); spin_lock_init(&ret_buf->open_file_lock); @@ -172,9 +174,9 @@ cifs_buf_get(void) /* clear the first few header bytes */ /* for most paths, more is cleared in header_assemble */ memset(ret_buf, 0, buf_size + 3); - atomic_inc(&bufAllocCount); + atomic_inc(&buf_alloc_count); #ifdef CONFIG_CIFS_STATS2 - atomic_inc(&totBufAllocCount); + atomic_inc(&total_buf_alloc_count); #endif /* CONFIG_CIFS_STATS2 */ return ret_buf; @@ -189,7 +191,7 @@ cifs_buf_release(void *buf_to_free) } mempool_free(buf_to_free, cifs_req_poolp); - atomic_dec(&bufAllocCount); + atomic_dec(&buf_alloc_count); return; } @@ -205,9 +207,9 @@ cifs_small_buf_get(void) ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS); /* No need to clear memory here, cleared in header assemble */ /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ - atomic_inc(&smBufAllocCount); + atomic_inc(&small_buf_alloc_count); #ifdef CONFIG_CIFS_STATS2 - atomic_inc(&totSmBufAllocCount); + atomic_inc(&total_small_buf_alloc_count); #endif /* CONFIG_CIFS_STATS2 */ return ret_buf; @@ -223,7 +225,7 @@ cifs_small_buf_release(void *buf_to_free) } mempool_free(buf_to_free, cifs_sm_req_poolp); - atomic_dec(&smBufAllocCount); + atomic_dec(&small_buf_alloc_count); return; } @@ -400,7 +402,6 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) { struct smb_hdr *buf = (struct smb_hdr *)buffer; struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; - struct list_head *tmp, *tmp1, *tmp2; struct cifs_ses *ses; struct cifs_tcon *tcon; struct cifsInodeInfo *pCifsInode; @@ -467,18 +468,14 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv) /* look up tcon based on tid & uid */ spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &srv->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - list_for_each(tmp1, &ses->tcon_list) { - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); + list_for_each_entry(ses, &srv->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->tid != buf->Tid) continue; cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); spin_lock(&tcon->open_file_lock); - list_for_each(tmp2, &tcon->openFileList) { - netfile = list_entry(tmp2, struct cifsFileInfo, - tlist); + list_for_each_entry(netfile, 
&tcon->openFileList, tlist) { if (pSMB->Fid != netfile->fid.netfid) continue; @@ -763,14 +760,12 @@ void cifs_close_all_deferred_files(struct cifs_tcon *tcon) { struct cifsFileInfo *cfile; - struct list_head *tmp; struct file_list *tmp_list, *tmp_next_list; struct list_head file_head; INIT_LIST_HEAD(&file_head); spin_lock(&tcon->open_file_lock); - list_for_each(tmp, &tcon->openFileList) { - cfile = list_entry(tmp, struct cifsFileInfo, tlist); + list_for_each_entry(cfile, &tcon->openFileList, tlist) { if (delayed_work_pending(&cfile->deferred)) { if (cancel_delayed_work(&cfile->deferred)) { tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); @@ -793,7 +788,6 @@ void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) { struct cifsFileInfo *cfile; - struct list_head *tmp; struct file_list *tmp_list, *tmp_next_list; struct list_head file_head; void *page; @@ -802,8 +796,7 @@ cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) INIT_LIST_HEAD(&file_head); page = alloc_dentry_path(); spin_lock(&tcon->open_file_lock); - list_for_each(tmp, &tcon->openFileList) { - cfile = list_entry(tmp, struct cifsFileInfo, tlist); + list_for_each_entry(cfile, &tcon->openFileList, tlist) { full_path = build_path_from_dentry(cfile->dentry, page); if (strstr(full_path, path)) { if (delayed_work_pending(&cfile->deferred)) { diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 235aa1b395eb..28caae7aed1b 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -911,7 +911,7 @@ map_and_check_smb_error(struct mid_q_entry *mid, bool logErr) unsigned int smbCalcSize(void *buf, struct TCP_Server_Info *server) { - struct smb_hdr *ptr = (struct smb_hdr *)buf; + struct smb_hdr *ptr = buf; return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + 2 /* size of the bcc field */ + get_bcc(ptr)); } diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 02c8b2906196..3af3b05b6c74 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -499,6 +499,7 @@ out: return rc; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, struct TCP_Server_Info *server, SESSION_SETUP_ANDX *pSMB) @@ -591,7 +592,6 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, *pbcc_area = bcc_ptr; } - static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, const struct nls_table *nls_cp) { @@ -753,6 +753,7 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft, for it later, but it is not very important */ cifs_dbg(FYI, "ascii: bytes left %d\n", bleft); } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses) @@ -1170,6 +1171,7 @@ struct sess_data { struct kvec iov[3]; }; +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY static int sess_alloc_buffer(struct sess_data *sess_data, int wct) { @@ -1846,3 +1848,4 @@ out: kfree(sess_data); return rc; } +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 2e20ee4dab7b..f36b2d2d40ca 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -92,17 +92,17 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer) struct smb_hdr *buf = (struct smb_hdr *)buffer; struct mid_q_entry *mid; - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_entry(mid, &server->pending_mid_q, qhead) { if (compare_mid(mid->mid, buf) && mid->mid_state == MID_REQUEST_SUBMITTED && le16_to_cpu(mid->command) == buf->Command) { kref_get(&mid->refcount); - 
spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return mid; } } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return NULL; } @@ -166,7 +166,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server) __u16 last_mid, cur_mid; bool collision, reconnect = false; - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); /* mid is 16 bit only for CIFS/SMB */ cur_mid = (__u16)((server->CurrentMid) & 0xffff); @@ -225,7 +225,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server) } cur_mid++; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); if (reconnect) { cifs_signal_cifsd_for_reconnect(server, false); diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 17813c3d0c6e..818cc4dee0e2 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -132,15 +132,15 @@ static __u32 get_neg_ctxt_len(struct smb2_hdr *hdr, __u32 len, } int -smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) +smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server) { struct smb2_hdr *shdr = (struct smb2_hdr *)buf; struct smb2_pdu *pdu = (struct smb2_pdu *)shdr; - __u64 mid; - __u32 clc_len; /* calculated length */ - int command; - int pdu_size = sizeof(struct smb2_pdu); int hdr_size = sizeof(struct smb2_hdr); + int pdu_size = sizeof(struct smb2_pdu); + int command; + __u32 calc_len; /* calculated length */ + __u64 mid; /* * Add function to do table lookup of StructureSize by command @@ -154,7 +154,7 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) /* decrypt frame now that it is completely read in */ spin_lock(&cifs_tcp_ses_lock); - list_for_each_entry(iter, &srvr->smb_ses_list, smb_ses_list) { + list_for_each_entry(iter, &server->smb_ses_list, smb_ses_list) { if (iter->Suid == le64_to_cpu(thdr->SessionId)) { ses = iter; break; @@ -221,30 +221,33 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) } } - clc_len = smb2_calc_size(buf, srvr); + calc_len = smb2_calc_size(buf, server); + + /* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might + * be 0, and not a real miscalculation */ + if (command == SMB2_IOCTL_HE && calc_len == 0) + return 0; - if (shdr->Command == SMB2_NEGOTIATE) - clc_len += get_neg_ctxt_len(shdr, len, clc_len); + if (command == SMB2_NEGOTIATE_HE) + calc_len += get_neg_ctxt_len(shdr, len, calc_len); - if (len != clc_len) { - cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n", - clc_len, len, mid); + if (len != calc_len) { /* create failed on symlink */ if (command == SMB2_CREATE_HE && shdr->Status == STATUS_STOPPED_ON_SYMLINK) return 0; /* Windows 7 server returns 24 bytes more */ - if (clc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE) + if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE) return 0; /* server can return one byte more due to implied bcc[0] */ - if (clc_len == len + 1) + if (calc_len == len + 1) return 0; /* * Some windows servers (win2016) will pad also the final * PDU in a compound to 8 bytes. */ - if (((clc_len + 7) & ~7) == len) + if (((calc_len + 7) & ~7) == len) return 0; /* @@ -253,12 +256,18 @@ smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) * SMB2/SMB3 frame length (header + smb2 response specific data) * Some windows servers also pad up to 8 bytes when compounding. */ - if (clc_len < len) + if (calc_len < len) return 0; - pr_warn_once( - "srv rsp too short, len %d not %d. 
cmd:%d mid:%llu\n", - len, clc_len, command, mid); + /* Only log a message if len was really miscalculated */ + if (unlikely(cifsFYI)) + cifs_dbg(FYI, "Server response too short: calculated " + "length %u doesn't match read length %u (cmd=%d, mid=%llu)\n", + calc_len, len, command, mid); + else + pr_warn("Server response too short: calculated length " + "%u doesn't match read length %u (cmd=%d, mid=%llu)\n", + calc_len, len, command, mid); return 1; } @@ -402,7 +411,7 @@ smb2_get_data_area_len(int *off, int *len, struct smb2_hdr *shdr) unsigned int smb2_calc_size(void *buf, struct TCP_Server_Info *srvr) { - struct smb2_pdu *pdu = (struct smb2_pdu *)buf; + struct smb2_pdu *pdu = buf; struct smb2_hdr *shdr = &pdu->hdr; int offset; /* the offset from the beginning of SMB to data area */ int data_length; /* the length of the variable length data area */ diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 8802995b2d3d..c0039dc0715a 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -126,13 +126,13 @@ smb2_add_credits(struct TCP_Server_Info *server, optype, scredits, add); } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect || server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); switch (rc) { case -1: @@ -218,12 +218,12 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, spin_lock(&server->req_lock); } else { spin_unlock(&server->req_lock); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); spin_lock(&server->req_lock); scredits = server->credits; @@ -319,19 +319,19 @@ smb2_get_next_mid(struct TCP_Server_Info *server) { __u64 mid; /* for SMB2 we need the current value */ - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); mid = server->CurrentMid++; - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return mid; } static void smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val) { - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); if (server->CurrentMid >= val) server->CurrentMid -= val; - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); } static struct mid_q_entry * @@ -346,7 +346,7 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue) return NULL; } - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_for_each_entry(mid, &server->pending_mid_q, qhead) { if ((mid->mid == wire_mid) && (mid->mid_state == MID_REQUEST_SUBMITTED) && @@ -356,11 +356,11 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue) list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return mid; } } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return NULL; } @@ -403,9 +403,9 @@ smb2_negotiate(const unsigned int xid, { int rc; - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); server->CurrentMid = 0; - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); rc = SMB2_negotiate(xid, ses, server); /* BB we probably don't need to retry with modern servers */ if (rc == -EAGAIN) @@ -1145,9 +1145,7 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, size_t 
name_len, value_len, user_name_len; while (src_size > 0) { - name = &src->ea_data[0]; name_len = (size_t)src->ea_name_length; - value = &src->ea_data[src->ea_name_length + 1]; value_len = (size_t)le16_to_cpu(src->ea_value_length); if (name_len == 0) @@ -1159,6 +1157,9 @@ move_smb2_ea_to_cifs(char *dst, size_t dst_size, goto out; } + name = &src->ea_data[0]; + value = &src->ea_data[src->ea_name_length + 1]; + if (ea_name) { if (ea_name_len == name_len && memcmp(ea_name, name, name_len) == 0) { @@ -2574,7 +2575,6 @@ static void smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server) { struct smb2_hdr *shdr = (struct smb2_hdr *)buf; - struct list_head *tmp, *tmp1; struct cifs_ses *ses; struct cifs_tcon *tcon; @@ -2582,12 +2582,12 @@ smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server) return; spin_lock(&cifs_tcp_ses_lock); - list_for_each(tmp, &server->smb_ses_list) { - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); - list_for_each(tmp1, &ses->tcon_list) { - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) { + spin_lock(&tcon->tc_lock); tcon->need_reconnect = true; + spin_unlock(&tcon->tc_lock); spin_unlock(&cifs_tcp_ses_lock); pr_warn_once("Server share %s deleted.\n", tcon->treeName); @@ -4563,9 +4563,11 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key) list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { if (ses->Suid == ses_id) { + spin_lock(&ses->ses_lock); ses_enc_key = enc ? ses->smb3encryptionkey : ses->smb3decryptionkey; memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); + spin_unlock(&ses->ses_lock); spin_unlock(&cifs_tcp_ses_lock); return 0; } @@ -5080,23 +5082,24 @@ static void smb2_decrypt_offload(struct work_struct *work) mid->callback(mid); } else { - spin_lock(&cifs_tcp_ses_lock); - spin_lock(&GlobalMid_Lock); + spin_lock(&dw->server->srv_lock); if (dw->server->tcpStatus == CifsNeedReconnect) { + spin_lock(&dw->server->mid_lock); mid->mid_state = MID_RETRY_NEEDED; - spin_unlock(&GlobalMid_Lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&dw->server->mid_lock); + spin_unlock(&dw->server->srv_lock); mid->callback(mid); } else { + spin_lock(&dw->server->mid_lock); mid->mid_state = MID_REQUEST_SUBMITTED; mid->mid_flags &= ~(MID_DELETED); list_add_tail(&mid->qhead, &dw->server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&dw->server->mid_lock); + spin_unlock(&dw->server->srv_lock); } } - cifs_mid_q_entry_release(mid); + release_mid(mid); } free_pages: diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index c705de32e225..590a1d4ac140 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -162,7 +162,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) return 0; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&tcon->tc_lock); if (tcon->status == TID_EXITING) { /* * only tree disconnect, open, and write, @@ -172,13 +172,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, if ((smb2_command != SMB2_WRITE) && (smb2_command != SMB2_CREATE) && (smb2_command != SMB2_TREE_DISCONNECT)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); cifs_dbg(FYI, "can not send cmd %d while umounting\n", 
smb2_command); return -ENODEV; } } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&tcon->tc_lock); if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) || (!tcon->ses->server) || !server) return -EIO; @@ -217,12 +217,12 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, } /* are we still trying to reconnect? */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus != CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); break; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (retries && --retries) continue; @@ -256,13 +256,13 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, * and the server never sends an answer the socket will be closed * and tcpStatus set to reconnect. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = -EHOSTDOWN; goto out; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* * need to prevent multiple threads trying to simultaneously @@ -354,7 +354,7 @@ fill_small_buf(__le16 smb2_command, struct cifs_tcon *tcon, void *buf, unsigned int *total_len) { - struct smb2_pdu *spdu = (struct smb2_pdu *)buf; + struct smb2_pdu *spdu = buf; /* lookup word count ie StructureSize from table */ __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; @@ -3776,7 +3776,7 @@ smb2_echo_callback(struct mid_q_entry *mid) credits.instance = server->reconnect_instance; } - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, CIFS_ECHO_OP); } @@ -3911,15 +3911,15 @@ SMB2_echo(struct TCP_Server_Info *server) cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->ops->need_neg && server->ops->need_neg(server)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* No need to send echo on newly established connections */ mod_delayed_work(cifsiod_wq, &server->reconnect, 0); return rc; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, (void **)&req, &total_len); @@ -4201,7 +4201,7 @@ smb2_readv_callback(struct mid_q_entry *mid) rdata->offset, rdata->got_bytes); queue_work(cifsiod_wq, &rdata->work); - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, 0); } @@ -4440,7 +4440,7 @@ smb2_writev_callback(struct mid_q_entry *mid) wdata->offset, wdata->bytes); queue_work(cifsiod_wq, &wdata->work); - DeleteMidQEntry(mid); + release_mid(mid); add_credits(server, &credits, 0); } diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 55e79f6ee78d..1a5fc3314dbf 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -640,13 +640,13 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server) if (!is_signed) return 0; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->ops->need_neg && server->ops->need_neg(server)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return 0; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (!is_binding && !server->session_estab) { strncpy(shdr->Signature, "BSRSPYL", 8); return 0; @@ -750,7 +750,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *shdr, temp->callback = cifs_wake_up_task; temp->callback_data = current; - atomic_inc(&midCount); + 
atomic_inc(&mid_count); temp->mid_state = MID_REQUEST_ALLOCATED; trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId), le64_to_cpu(shdr->SessionId), @@ -762,28 +762,30 @@ static int smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server, struct smb2_hdr *shdr, struct mid_q_entry **mid) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } if (server->tcpStatus == CifsNeedReconnect) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); cifs_dbg(FYI, "tcp session dead - return to caller to retry\n"); return -EAGAIN; } if (server->tcpStatus == CifsNeedNegotiate && shdr->Command != SMB2_NEGOTIATE) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -EAGAIN; } + spin_unlock(&server->srv_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_NEW) { if ((shdr->Command != SMB2_SESSION_SETUP) && (shdr->Command != SMB2_NEGOTIATE)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are setting up session */ @@ -791,19 +793,19 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server, if (ses->ses_status == SES_EXITING) { if (shdr->Command != SMB2_LOGOFF) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are shutting down the session */ } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); *mid = smb2_mid_entry_alloc(shdr, server); if (*mid == NULL) return -ENOMEM; - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_add_tail(&(*mid)->qhead, &server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return 0; } @@ -854,7 +856,7 @@ smb2_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *server, rc = smb2_sign_rqst(rqst, server); if (rc) { revert_current_mid_from_hdr(server, shdr); - cifs_delete_mid(mid); + delete_mid(mid); return ERR_PTR(rc); } @@ -869,13 +871,13 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) (struct smb2_hdr *)rqst->rq_iov[0].iov_base; struct mid_q_entry *mid; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsNeedNegotiate && shdr->Command != SMB2_NEGOTIATE) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return ERR_PTR(-EAGAIN); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); smb2_seq_num_into_buf(server, shdr); @@ -888,7 +890,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) rc = smb2_sign_rqst(rqst, server); if (rc) { revert_current_mid_from_hdr(server, shdr); - DeleteMidQEntry(mid); + release_mid(mid); return ERR_PTR(rc); } diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index bfc9bd55870a..de7aeced7e16 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -21,6 +21,7 @@ #include <asm/processor.h> #include <linux/mempool.h> #include <linux/sched/signal.h> +#include <linux/task_io_accounting_ops.h> #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" @@ -37,13 +38,13 @@ cifs_wake_up_task(struct mid_q_entry *mid) wake_up_process(mid->callback_data); } -struct mid_q_entry * -AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) +static struct mid_q_entry * +alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) { struct mid_q_entry *temp; if (server == NULL) 
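/*
 * [Editor's note -- illustrative only.]  The reworked
 * smb2_get_mid_entry() above settles on a consistent ordering: check
 * connection state under server->srv_lock, then session state under
 * ses->ses_lock, and only then touch pending_mid_q under
 * server->mid_lock, dropping each lock before taking the next.  A
 * condensed, hypothetical version of that flow (error handling and
 * the per-command exceptions omitted):
 */
static int cifs_queue_mid_sketch(struct TCP_Server_Info *server,
                                 struct cifs_ses *ses,
                                 struct mid_q_entry *mid)
{
        spin_lock(&server->srv_lock);
        if (server->tcpStatus == CifsExiting) {
                spin_unlock(&server->srv_lock);
                return -ENOENT;
        }
        spin_unlock(&server->srv_lock);

        spin_lock(&ses->ses_lock);
        if (ses->ses_status == SES_EXITING) {
                spin_unlock(&ses->ses_lock);
                return -EAGAIN;
        }
        spin_unlock(&ses->ses_lock);

        spin_lock(&server->mid_lock);
        list_add_tail(&mid->qhead, &server->pending_mid_q);
        spin_unlock(&server->mid_lock);

        return 0;
}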
{ - cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n"); + cifs_dbg(VFS, "%s: null TCP session\n", __func__); return NULL; } @@ -68,12 +69,12 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) temp->callback = cifs_wake_up_task; temp->callback_data = current; - atomic_inc(&midCount); + atomic_inc(&mid_count); temp->mid_state = MID_REQUEST_ALLOCATED; return temp; } -static void _cifs_mid_q_entry_release(struct kref *refcount) +static void __release_mid(struct kref *refcount) { struct mid_q_entry *midEntry = container_of(refcount, struct mid_q_entry, refcount); @@ -91,7 +92,7 @@ static void _cifs_mid_q_entry_release(struct kref *refcount) server->ops->handle_cancelled_mid(midEntry, server); midEntry->mid_state = MID_FREE; - atomic_dec(&midCount); + atomic_dec(&mid_count); if (midEntry->large_buf) cifs_buf_release(midEntry->resp_buf); else @@ -152,29 +153,26 @@ static void _cifs_mid_q_entry_release(struct kref *refcount) mempool_free(midEntry, cifs_mid_poolp); } -void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) +void release_mid(struct mid_q_entry *mid) { - spin_lock(&GlobalMid_Lock); - kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); - spin_unlock(&GlobalMid_Lock); -} + struct TCP_Server_Info *server = mid->server; -void DeleteMidQEntry(struct mid_q_entry *midEntry) -{ - cifs_mid_q_entry_release(midEntry); + spin_lock(&server->mid_lock); + kref_put(&mid->refcount, __release_mid); + spin_unlock(&server->mid_lock); } void -cifs_delete_mid(struct mid_q_entry *mid) +delete_mid(struct mid_q_entry *mid) { - spin_lock(&GlobalMid_Lock); + spin_lock(&mid->server->mid_lock); if (!(mid->mid_flags & MID_DELETED)) { list_del_init(&mid->qhead); mid->mid_flags |= MID_DELETED; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&mid->server->mid_lock); - DeleteMidQEntry(mid); + release_mid(mid); } /* @@ -577,12 +575,12 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits, } else { spin_unlock(&server->req_lock); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* * For normal commands, reserve the last MAX_COMPOUND @@ -725,11 +723,11 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, unsigned int size, static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, struct mid_q_entry **ppmidQ) { - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if (ses->ses_status == SES_NEW) { if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && (in_buf->Command != SMB_COM_NEGOTIATE)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are setting up session */ @@ -738,19 +736,19 @@ static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, if (ses->ses_status == SES_EXITING) { /* check if SMB session is bad because we are setting it up */ if (in_buf->Command != SMB_COM_LOGOFF_ANDX) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); return -EAGAIN; } /* else ok - we are shutting down session */ } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); - *ppmidQ = AllocMidQEntry(in_buf, ses->server); + *ppmidQ = alloc_mid(in_buf, ses->server); if (*ppmidQ == NULL) return -ENOMEM; - spin_lock(&GlobalMid_Lock); + spin_lock(&ses->server->mid_lock); list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); - 
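/*
 * [Editor's note -- sketch, not part of the patch.]  The renamed
 * helpers above encode a simple ownership rule: delete_mid() is for a
 * caller that still owns the queue entry (unlink from pending_mid_q
 * under server->mid_lock, then drop the reference), while release_mid()
 * only drops a reference and lets the kref release callback free the
 * entry.  Hypothetical caller:
 */
static void cifs_finish_mid_sketch(struct mid_q_entry *mid, bool queued)
{
        if (queued)
                delete_mid(mid);        /* unlink + put */
        else
                release_mid(mid);       /* put only */
}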
spin_unlock(&GlobalMid_Lock); + spin_unlock(&ses->server->mid_lock); return 0; } @@ -782,13 +780,13 @@ cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst) if (server->sign) hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; - mid = AllocMidQEntry(hdr, server); + mid = alloc_mid(hdr, server); if (mid == NULL) return ERR_PTR(-ENOMEM); rc = cifs_sign_rqst(rqst, server, &mid->sequence_number); if (rc) { - DeleteMidQEntry(mid); + release_mid(mid); return ERR_PTR(rc); } @@ -849,9 +847,9 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, mid->mid_state = MID_REQUEST_SUBMITTED; /* put it on the pending_mid_q */ - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); list_add_tail(&mid->qhead, &server->pending_mid_q); - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); /* * Need to store the time in mid before calling I/O. For call_async, @@ -865,7 +863,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, if (rc < 0) { revert_current_mid(server, mid->credits); server->sequence_number -= 2; - cifs_delete_mid(mid); + delete_mid(mid); } cifs_server_unlock(server); @@ -912,10 +910,10 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n", __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); switch (mid->mid_state) { case MID_RESPONSE_RECEIVED: - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); return rc; case MID_RETRY_NEEDED: rc = -EAGAIN; @@ -935,9 +933,9 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) __func__, mid->mid, mid->mid_state); rc = -EIO; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); - DeleteMidQEntry(mid); + release_mid(mid); return rc; } @@ -997,7 +995,7 @@ cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored, return ERR_PTR(rc); rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number); if (rc) { - cifs_delete_mid(mid); + delete_mid(mid); return ERR_PTR(rc); } return mid; @@ -1026,7 +1024,7 @@ static void cifs_cancelled_callback(struct mid_q_entry *mid) { cifs_compound_callback(mid); - DeleteMidQEntry(mid); + release_mid(mid); } /* @@ -1078,12 +1076,12 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, return -EIO; } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* * Wait for all the requests to become available. @@ -1130,7 +1128,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, if (IS_ERR(midQ[i])) { revert_current_mid(server, i); for (j = 0; j < i; j++) - cifs_delete_mid(midQ[j]); + delete_mid(midQ[j]); cifs_server_unlock(server); /* Update # of requests on wire to server */ @@ -1186,17 +1184,17 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, /* * Compounding is never used during session establish. 
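/*
 * [Editor's note -- simplified sketch.]  cifs_sync_mid_result() above
 * maps the mid state to an errno while holding server->mid_lock; the
 * real function also unlinks the entry and logs the failure.  The
 * mapping visible in this hunk, reduced to a helper with a
 * hypothetical name (state constants are from cifsglob.h):
 */
static int cifs_mid_state_to_errno_sketch(unsigned int mid_state)
{
        switch (mid_state) {
        case MID_RESPONSE_RECEIVED:
                return 0;
        case MID_RETRY_NEEDED:
                return -EAGAIN;
        default:
                return -EIO;    /* unexpected or malformed state */
        }
}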
*/ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); cifs_server_lock(server); smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec); cifs_server_unlock(server); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); for (i = 0; i < num_rqst; i++) { rc = wait_for_response(server, midQ[i]); @@ -1208,14 +1206,14 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", midQ[i]->mid, le16_to_cpu(midQ[i]->command)); send_cancel(server, &rqst[i], midQ[i]); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); midQ[i]->mid_flags |= MID_WAIT_CANCELLED; if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { midQ[i]->callback = cifs_cancelled_callback; cancelled_mid[i] = true; credits[i].value = 0; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); } } @@ -1250,7 +1248,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, rc = server->ops->check_receive(midQ[i], server, flags & CIFS_LOG_ERROR); - /* mark it so buf will not be freed by cifs_delete_mid */ + /* mark it so buf will not be freed by delete_mid */ if ((flags & CIFS_NO_RSP_BUF) == 0) midQ[i]->resp_buf = NULL; @@ -1259,19 +1257,19 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, /* * Compounding is never used during session establish. */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { struct kvec iov = { .iov_base = resp_iov[0].iov_base, .iov_len = resp_iov[0].iov_len }; - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); cifs_server_lock(server); smb311_update_preauth_hash(ses, server, &iov, 1); cifs_server_unlock(server); - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&ses->ses_lock); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&ses->ses_lock); out: /* @@ -1282,7 +1280,7 @@ out: */ for (i = 0; i < num_rqst; i++) { if (!cancelled_mid[i]) - cifs_delete_mid(midQ[i]); + delete_mid(midQ[i]); } return rc; @@ -1360,12 +1358,12 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, return -EIO; } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* Ensure that we do not send more than 50 overlapping requests to the same server. 
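/*
 * [Editor's note -- sketch only.]  The condition compound_send_recv()
 * re-checks above, now under ses->ses_lock, before updating the SMB
 * 3.1.1 preauth hash: only during negotiate or session setup, i.e.
 * while the session is still new or the caller passed CIFS_NEG_OP or
 * CIFS_SESS_OP.  Hypothetical predicate:
 */
static bool cifs_needs_preauth_hash_sketch(const struct cifs_ses *ses,
                                           int optype)
{
        return (ses->ses_status == SES_NEW) ||
               (optype & CIFS_NEG_OP) ||
               (optype & CIFS_SESS_OP);
}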
We may make this configurable later or @@ -1419,15 +1417,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = wait_for_response(server, midQ); if (rc != 0) { send_cancel(server, &rqst, midQ); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); if (midQ->mid_state == MID_REQUEST_SUBMITTED) { /* no longer considered to be "in-flight" */ - midQ->callback = DeleteMidQEntry; - spin_unlock(&GlobalMid_Lock); + midQ->callback = release_mid; + spin_unlock(&server->mid_lock); add_credits(server, &credits, 0); return rc; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); } rc = cifs_sync_mid_result(midQ, server); @@ -1447,7 +1445,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); rc = cifs_check_receive(midQ, server, 0); out: - cifs_delete_mid(midQ); + delete_mid(midQ); add_credits(server, &credits, 0); return rc; @@ -1505,12 +1503,12 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, return -EIO; } - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if (server->tcpStatus == CifsExiting) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); return -ENOENT; } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); /* Ensure that we do not send more than 50 overlapping requests to the same server. We may make this configurable later or @@ -1540,7 +1538,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); if (rc) { - cifs_delete_mid(midQ); + delete_mid(midQ); cifs_server_unlock(server); return rc; } @@ -1557,7 +1555,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, cifs_server_unlock(server); if (rc < 0) { - cifs_delete_mid(midQ); + delete_mid(midQ); return rc; } @@ -1568,19 +1566,19 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, (server->tcpStatus != CifsNew))); /* Were we interrupted by a signal ? */ - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); if ((rc == -ERESTARTSYS) && (midQ->mid_state == MID_REQUEST_SUBMITTED) && ((server->tcpStatus == CifsGood) || (server->tcpStatus == CifsNew))) { - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); if (in_buf->Command == SMB_COM_TRANSACTION2) { /* POSIX lock. We send a NT_CANCEL SMB to cause the blocking lock to return. */ rc = send_cancel(server, &rqst, midQ); if (rc) { - cifs_delete_mid(midQ); + delete_mid(midQ); return rc; } } else { @@ -1592,7 +1590,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, /* If we get -ENOLCK back the lock may have already been removed. Don't exit in this case. */ if (rc && rc != -ENOLCK) { - cifs_delete_mid(midQ); + delete_mid(midQ); return rc; } } @@ -1600,21 +1598,21 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, rc = wait_for_response(server, midQ); if (rc) { send_cancel(server, &rqst, midQ); - spin_lock(&GlobalMid_Lock); + spin_lock(&server->mid_lock); if (midQ->mid_state == MID_REQUEST_SUBMITTED) { /* no longer considered to be "in-flight" */ - midQ->callback = DeleteMidQEntry; - spin_unlock(&GlobalMid_Lock); + midQ->callback = release_mid; + spin_unlock(&server->mid_lock); return rc; } - spin_unlock(&GlobalMid_Lock); + spin_unlock(&server->mid_lock); } /* We got the response - restart system call. 
*/ rstart = 1; - spin_lock(&cifs_tcp_ses_lock); + spin_lock(&server->srv_lock); } - spin_unlock(&cifs_tcp_ses_lock); + spin_unlock(&server->srv_lock); rc = cifs_sync_mid_result(midQ, server); if (rc != 0) @@ -1631,8 +1629,186 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); rc = cifs_check_receive(midQ, server, 0); out: - cifs_delete_mid(midQ); + delete_mid(midQ); if (rstart && rc == -EACCES) return -ERESTARTSYS; return rc; } + +/* + * Discard any remaining data in the current SMB. To do this, we borrow the + * current bigbuf. + */ +int +cifs_discard_remaining_data(struct TCP_Server_Info *server) +{ + unsigned int rfclen = server->pdu_size; + int remaining = rfclen + server->vals->header_preamble_size - + server->total_read; + + while (remaining > 0) { + int length; + + length = cifs_discard_from_socket(server, + min_t(size_t, remaining, + CIFSMaxBufSize + MAX_HEADER_SIZE(server))); + if (length < 0) + return length; + server->total_read += length; + remaining -= length; + } + + return 0; +} + +static int +__cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, + bool malformed) +{ + int length; + + length = cifs_discard_remaining_data(server); + dequeue_mid(mid, malformed); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; + return length; +} + +static int +cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) +{ + struct cifs_readdata *rdata = mid->callback_data; + + return __cifs_readv_discard(server, mid, rdata->result); +} + +int +cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) +{ + int length, len; + unsigned int data_offset, data_len; + struct cifs_readdata *rdata = mid->callback_data; + char *buf = server->smallbuf; + unsigned int buflen = server->pdu_size + + server->vals->header_preamble_size; + bool use_rdma_mr = false; + + cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", + __func__, mid->mid, rdata->offset, rdata->bytes); + + /* + * read the rest of READ_RSP header (sans Data array), or whatever we + * can if there's not enough data. At this point, we've read down to + * the Mid. + */ + len = min_t(unsigned int, buflen, server->vals->read_rsp_size) - + HEADER_SIZE(server) + 1; + + length = cifs_read_from_socket(server, + buf + HEADER_SIZE(server) - 1, len); + if (length < 0) + return length; + server->total_read += length; + + if (server->ops->is_session_expired && + server->ops->is_session_expired(buf)) { + cifs_reconnect(server, true); + return -1; + } + + if (server->ops->is_status_pending && + server->ops->is_status_pending(buf, server)) { + cifs_discard_remaining_data(server); + return -1; + } + + /* set up first two iov for signature check and to get credits */ + rdata->iov[0].iov_base = buf; + rdata->iov[0].iov_len = server->vals->header_preamble_size; + rdata->iov[1].iov_base = buf + server->vals->header_preamble_size; + rdata->iov[1].iov_len = + server->total_read - server->vals->header_preamble_size; + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", + rdata->iov[0].iov_base, rdata->iov[0].iov_len); + cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", + rdata->iov[1].iov_base, rdata->iov[1].iov_len); + + /* Was the SMB read successful? 
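/*
 * [Editor's note -- sketch, not in the patch.]  The loop in
 * cifs_discard_remaining_data() above drains whatever is left of the
 * current PDU from the socket; the amount still outstanding at any
 * point is just the calculation below (hypothetical helper name,
 * fields from struct TCP_Server_Info):
 */
static int cifs_pdu_bytes_remaining_sketch(const struct TCP_Server_Info *server)
{
        return server->pdu_size + server->vals->header_preamble_size -
               server->total_read;
}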
*/ + rdata->result = server->ops->map_error(buf, false); + if (rdata->result != 0) { + cifs_dbg(FYI, "%s: server returned error %d\n", + __func__, rdata->result); + /* normal error on read response */ + return __cifs_readv_discard(server, mid, false); + } + + /* Is there enough to get to the rest of the READ_RSP header? */ + if (server->total_read < server->vals->read_rsp_size) { + cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n", + __func__, server->total_read, + server->vals->read_rsp_size); + rdata->result = -EIO; + return cifs_readv_discard(server, mid); + } + + data_offset = server->ops->read_data_offset(buf) + + server->vals->header_preamble_size; + if (data_offset < server->total_read) { + /* + * win2k8 sometimes sends an offset of 0 when the read + * is beyond the EOF. Treat it as if the data starts just after + * the header. + */ + cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", + __func__, data_offset); + data_offset = server->total_read; + } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { + /* data_offset is beyond the end of smallbuf */ + cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", + __func__, data_offset); + rdata->result = -EIO; + return cifs_readv_discard(server, mid); + } + + cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n", + __func__, server->total_read, data_offset); + + len = data_offset - server->total_read; + if (len > 0) { + /* read any junk before data into the rest of smallbuf */ + length = cifs_read_from_socket(server, + buf + server->total_read, len); + if (length < 0) + return length; + server->total_read += length; + } + + /* how much data is in the response? */ +#ifdef CONFIG_CIFS_SMB_DIRECT + use_rdma_mr = rdata->mr; +#endif + data_len = server->ops->read_data_length(buf, use_rdma_mr); + if (!use_rdma_mr && (data_offset + data_len > buflen)) { + /* data_len is corrupt -- discard frame */ + rdata->result = -EIO; + return cifs_readv_discard(server, mid); + } + + length = rdata->read_into_pages(server, rdata, data_len); + if (length < 0) + return length; + + server->total_read += length; + + cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", + server->total_read, buflen, data_len); + + /* discard anything left over */ + if (server->total_read < buflen) + return cifs_readv_discard(server, mid); + + dequeue_mid(mid, false); + mid->resp_buf = server->smallbuf; + server->smallbuf = NULL; + return length; +} diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 9d486fbbfbbd..998fa51f9b68 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -201,6 +201,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler, break; } +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY case XATTR_ACL_ACCESS: #ifdef CONFIG_CIFS_POSIX if (!value) @@ -224,6 +225,7 @@ static int cifs_xattr_set(const struct xattr_handler *handler, cifs_remap(cifs_sb)); #endif /* CONFIG_CIFS_POSIX */ break; +#endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ } out: @@ -364,7 +366,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler, } break; } - +#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY case XATTR_ACL_ACCESS: #ifdef CONFIG_CIFS_POSIX if (sb->s_flags & SB_POSIXACL) @@ -384,6 +386,7 @@ static int cifs_xattr_get(const struct xattr_handler *handler, cifs_remap(cifs_sb)); #endif /* CONFIG_CIFS_POSIX */ break; +#endif /* ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ } /* We could add an additional check for streams ie |
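/*
 * [Editor's note -- illustrative sketch.]  The xattr.c hunks above wrap
 * the XATTR_ACL_ACCESS get/set cases in
 * CONFIG_CIFS_ALLOW_INSECURE_LEGACY, so the POSIX ACL paths are
 * compiled out together with the rest of the legacy SMB1 code when
 * that option is disabled.  General shape of such a guard; the
 * function name is hypothetical:
 */
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
static int cifs_legacy_acl_op_sketch(void)
{
        /* the SMB1-only POSIX ACL helpers would be called here */
        return 0;
}
#else
static inline int cifs_legacy_acl_op_sketch(void)
{
        return -EOPNOTSUPP;     /* legacy dialects compiled out */
}
#endif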