Diffstat (limited to 'fs/smb')
-rw-r--r--  fs/smb/client/Makefile | 2
-rw-r--r--  fs/smb/client/cached_dir.c | 61
-rw-r--r--  fs/smb/client/cached_dir.h | 14
-rw-r--r--  fs/smb/client/cifs_debug.c | 100
-rw-r--r--  fs/smb/client/cifs_ioctl.h | 2
-rw-r--r--  fs/smb/client/cifsencrypt.c | 83
-rw-r--r--  fs/smb/client/cifsfs.c | 28
-rw-r--r--  fs/smb/client/cifsfs.h | 8
-rw-r--r--  fs/smb/client/cifsglob.h | 59
-rw-r--r--  fs/smb/client/cifspdu.h | 11
-rw-r--r--  fs/smb/client/cifsproto.h | 31
-rw-r--r--  fs/smb/client/cifssmb.c | 246
-rw-r--r--  fs/smb/client/cifstransport.c | 566
-rw-r--r--  fs/smb/client/connect.c | 167
-rw-r--r--  fs/smb/client/dir.c | 25
-rw-r--r--  fs/smb/client/file.c | 55
-rw-r--r--  fs/smb/client/fs_context.c | 93
-rw-r--r--  fs/smb/client/fs_context.h | 65
-rw-r--r--  fs/smb/client/ioctl.c | 2
-rw-r--r--  fs/smb/client/link.c | 24
-rw-r--r--  fs/smb/client/misc.c | 14
-rw-r--r--  fs/smb/client/namespace.c | 4
-rw-r--r--  fs/smb/client/readdir.c | 40
-rw-r--r--  fs/smb/client/reparse.c | 61
-rw-r--r--  fs/smb/client/reparse.h | 4
-rw-r--r--  fs/smb/client/sess.c | 37
-rw-r--r--  fs/smb/client/smb1ops.c | 271
-rw-r--r--  fs/smb/client/smb2inode.c | 13
-rw-r--r--  fs/smb/client/smb2ops.c | 100
-rw-r--r--  fs/smb/client/smb2pdu.c | 174
-rw-r--r--  fs/smb/client/smb2proto.h | 8
-rw-r--r--  fs/smb/client/smb2transport.c | 4
-rw-r--r--  fs/smb/client/smbdirect.c | 986
-rw-r--r--  fs/smb/client/smbdirect.h | 161
-rw-r--r--  fs/smb/client/trace.h | 24
-rw-r--r--  fs/smb/client/transport.c | 616
-rw-r--r--  fs/smb/common/smbdirect/smbdirect.h | 37
-rw-r--r--  fs/smb/common/smbdirect/smbdirect_pdu.h | 55
-rw-r--r--  fs/smb/common/smbdirect/smbdirect_socket.h | 161
-rw-r--r--  fs/smb/server/Kconfig | 1
-rw-r--r--  fs/smb/server/auth.c | 34
-rw-r--r--  fs/smb/server/auth.h | 2
-rw-r--r--  fs/smb/server/connection.c | 2
-rw-r--r--  fs/smb/server/connection.h | 3
-rw-r--r--  fs/smb/server/crypto_ctx.c | 8
-rw-r--r--  fs/smb/server/crypto_ctx.h | 4
-rw-r--r--  fs/smb/server/oplock.c | 14
-rw-r--r--  fs/smb/server/server.c | 1
-rw-r--r--  fs/smb/server/smb2pdu.c | 272
-rw-r--r--  fs/smb/server/smb2pdu.h | 3
-rw-r--r--  fs/smb/server/smb_common.c | 2
-rw-r--r--  fs/smb/server/transport_rdma.c | 112
-rw-r--r--  fs/smb/server/transport_tcp.c | 26
-rw-r--r--  fs/smb/server/vfs.c | 277
-rw-r--r--  fs/smb/server/vfs.h | 7
-rw-r--r--  fs/smb/server/vfs_cache.c | 33
-rw-r--r--  fs/smb/server/vfs_cache.h | 1
57 files changed, 2988 insertions, 2226 deletions
diff --git a/fs/smb/client/Makefile b/fs/smb/client/Makefile
index 22023e30915b..4c97b31a25c2 100644
--- a/fs/smb/client/Makefile
+++ b/fs/smb/client/Makefile
@@ -32,6 +32,6 @@ cifs-$(CONFIG_CIFS_SMB_DIRECT) += smbdirect.o
cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o
-cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o
+cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o cifstransport.o
cifs-$(CONFIG_CIFS_COMPRESSION) += compress.o compress/lz77.o
diff --git a/fs/smb/client/cached_dir.c b/fs/smb/client/cached_dir.c
index fe738623cf1b..b69daeb1301b 100644
--- a/fs/smb/client/cached_dir.c
+++ b/fs/smb/client/cached_dir.c
@@ -29,7 +29,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
{
struct cached_fid *cfid;
- spin_lock(&cfids->cfid_list_lock);
list_for_each_entry(cfid, &cfids->entries, entry) {
if (!strcmp(cfid->path, path)) {
/*
@@ -38,25 +37,20 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
* being deleted due to a lease break.
*/
if (!cfid->time || !cfid->has_lease) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
kref_get(&cfid->refcount);
- spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
}
if (lookup_only) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
if (cfids->num_entries >= max_cached_dirs) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid = init_cached_dir(path);
if (cfid == NULL) {
- spin_unlock(&cfids->cfid_list_lock);
return NULL;
}
cfid->cfids = cfids;
@@ -74,7 +68,6 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
*/
cfid->has_lease = true;
- spin_unlock(&cfids->cfid_list_lock);
return cfid;
}
@@ -109,7 +102,8 @@ path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
while (*s && *s != sep)
s++;
- child = lookup_positive_unlocked(p, dentry, s - p);
+ child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
+ dentry);
dput(dentry);
dentry = child;
} while (!IS_ERR(dentry));
@@ -161,6 +155,7 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
struct cached_fids *cfids;
const char *npath;
int retries = 0, cur_sleep = 1;
+ __le32 lease_flags = 0;
if (cifs_sb->root == NULL)
return -ENOENT;
@@ -187,8 +182,10 @@ replay_again:
if (!utf16_path)
return -ENOMEM;
+ spin_lock(&cfids->cfid_list_lock);
cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
if (cfid == NULL) {
+ spin_unlock(&cfids->cfid_list_lock);
kfree(utf16_path);
return -ENOENT;
}
@@ -197,8 +194,8 @@ replay_again:
* Otherwise, it is either a new entry or laundromat worker removed it
* from @cfids->entries. Caller will put last reference if the latter.
*/
- spin_lock(&cfids->cfid_list_lock);
if (cfid->has_lease && cfid->time) {
+ cfid->last_access_time = jiffies;
spin_unlock(&cfids->cfid_list_lock);
*ret_cfid = cfid;
kfree(utf16_path);
@@ -206,8 +203,10 @@ replay_again:
}
spin_unlock(&cfids->cfid_list_lock);
+ pfid = &cfid->fid;
+
/*
- * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
+ * Skip any prefix paths in @path as lookup_noperm_positive_unlocked() ends up
* calling ->lookup() which already adds those through
* build_path_from_dentry(). Also, do it earlier as we might reconnect
* below when trying to send compounded request and then potentially
@@ -227,6 +226,25 @@ replay_again:
rc = -ENOENT;
goto out;
}
+ if (dentry->d_parent && server->dialect >= SMB30_PROT_ID) {
+ struct cached_fid *parent_cfid;
+
+ spin_lock(&cfids->cfid_list_lock);
+ list_for_each_entry(parent_cfid, &cfids->entries, entry) {
+ if (parent_cfid->dentry == dentry->d_parent) {
+ cifs_dbg(FYI, "found a parent cached file handle\n");
+ if (parent_cfid->has_lease && parent_cfid->time) {
+ lease_flags
+ |= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
+ memcpy(pfid->parent_lease_key,
+ parent_cfid->fid.lease_key,
+ SMB2_LEASE_KEY_SIZE);
+ }
+ break;
+ }
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+ }
}
cfid->dentry = dentry;
cfid->tcon = tcon;
@@ -241,7 +259,6 @@ replay_again:
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- pfid = &cfid->fid;
server->ops->new_lease_key(pfid);
memset(rqst, 0, sizeof(rqst));
@@ -261,6 +278,7 @@ replay_again:
FILE_READ_EA,
.disposition = FILE_OPEN,
.fid = pfid,
+ .lease_flags = lease_flags,
.replay = !!(retries),
};
@@ -346,6 +364,7 @@ replay_again:
cfid->file_all_info_is_valid = true;
cfid->time = jiffies;
+ cfid->last_access_time = jiffies;
spin_unlock(&cfids->cfid_list_lock);
/* At this point the directory handle is fully cached */
rc = 0;
@@ -492,8 +511,17 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry(cfid, &cfids->entries, entry) {
tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
- if (tmp_list == NULL)
- break;
+ if (tmp_list == NULL) {
+ /*
+ * If the malloc() fails, we won't drop all
+ * dentries, and unmounting is likely to trigger
+ * a 'Dentry still in use' error.
+ */
+ cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
+ spin_unlock(&cfids->cfid_list_lock);
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+ goto done;
+ }
spin_lock(&cfid->fid_lock);
tmp_list->dentry = cfid->dentry;
cfid->dentry = NULL;
@@ -505,6 +533,7 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
}
spin_unlock(&cifs_sb->tlink_tree_lock);
+done:
list_for_each_entry_safe(tmp_list, q, &entry, entry) {
list_del(&tmp_list->entry);
dput(tmp_list->dentry);
@@ -590,7 +619,7 @@ static void cached_dir_put_work(struct work_struct *work)
queue_work(serverclose_wq, &cfid->close_work);
}
-int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
+bool cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
struct cached_fids *cfids = tcon->cfids;
struct cached_fid *cfid;
@@ -703,8 +732,8 @@ static void cfids_laundromat_worker(struct work_struct *work)
spin_lock(&cfids->cfid_list_lock);
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
- if (cfid->time &&
- time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
+ if (cfid->last_access_time &&
+ time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) {
cfid->on_list = false;
list_move(&cfid->entry, &entry);
cfids->num_entries--;
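
Illustrative sketch (not part of the patch): the cached_dir.c hunks above move the
cfids->cfid_list_lock acquisition out of find_or_create_cached_dir() and into its
caller, open_cached_dir(), so the list walk, the capacity check and the insertion of
a new entry all happen in one critical section. The resulting convention looks
roughly like the helper below (the helper name and the lockdep assertion are
hypothetical, not taken from the patch):

    static struct cached_fid *lookup_cached_dir_locked(struct cached_fids *cfids,
                                                       const char *path)
    {
            struct cached_fid *cfid;

            /* caller must already hold cfids->cfid_list_lock */
            lockdep_assert_held(&cfids->cfid_list_lock);

            list_for_each_entry(cfid, &cfids->entries, entry) {
                    if (!strcmp(cfid->path, path))
                            return cfid;
            }
            return NULL;
    }

Callers lock, call the helper, then unlock themselves, exactly as the
open_cached_dir() hunk above now does around find_or_create_cached_dir().
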
diff --git a/fs/smb/client/cached_dir.h b/fs/smb/client/cached_dir.h
index 1dfe79d947a6..46b5a2fdf15b 100644
--- a/fs/smb/client/cached_dir.h
+++ b/fs/smb/client/cached_dir.h
@@ -14,19 +14,18 @@ struct cached_dirent {
char *name;
int namelen;
loff_t pos;
-
struct cifs_fattr fattr;
};
struct cached_dirents {
bool is_valid:1;
bool is_failed:1;
- struct dir_context *ctx; /*
- * Only used to make sure we only take entries
- * from a single context. Never dereferenced.
- */
+ struct file *file; /*
+ * Used to associate the cache with a single
+ * open file instance.
+ */
struct mutex de_mutex;
- int pos; /* Expected ctx->pos */
+ loff_t pos; /* Expected ctx->pos */
struct list_head entries;
};
@@ -39,6 +38,7 @@ struct cached_fid {
bool on_list:1;
bool file_all_info_is_valid:1;
unsigned long time; /* jiffies of when lease was taken */
+ unsigned long last_access_time; /* jiffies of when last accessed */
struct kref refcount;
struct cifs_fid fid;
spinlock_t fid_lock;
@@ -80,6 +80,6 @@ extern void drop_cached_dir_by_name(const unsigned int xid,
struct cifs_sb_info *cifs_sb);
extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb);
extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon);
-extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);
+extern bool cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);
#endif /* _CACHED_DIR_H */
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index e03c890de0a0..beb4f18f05ef 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -26,6 +26,7 @@
#include "smbdirect.h"
#endif
#include "cifs_swn.h"
+#include "cached_dir.h"
void
cifs_dump_mem(char *label, void *data, int length)
@@ -59,7 +60,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
return;
cifs_dbg(VFS, "Dump pending requests:\n");
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n",
mid_entry->mid_state,
@@ -82,7 +83,7 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
mid_entry->resp_buf, 62);
}
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
#endif /* CONFIG_CIFS_DEBUG2 */
}
@@ -280,6 +281,54 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
return 0;
}
+static int cifs_debug_dirs_proc_show(struct seq_file *m, void *v)
+{
+ struct list_head *stmp, *tmp, *tmp1;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+ struct cached_fids *cfids;
+ struct cached_fid *cfid;
+ LIST_HEAD(entry);
+
+ seq_puts(m, "# Version:1\n");
+ seq_puts(m, "# Format:\n");
+ seq_puts(m, "# <tree id> <sess id> <persistent fid> <path>\n");
+
+ spin_lock(&cifs_tcp_ses_lock);
+ list_for_each(stmp, &cifs_tcp_ses_list) {
+ server = list_entry(stmp, struct TCP_Server_Info,
+ tcp_ses_list);
+ list_for_each(tmp, &server->smb_ses_list) {
+ ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each(tmp1, &ses->tcon_list) {
+ tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
+ cfids = tcon->cfids;
+ spin_lock(&cfids->cfid_list_lock); /* check lock ordering */
+ seq_printf(m, "Num entries: %d\n", cfids->num_entries);
+ list_for_each_entry(cfid, &cfids->entries, entry) {
+ seq_printf(m, "0x%x 0x%llx 0x%llx %s",
+ tcon->tid,
+ ses->Suid,
+ cfid->fid.persistent_fid,
+ cfid->path);
+ if (cfid->file_all_info_is_valid)
+ seq_printf(m, "\tvalid file info");
+ if (cfid->dirents.is_valid)
+ seq_printf(m, ", valid dirents");
+ seq_printf(m, "\n");
+ }
+ spin_unlock(&cfids->cfid_list_lock);
+
+
+ }
+ }
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+ seq_putc(m, '\n');
+ return 0;
+}
+
static __always_inline const char *compression_alg_str(__le16 alg)
{
switch (alg) {
@@ -362,6 +411,11 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
c = 0;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+#ifdef CONFIG_CIFS_SMB_DIRECT
+ struct smbdirect_socket *sc;
+ struct smbdirect_socket_parameters *sp;
+#endif
+
/* channel info will be printed as a part of sessions below */
if (SERVER_IS_CHAN(server))
continue;
@@ -383,25 +437,27 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\nSMBDirect transport not available");
goto skip_rdma;
}
+ sc = &server->smbd_conn->socket;
+ sp = &sc->parameters;
seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
"transport status: %x",
server->smbd_conn->protocol,
- server->smbd_conn->transport_status);
+ server->smbd_conn->socket.status);
seq_printf(m, "\nConn receive_credit_max: %x "
"send_credit_target: %x max_send_size: %x",
- server->smbd_conn->receive_credit_max,
- server->smbd_conn->send_credit_target,
- server->smbd_conn->max_send_size);
+ sp->recv_credit_max,
+ sp->send_credit_target,
+ sp->max_send_size);
seq_printf(m, "\nConn max_fragmented_recv_size: %x "
"max_fragmented_send_size: %x max_receive_size:%x",
- server->smbd_conn->max_fragmented_recv_size,
- server->smbd_conn->max_fragmented_send_size,
- server->smbd_conn->max_receive_size);
+ sp->max_fragmented_recv_size,
+ sp->max_fragmented_send_size,
+ sp->max_recv_size);
seq_printf(m, "\nConn keep_alive_interval: %x "
"max_readwrite_size: %x rdma_readwrite_threshold: %x",
- server->smbd_conn->keep_alive_interval,
- server->smbd_conn->max_readwrite_size,
+ sp->keepalive_interval_msec * 1000,
+ sp->max_read_write_size,
server->smbd_conn->rdma_readwrite_threshold);
seq_printf(m, "\nDebug count_get_receive_buffer: %x "
"count_put_receive_buffer: %x count_send_empty: %x",
@@ -411,15 +467,13 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
seq_printf(m, "\nRead Queue count_reassembly_queue: %x "
"count_enqueue_reassembly_queue: %x "
"count_dequeue_reassembly_queue: %x "
- "fragment_reassembly_remaining: %x "
"reassembly_data_length: %x "
"reassembly_queue_length: %x",
server->smbd_conn->count_reassembly_queue,
server->smbd_conn->count_enqueue_reassembly_queue,
server->smbd_conn->count_dequeue_reassembly_queue,
- server->smbd_conn->fragment_reassembly_remaining,
- server->smbd_conn->reassembly_data_length,
- server->smbd_conn->reassembly_queue_length);
+ sc->recv_io.reassembly.data_length,
+ sc->recv_io.reassembly.queue_length);
seq_printf(m, "\nCurrent Credits send_credits: %x "
"receive_credits: %x receive_credit_target: %x",
atomic_read(&server->smbd_conn->send_credits),
@@ -427,10 +481,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
server->smbd_conn->receive_credit_target);
seq_printf(m, "\nPending send_pending: %x ",
atomic_read(&server->smbd_conn->send_pending));
- seq_printf(m, "\nReceive buffers count_receive_queue: %x "
- "count_empty_packet_queue: %x",
- server->smbd_conn->count_receive_queue,
- server->smbd_conn->count_empty_packet_queue);
+ seq_printf(m, "\nReceive buffers count_receive_queue: %x ",
+ server->smbd_conn->count_receive_queue);
seq_printf(m, "\nMR responder_resources: %x "
"max_frmr_depth: %x mr_type: %x",
server->smbd_conn->responder_resources,
@@ -618,7 +670,7 @@ skip_rdma:
seq_printf(m, "\n\tServer ConnectionId: 0x%llx",
chan_server->conn_id);
- spin_lock(&chan_server->mid_lock);
+ spin_lock(&chan_server->mid_queue_lock);
list_for_each_entry(mid_entry, &chan_server->pending_mid_q, qhead) {
seq_printf(m, "\n\t\tState: %d com: %d pid: %d cbdata: %p mid %llu",
mid_entry->mid_state,
@@ -627,7 +679,7 @@ skip_rdma:
mid_entry->callback_data,
mid_entry->mid);
}
- spin_unlock(&chan_server->mid_lock);
+ spin_unlock(&chan_server->mid_queue_lock);
}
spin_unlock(&ses->chan_lock);
seq_puts(m, "\n--\n");
@@ -858,6 +910,9 @@ cifs_proc_init(void)
proc_create_single("open_files", 0400, proc_fs_cifs,
cifs_debug_files_proc_show);
+ proc_create_single("open_dirs", 0400, proc_fs_cifs,
+ cifs_debug_dirs_proc_show);
+
proc_create("Stats", 0644, proc_fs_cifs, &cifs_stats_proc_ops);
proc_create("cifsFYI", 0644, proc_fs_cifs, &cifsFYI_proc_ops);
proc_create("traceSMB", 0644, proc_fs_cifs, &traceSMB_proc_ops);
@@ -902,6 +957,7 @@ cifs_proc_clean(void)
remove_proc_entry("DebugData", proc_fs_cifs);
remove_proc_entry("open_files", proc_fs_cifs);
+ remove_proc_entry("open_dirs", proc_fs_cifs);
remove_proc_entry("cifsFYI", proc_fs_cifs);
remove_proc_entry("traceSMB", proc_fs_cifs);
remove_proc_entry("Stats", proc_fs_cifs);
@@ -1100,7 +1156,7 @@ static ssize_t cifs_security_flags_proc_write(struct file *file,
if ((count < 1) || (count > 11))
return -EINVAL;
- memset(flags_string, 0, 12);
+ memset(flags_string, 0, sizeof(flags_string));
if (copy_from_user(flags_string, buffer, count))
return -EFAULT;
diff --git a/fs/smb/client/cifs_ioctl.h b/fs/smb/client/cifs_ioctl.h
index 26327442e383..b51ce64fcccf 100644
--- a/fs/smb/client/cifs_ioctl.h
+++ b/fs/smb/client/cifs_ioctl.h
@@ -61,7 +61,7 @@ struct smb_query_info {
struct smb3_key_debug_info {
__u64 Suid;
__u16 cipher_type;
- __u8 auth_key[16]; /* SMB2_NTLMV2_SESSKEY_SIZE */
+ __u8 auth_key[SMB2_NTLMV2_SESSKEY_SIZE];
__u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE];
__u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE];
} __packed;
diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
index 35892df7335c..3cc686246908 100644
--- a/fs/smb/client/cifsencrypt.c
+++ b/fs/smb/client/cifsencrypt.c
@@ -343,7 +343,7 @@ static struct ntlmssp2_name *find_next_av(struct cifs_ses *ses,
len = AV_LEN(av);
if (AV_TYPE(av) == NTLMSSP_AV_EOL)
return NULL;
- if (!len || (u8 *)av + sizeof(*av) + len > end)
+ if ((u8 *)av + sizeof(*av) + len > end)
return NULL;
return av;
}
@@ -363,7 +363,7 @@ static int find_av_name(struct cifs_ses *ses, u16 type, char **name, u16 maxlen)
av_for_each_entry(ses, av) {
len = AV_LEN(av);
- if (AV_TYPE(av) != type)
+ if (AV_TYPE(av) != type || !len)
continue;
if (!IS_ALIGNED(len, sizeof(__le16))) {
cifs_dbg(VFS | ONCE, "%s: bad length(%u) for type %u\n",
@@ -532,17 +532,67 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash, struct shash_
return rc;
}
+/*
+ * Set up NTLMv2 response blob with SPN (cifs/<hostname>) appended to the
+ * existing list of AV pairs.
+ */
+static int set_auth_key_response(struct cifs_ses *ses)
+{
+ size_t baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
+ size_t len, spnlen, tilen = 0, num_avs = 2 /* SPN + EOL */;
+ struct TCP_Server_Info *server = ses->server;
+ char *spn __free(kfree) = NULL;
+ struct ntlmssp2_name *av;
+ char *rsp = NULL;
+ int rc;
+
+ spnlen = strlen(server->hostname);
+ len = sizeof("cifs/") + spnlen;
+ spn = kmalloc(len, GFP_KERNEL);
+ if (!spn) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spnlen = scnprintf(spn, len, "cifs/%.*s",
+ (int)spnlen, server->hostname);
+
+ av_for_each_entry(ses, av)
+ tilen += sizeof(*av) + AV_LEN(av);
+
+ len = baselen + tilen + spnlen * sizeof(__le16) + num_avs * sizeof(*av);
+ rsp = kmalloc(len, GFP_KERNEL);
+ if (!rsp) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(rsp + baselen, ses->auth_key.response, tilen);
+ av = (void *)(rsp + baselen + tilen);
+ av->type = cpu_to_le16(NTLMSSP_AV_TARGET_NAME);
+ av->length = cpu_to_le16(spnlen * sizeof(__le16));
+ cifs_strtoUTF16((__le16 *)av->data, spn, spnlen, ses->local_nls);
+ av = (void *)((__u8 *)av + sizeof(*av) + AV_LEN(av));
+ av->type = cpu_to_le16(NTLMSSP_AV_EOL);
+ av->length = 0;
+
+ rc = 0;
+ ses->auth_key.len = len;
+out:
+ ses->auth_key.response = rsp;
+ return rc;
+}
+
int
setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
{
struct shash_desc *hmacmd5 = NULL;
- int rc;
- int baselen;
- unsigned int tilen;
+ unsigned char *tiblob = NULL; /* target info blob */
struct ntlmv2_resp *ntlmv2;
char ntlmv2_hash[16];
- unsigned char *tiblob = NULL; /* target info blob */
__le64 rsp_timestamp;
+ __u64 cc;
+ int rc;
if (nls_cp == NULL) {
cifs_dbg(VFS, "%s called with nls_cp==NULL\n", __func__);
@@ -588,32 +638,25 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
* (as Windows 7 does)
*/
rsp_timestamp = find_timestamp(ses);
+ get_random_bytes(&cc, sizeof(cc));
- baselen = CIFS_SESS_KEY_SIZE + sizeof(struct ntlmv2_resp);
- tilen = ses->auth_key.len;
- tiblob = ses->auth_key.response;
+ cifs_server_lock(ses->server);
- ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
- if (!ses->auth_key.response) {
- rc = -ENOMEM;
+ tiblob = ses->auth_key.response;
+ rc = set_auth_key_response(ses);
+ if (rc) {
ses->auth_key.len = 0;
- goto setup_ntlmv2_rsp_ret;
+ goto unlock;
}
- ses->auth_key.len += baselen;
ntlmv2 = (struct ntlmv2_resp *)
(ses->auth_key.response + CIFS_SESS_KEY_SIZE);
ntlmv2->blob_signature = cpu_to_le32(0x00000101);
ntlmv2->reserved = 0;
ntlmv2->time = rsp_timestamp;
-
- get_random_bytes(&ntlmv2->client_chal, sizeof(ntlmv2->client_chal));
+ ntlmv2->client_chal = cc;
ntlmv2->reserved2 = 0;
- memcpy(ses->auth_key.response + baselen, tiblob, tilen);
-
- cifs_server_lock(ses->server);
-
rc = cifs_alloc_hash("hmac(md5)", &hmacmd5);
if (rc) {
cifs_dbg(VFS, "Could not allocate HMAC-MD5, rc=%d\n", rc);
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index a08c42363ffc..3bd85ab2deb1 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -70,7 +70,6 @@ bool require_gcm_256; /* false by default */
bool enable_negotiate_signing; /* false by default */
unsigned int global_secflags = CIFSSEC_DEF;
/* unsigned int ntlmv2_support = 0; */
-unsigned int sign_CIFS_PDUs = 1;
/*
* Global transaction id (XID) information
@@ -78,7 +77,7 @@ unsigned int sign_CIFS_PDUs = 1;
unsigned int GlobalCurrentXid; /* protected by GlobalMid_Lock */
unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Lock */
unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Lock */
-spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
+DEFINE_SPINLOCK(GlobalMid_Lock); /* protects above & list operations on midQ entries */
/*
* Global counters, updated atomically
@@ -98,7 +97,7 @@ atomic_t total_buf_alloc_count;
atomic_t total_small_buf_alloc_count;
#endif/* STATS2 */
struct list_head cifs_tcp_ses_list;
-spinlock_t cifs_tcp_ses_lock;
+DEFINE_SPINLOCK(cifs_tcp_ses_lock);
static const struct super_operations cifs_super_ops;
unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
module_param(CIFSMaxBufSize, uint, 0444);
@@ -261,9 +260,9 @@ cifs_read_super(struct super_block *sb)
}
if (tcon->nocase)
- sb->s_d_op = &cifs_ci_dentry_ops;
+ set_default_d_op(sb, &cifs_ci_dentry_ops);
else
- sb->s_d_op = &cifs_dentry_ops;
+ set_default_d_op(sb, &cifs_dentry_ops);
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
@@ -724,7 +723,7 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
else
seq_puts(s, ",nativesocket");
seq_show_option(s, "symlink",
- cifs_symlink_type_str(get_cifs_symlink_type(cifs_sb)));
+ cifs_symlink_type_str(cifs_symlink_type(cifs_sb)));
seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
@@ -929,7 +928,8 @@ cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
while (*s && *s != sep)
s++;
- child = lookup_positive_unlocked(p, dentry, s - p);
+ child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
+ dentry);
dput(dentry);
dentry = child;
} while (!IS_ERR(dentry));
@@ -1525,7 +1525,7 @@ const struct file_operations cifs_file_ops = {
.flock = cifs_flock,
.fsync = cifs_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_mmap,
+ .mmap_prepare = cifs_file_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
@@ -1545,7 +1545,7 @@ const struct file_operations cifs_file_strict_ops = {
.flock = cifs_flock,
.fsync = cifs_strict_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_strict_mmap,
+ .mmap_prepare = cifs_file_strict_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
@@ -1565,7 +1565,7 @@ const struct file_operations cifs_file_direct_ops = {
.flock = cifs_flock,
.fsync = cifs_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_mmap,
+ .mmap_prepare = cifs_file_mmap_prepare,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
@@ -1583,7 +1583,7 @@ const struct file_operations cifs_file_nobrl_ops = {
.release = cifs_close,
.fsync = cifs_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_mmap,
+ .mmap_prepare = cifs_file_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
@@ -1601,7 +1601,7 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
.release = cifs_close,
.fsync = cifs_strict_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_strict_mmap,
+ .mmap_prepare = cifs_file_strict_mmap_prepare,
.splice_read = filemap_splice_read,
.splice_write = iter_file_splice_write,
.llseek = cifs_llseek,
@@ -1619,7 +1619,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.release = cifs_close,
.fsync = cifs_fsync,
.flush = cifs_flush,
- .mmap = cifs_file_mmap,
+ .mmap_prepare = cifs_file_mmap_prepare,
.splice_read = copy_splice_read,
.splice_write = iter_file_splice_write,
.unlocked_ioctl = cifs_ioctl,
@@ -1863,8 +1863,6 @@ init_cifs(void)
GlobalCurrentXid = 0;
GlobalTotalActiveXid = 0;
GlobalMaxActiveXid = 0;
- spin_lock_init(&cifs_tcp_ses_lock);
- spin_lock_init(&GlobalMid_Lock);
cifs_lock_secret = get_random_u32();
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index ca435a3841b8..487f39cff77e 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -103,8 +103,8 @@ extern int cifs_lock(struct file *, int, struct file_lock *);
extern int cifs_fsync(struct file *, loff_t, loff_t, int);
extern int cifs_strict_fsync(struct file *, loff_t, loff_t, int);
extern int cifs_flush(struct file *, fl_owner_t id);
-extern int cifs_file_mmap(struct file *file, struct vm_area_struct *vma);
-extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
+int cifs_file_mmap_prepare(struct vm_area_desc *desc);
+int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc);
extern const struct file_operations cifs_dir_ops;
extern int cifs_readdir(struct file *file, struct dir_context *ctx);
@@ -145,6 +145,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 54
-#define CIFS_VERSION "2.54"
+#define SMB3_PRODUCT_BUILD 55
+#define CIFS_VERSION "2.55"
#endif /* _CIFSFS_H */
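
The cifsfs.c and cifsfs.h hunks above switch the file_operations tables from .mmap
to the newer .mmap_prepare hook, which is handed a struct vm_area_desc describing
the mapping before the VMA is created. A hedged sketch of the shape implied by the
new prototypes (what is done with vm_area_desc internally is not shown in this diff
and is only assumed):

    static int example_mmap_prepare(struct vm_area_desc *desc)
    {
            /*
             * Validate the request and pick vm_ops here, i.e. the work the
             * old ->mmap(file, vma) callback did on a live vma, but before
             * the VMA actually exists.
             */
            return 0;
    }

    static const struct file_operations example_fops = {
            .mmap_prepare   = example_mmap_prepare, /* replaces .mmap */
    };
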
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 3b32116b0b49..e6830ab3a546 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -556,7 +556,7 @@ struct smb_version_operations {
void (*set_oplock_level)(struct cifsInodeInfo *cinode, __u32 oplock, __u16 epoch,
bool *purge_cache);
/* create lease context buffer for CREATE request */
- char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
+ char * (*create_lease_buf)(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 le_flags);
/* parse lease context buffer and return oplock/epoch info */
__u8 (*parse_lease_buf)(void *buf, __u16 *epoch, char *lkey);
ssize_t (*copychunk_range)(const unsigned int,
@@ -627,12 +627,14 @@ struct smb_version_operations {
bool (*is_network_name_deleted)(char *buf, struct TCP_Server_Info *srv);
struct reparse_data_buffer * (*get_reparse_point_buffer)(const struct kvec *rsp_iov,
u32 *plen);
- int (*create_reparse_symlink)(const unsigned int xid,
- struct inode *inode,
- struct dentry *dentry,
- struct cifs_tcon *tcon,
- const char *full_path,
- const char *symname);
+ struct inode * (*create_reparse_inode)(struct cifs_open_info_data *data,
+ struct super_block *sb,
+ const unsigned int xid,
+ struct cifs_tcon *tcon,
+ const char *full_path,
+ bool directory,
+ struct kvec *reparse_iov,
+ struct kvec *xattr_iov);
};
struct smb_version_values {
@@ -709,6 +711,7 @@ inc_rfc1001_len(void *buf, int count)
struct TCP_Server_Info {
struct list_head tcp_ses_list;
struct list_head smb_ses_list;
+ struct list_head rlist; /* reconnect list */
spinlock_t srv_lock; /* protect anything here that is not protected */
__u64 conn_id; /* connection identifier (useful for debugging) */
int srv_count; /* reference counter */
@@ -729,7 +732,8 @@ struct TCP_Server_Info {
#endif
wait_queue_head_t response_q;
wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/
- spinlock_t mid_lock; /* protect mid queue and it's entries */
+ spinlock_t mid_queue_lock; /* protect mid queue */
+ spinlock_t mid_counter_lock;
struct list_head pending_mid_q;
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
@@ -767,14 +771,16 @@ struct TCP_Server_Info {
/* SMB_COM_WRITE_RAW or SMB_COM_READ_RAW. */
unsigned int capabilities; /* selective disabling of caps by smb sess */
int timeAdj; /* Adjust for difference in server time zone in sec */
- __u64 CurrentMid; /* multiplex id - rotating counter, protected by GlobalMid_Lock */
+ __u64 current_mid; /* multiplex id - rotating counter, protected by mid_counter_lock */
char cryptkey[CIFS_CRYPTO_KEY_SIZE]; /* used by ntlm, ntlmv2 etc */
/* 16th byte of RFC1001 workstation name is always null */
char workstation_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
__u32 sequence_number; /* for signing, protected by srv_mutex */
__u32 reconnect_instance; /* incremented on each reconnect */
+ __le32 session_key_id; /* retrieved from negotiate response and send in session setup request */
struct session_key session_key;
unsigned long lstrp; /* when we got last response from this server */
+ unsigned long neg_start; /* when negotiate started (jiffies) */
struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
#define CIFS_NEGFLAVOR_UNENCAP 1 /* wct == 17, but no ext_sec */
#define CIFS_NEGFLAVOR_EXTENDED 2 /* wct == 17, ext_sec bit set */
@@ -1084,6 +1090,7 @@ struct cifs_chan {
};
#define CIFS_SES_FLAG_SCALE_CHANNELS (0x1)
+#define CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES (0x2)
/*
* Session structure. One of these for each uid session with a particular host
@@ -1300,6 +1307,7 @@ struct cifs_tcon {
bool use_persistent:1; /* use persistent instead of durable handles */
bool no_lease:1; /* Do not request leases on files or directories */
bool use_witness:1; /* use witness protocol */
+ bool dummy:1; /* dummy tcon used for reconnecting channels */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
@@ -1441,6 +1449,7 @@ struct cifs_open_parms {
bool reconnect:1;
bool replay:1; /* indicates that this open is for a replay */
struct kvec *ea_cctx;
+ __le32 lease_flags;
};
struct cifs_fid {
@@ -1448,6 +1457,7 @@ struct cifs_fid {
__u64 persistent_fid; /* persist file id for smb2 */
__u64 volatile_fid; /* volatile file id for smb2 */
__u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */
+ __u8 parent_lease_key[SMB2_LEASE_KEY_SIZE];
__u8 create_guid[16];
__u32 access;
struct cifs_pending_open *pending_open;
@@ -1720,9 +1730,10 @@ struct mid_q_entry {
unsigned int resp_buf_size;
int mid_state; /* wish this were enum but can not pass to wait_event */
int mid_rc; /* rc for MID_RC */
- unsigned int mid_flags;
__le16 command; /* smb command code */
unsigned int optype; /* operation type */
+ bool wait_cancelled:1; /* Cancelled while waiting for response */
+ bool deleted_from_q:1; /* Whether Mid has been dequeued from pending_mid_q */

bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
bool multiEnd:1; /* both received */
@@ -1884,10 +1895,6 @@ static inline bool is_replayable_error(int error)
#define MID_RESPONSE_READY 0x40 /* ready for other process handle the rsp */
#define MID_RC 0x80 /* mid_rc contains custom rc */
-/* Flags */
-#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
-#define MID_DELETED 2 /* Mid has been dequeued/deleted */
-
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
#define CIFS_SMALL_BUFFER 1
@@ -1988,8 +1995,7 @@ require use of the stronger protocol */
* TCP_Server_Info-> TCP_Server_Info cifs_get_tcp_session
* reconnect_mutex
* TCP_Server_Info->srv_mutex TCP_Server_Info cifs_get_tcp_session
- * cifs_ses->session_mutex cifs_ses sesInfoAlloc
- * cifs_tcon
+ * cifs_ses->session_mutex cifs_ses sesInfoAlloc
* cifs_tcon->open_file_lock cifs_tcon->openFileList tconInfoAlloc
* cifs_tcon->pending_opens
* cifs_tcon->stat_lock cifs_tcon->bytes_read tconInfoAlloc
@@ -1999,30 +2005,34 @@ require use of the stronger protocol */
* GlobalCurrentXid
* GlobalTotalActiveXid
* TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change)
- * TCP_Server_Info->mid_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session
- * ->CurrentMid
- * (any changes in mid_q_entry fields)
+ * TCP_Server_Info->mid_queue_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session
+ * mid_q_entry->deleted_from_q
+ * TCP_Server_Info->mid_counter_lock TCP_Server_Info->current_mid cifs_get_tcp_session
* TCP_Server_Info->req_lock TCP_Server_Info->in_flight cifs_get_tcp_session
* ->credits
* ->echo_credits
* ->oplock_credits
* ->reconnect_instance
* cifs_ses->ses_lock (anything that is not protected by another lock and can change)
+ * sesInfoAlloc
* cifs_ses->iface_lock cifs_ses->iface_list sesInfoAlloc
* ->iface_count
* ->iface_last_update
- * cifs_ses->chan_lock cifs_ses->chans
+ * cifs_ses->chan_lock cifs_ses->chans sesInfoAlloc
* ->chans_need_reconnect
* ->chans_in_reconnect
* cifs_tcon->tc_lock (anything that is not protected by another lock and can change)
+ * tcon_info_alloc
* inode->i_rwsem, taken by fs/netfs/locking.c e.g. should be taken before cifsInodeInfo locks
* cifsInodeInfo->open_file_lock cifsInodeInfo->openFileList cifs_alloc_inode
* cifsInodeInfo->writers_lock cifsInodeInfo->writers cifsInodeInfo_alloc
* cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once
* ->can_cache_brlcks
* cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc
- * cached_fids->cfid_list_lock cifs_tcon->cfids->entries init_cached_dirs
- * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
+ * cached_fids->cfid_list_lock cifs_tcon->cfids->entries init_cached_dirs
+ * cached_fid->fid_lock (anything that is not protected by another lock and can change)
+ * init_cached_dir
+ * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo
* cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
* ->invalidHandle initiate_cifs_search
* ->oplock_break_cancelled
@@ -2365,4 +2375,9 @@ static inline bool cifs_netbios_name(const char *name, size_t namelen)
return ret;
}
+#define CIFS_REPARSE_SUPPORT(tcon) \
+ ((tcon)->posix_extensions || \
+ (le32_to_cpu((tcon)->fsAttrInfo.Attributes) & \
+ FILE_SUPPORTS_REPARSE_POINTS))
+
#endif /* _CIFS_GLOB_H */
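
The cifsglob.h hunks above split the old mid_lock into mid_queue_lock (protecting
pending_mid_q and mid_q_entry->deleted_from_q) and mid_counter_lock (protecting the
rotating current_mid), while the MID_WAIT_CANCELLED/MID_DELETED flags become the
wait_cancelled and deleted_from_q bitfields. A minimal sketch of the resulting
convention (the function names below are hypothetical, not from the patch):

    static __u64 example_get_next_mid(struct TCP_Server_Info *server)
    {
            __u64 mid;

            spin_lock(&server->mid_counter_lock);   /* counter only */
            mid = server->current_mid++;
            spin_unlock(&server->mid_counter_lock);
            return mid;
    }

    static void example_dequeue_mid(struct TCP_Server_Info *server,
                                    struct mid_q_entry *mid)
    {
            spin_lock(&server->mid_queue_lock);     /* queue + deleted_from_q */
            if (!mid->deleted_from_q) {
                    list_del_init(&mid->qhead);
                    mid->deleted_from_q = true;
            }
            spin_unlock(&server->mid_queue_lock);
    }
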
diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
index 18d67ab113f0..d9cf7db0ac35 100644
--- a/fs/smb/client/cifspdu.h
+++ b/fs/smb/client/cifspdu.h
@@ -597,7 +597,7 @@ typedef union smb_com_session_setup_andx {
__le16 MaxBufferSize;
__le16 MaxMpxCount;
__le16 VcNumber;
- __u32 SessionKey;
+ __le32 SessionKey;
__le16 SecurityBlobLength;
__u32 Reserved;
__le32 Capabilities; /* see below */
@@ -616,7 +616,7 @@ typedef union smb_com_session_setup_andx {
__le16 MaxBufferSize;
__le16 MaxMpxCount;
__le16 VcNumber;
- __u32 SessionKey;
+ __le32 SessionKey;
__le16 CaseInsensitivePasswordLength; /* ASCII password len */
__le16 CaseSensitivePasswordLength; /* Unicode password length*/
__u32 Reserved; /* see below */
@@ -654,7 +654,7 @@ typedef union smb_com_session_setup_andx {
__le16 MaxBufferSize;
__le16 MaxMpxCount;
__le16 VcNumber;
- __u32 SessionKey;
+ __le32 SessionKey;
__le16 PasswordLength;
__u32 Reserved; /* encrypt key len and offset */
__le16 ByteCount;
@@ -1266,10 +1266,9 @@ typedef struct smb_com_query_information_rsp {
typedef struct smb_com_setattr_req {
struct smb_hdr hdr; /* wct = 8 */
__le16 attr;
- __le16 time_low;
- __le16 time_high;
+ __le32 last_write_time;
__le16 reserved[5]; /* must be zero */
- __u16 ByteCount;
+ __le16 ByteCount;
__u8 BufferFormat; /* 4 = ASCII */
unsigned char fileName[];
} __attribute__((packed)) SETATTR_REQ;
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 59f6fdfe560e..c34c533b2efa 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -116,16 +116,31 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
int * /* bytes returned */ , const int);
extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
char *in_buf, int flags);
+int cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server);
extern struct mid_q_entry *cifs_setup_request(struct cifs_ses *,
struct TCP_Server_Info *,
struct smb_rqst *);
extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
struct smb_rqst *);
+int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst);
extern int cifs_check_receive(struct mid_q_entry *mid,
struct TCP_Server_Info *server, bool log_error);
+int wait_for_free_request(struct TCP_Server_Info *server, const int flags,
+ unsigned int *instance);
extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server,
size_t size, size_t *num,
struct cifs_credits *credits);
+
+static inline int
+send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
+ struct mid_q_entry *mid)
+{
+ return server->ops->send_cancel ?
+ server->ops->send_cancel(server, rqst, mid) : 0;
+}
+
+int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ);
extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */, const int flags,
@@ -136,6 +151,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
struct smb_hdr *out_buf,
int *bytes_returned);
+void smb2_query_server_interfaces(struct work_struct *work);
void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
bool all_channels);
@@ -151,8 +167,7 @@ extern bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 eof,
bool from_readdir);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written);
-void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
- bool was_async);
+void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result);
extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
int flags,
@@ -395,6 +410,10 @@ extern int CIFSSMBQFSUnixInfo(const unsigned int xid, struct cifs_tcon *tcon);
extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon,
struct kstatfs *FSData);
+extern int SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fileName, __le32 attributes, __le64 write_time,
+ const struct nls_table *nls_codepage,
+ struct cifs_sb_info *cifs_sb);
extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon,
const char *fileName, const FILE_BASIC_INFO *data,
const struct nls_table *nls_codepage,
@@ -479,6 +498,14 @@ extern int cifs_query_reparse_point(const unsigned int xid,
const char *full_path,
u32 *tag, struct kvec *rsp,
int *rsp_buftype);
+extern struct inode *cifs_create_reparse_inode(struct cifs_open_info_data *data,
+ struct super_block *sb,
+ const unsigned int xid,
+ struct cifs_tcon *tcon,
+ const char *full_path,
+ bool directory,
+ struct kvec *reparse_iov,
+ struct kvec *xattr_iov);
extern int CIFSSMB_set_compression(const unsigned int xid,
struct cifs_tcon *tcon, __u16 fid);
extern int CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms,
diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
index 60cb264a01e5..d20766f664c4 100644
--- a/fs/smb/client/cifssmb.c
+++ b/fs/smb/client/cifssmb.c
@@ -498,6 +498,7 @@ CIFSSMBNegotiate(const unsigned int xid,
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
cifs_dbg(NOISY, "Max buf = %d\n", ses->server->maxBuf);
server->capabilities = le32_to_cpu(pSMBr->Capabilities);
+ server->session_key_id = pSMBr->SessionKey;
server->timeAdj = (int)(__s16)le16_to_cpu(pSMBr->ServerTimeZone);
server->timeAdj *= 60;
@@ -1333,7 +1334,12 @@ cifs_readv_callback(struct mid_q_entry *mid)
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_REQUEST_SUBMITTED:
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_req_submitted);
+ goto do_retry;
case MID_RETRY_NEEDED:
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed);
+do_retry:
+ __set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
rdata->result = -EAGAIN;
if (server->sign && rdata->got_bytes)
/* reset bytes number since we can not check a sign */
@@ -1342,8 +1348,14 @@ cifs_readv_callback(struct mid_q_entry *mid)
task_io_account_read(rdata->got_bytes);
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
+ case MID_RESPONSE_MALFORMED:
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_malformed);
+ rdata->result = -EIO;
+ break;
default:
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_unknown);
rdata->result = -EIO;
+ break;
}
if (rdata->result == -ENODATA) {
@@ -1712,10 +1724,21 @@ cifs_writev_callback(struct mid_q_entry *mid)
}
break;
case MID_REQUEST_SUBMITTED:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_req_submitted);
+ __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
+ result = -EAGAIN;
+ break;
case MID_RETRY_NEEDED:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed);
+ __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
result = -EAGAIN;
break;
+ case MID_RESPONSE_MALFORMED:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_malformed);
+ result = -EIO;
+ break;
default:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_unknown);
result = -EIO;
break;
}
@@ -1725,7 +1748,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
server->credits, server->in_flight,
0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0;
- cifs_write_subrequest_terminated(wdata, result, true);
+ cifs_write_subrequest_terminated(wdata, result);
release_mid(mid);
trace_smb3_rw_credits(credits.rreq_debug_id, credits.rreq_debug_index, 0,
server->credits, server->in_flight,
@@ -1813,7 +1836,7 @@ async_writev_out:
out:
if (rc) {
add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
- cifs_write_subrequest_terminated(wdata, rc, false);
+ cifs_write_subrequest_terminated(wdata, rc);
}
}
@@ -2728,7 +2751,7 @@ int cifs_query_reparse_point(const unsigned int xid,
if (cap_unix(tcon->ses))
return -EOPNOTSUPP;
- if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS))
+ if (!CIFS_REPARSE_SUPPORT(tcon))
return -EOPNOTSUPP;
oparms = (struct cifs_open_parms) {
@@ -2753,10 +2776,10 @@ int cifs_query_reparse_point(const unsigned int xid,
io_req->TotalParameterCount = 0;
io_req->TotalDataCount = 0;
- io_req->MaxParameterCount = cpu_to_le32(2);
+ io_req->MaxParameterCount = cpu_to_le32(0);
/* BB find exact data count max from sess structure BB */
io_req->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
- io_req->MaxSetupCount = 4;
+ io_req->MaxSetupCount = 1;
io_req->Reserved = 0;
io_req->ParameterOffset = 0;
io_req->DataCount = 0;
@@ -2783,6 +2806,22 @@ int cifs_query_reparse_point(const unsigned int xid,
goto error;
}
+ /* SetupCount must be 1, otherwise offset to ByteCount is incorrect. */
+ if (io_rsp->SetupCount != 1) {
+ rc = -EIO;
+ goto error;
+ }
+
+ /*
+ * ReturnedDataLen is output length of executed IOCTL.
+ * DataCount is output length transferred over network.
+ * Check that we have full FSCTL_GET_REPARSE_POINT buffer.
+ */
+ if (data_count != le16_to_cpu(io_rsp->ReturnedDataLen)) {
+ rc = -EIO;
+ goto error;
+ }
+
end = 2 + get_bcc(&io_rsp->hdr) + (__u8 *)&io_rsp->ByteCount;
start = (__u8 *)&io_rsp->hdr.Protocol + data_offset;
if (start >= end) {
@@ -2812,6 +2851,134 @@ error:
return rc;
}
+struct inode *cifs_create_reparse_inode(struct cifs_open_info_data *data,
+ struct super_block *sb,
+ const unsigned int xid,
+ struct cifs_tcon *tcon,
+ const char *full_path,
+ bool directory,
+ struct kvec *reparse_iov,
+ struct kvec *xattr_iov)
+{
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+ struct cifs_open_parms oparms;
+ TRANSACT_IOCTL_REQ *io_req;
+ struct inode *new = NULL;
+ struct kvec in_iov[2];
+ struct kvec out_iov;
+ struct cifs_fid fid;
+ int io_req_len;
+ int oplock = 0;
+ int buf_type = 0;
+ int rc;
+
+ cifs_tcon_dbg(FYI, "%s: path=%s\n", __func__, full_path);
+
+ /*
+ * If server filesystem does not support reparse points then do not
+ * attempt to create reparse point. This will prevent creating unusable
+ * empty object on the server.
+ */
+ if (!CIFS_REPARSE_SUPPORT(tcon))
+ return ERR_PTR(-EOPNOTSUPP);
+
+#ifndef CONFIG_CIFS_XATTR
+ if (xattr_iov)
+ return ERR_PTR(-EOPNOTSUPP);
+#endif
+
+ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
+ FILE_READ_ATTRIBUTES | FILE_WRITE_DATA | FILE_WRITE_EA,
+ FILE_CREATE,
+ (directory ? CREATE_NOT_FILE : CREATE_NOT_DIR) | OPEN_REPARSE_POINT,
+ ACL_NO_MODE);
+ oparms.fid = &fid;
+
+ rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ if (rc)
+ return ERR_PTR(rc);
+
+#ifdef CONFIG_CIFS_XATTR
+ if (xattr_iov) {
+ struct smb2_file_full_ea_info *ea;
+
+ ea = &((struct smb2_create_ea_ctx *)xattr_iov->iov_base)->ea;
+ while (1) {
+ rc = CIFSSMBSetEA(xid,
+ tcon,
+ full_path,
+ &ea->ea_data[0],
+ &ea->ea_data[ea->ea_name_length+1],
+ le16_to_cpu(ea->ea_value_length),
+ cifs_sb->local_nls,
+ cifs_sb);
+ if (rc)
+ goto out_close;
+ if (le32_to_cpu(ea->next_entry_offset) == 0)
+ break;
+ ea = (struct smb2_file_full_ea_info *)((u8 *)ea +
+ le32_to_cpu(ea->next_entry_offset));
+ }
+ }
+#endif
+
+ rc = smb_init(SMB_COM_NT_TRANSACT, 23, tcon, (void **)&io_req, NULL);
+ if (rc)
+ goto out_close;
+
+ inc_rfc1001_len(io_req, sizeof(io_req->Pad));
+
+ io_req_len = be32_to_cpu(io_req->hdr.smb_buf_length) + sizeof(io_req->hdr.smb_buf_length);
+
+ /* NT IOCTL response contains one-word long output setup buffer with size of output data. */
+ io_req->MaxSetupCount = 1;
+ /* NT IOCTL response does not contain output parameters. */
+ io_req->MaxParameterCount = cpu_to_le32(0);
+ /* FSCTL_SET_REPARSE_POINT response contains empty output data. */
+ io_req->MaxDataCount = cpu_to_le32(0);
+
+ io_req->TotalParameterCount = cpu_to_le32(0);
+ io_req->TotalDataCount = cpu_to_le32(reparse_iov->iov_len);
+ io_req->ParameterCount = io_req->TotalParameterCount;
+ io_req->ParameterOffset = cpu_to_le32(0);
+ io_req->DataCount = io_req->TotalDataCount;
+ io_req->DataOffset = cpu_to_le32(offsetof(typeof(*io_req), Data) -
+ sizeof(io_req->hdr.smb_buf_length));
+ io_req->SetupCount = 4;
+ io_req->SubCommand = cpu_to_le16(NT_TRANSACT_IOCTL);
+ io_req->FunctionCode = cpu_to_le32(FSCTL_SET_REPARSE_POINT);
+ io_req->Fid = fid.netfid;
+ io_req->IsFsctl = 1;
+ io_req->IsRootFlag = 0;
+ io_req->ByteCount = cpu_to_le16(le32_to_cpu(io_req->DataCount) + sizeof(io_req->Pad));
+
+ inc_rfc1001_len(io_req, reparse_iov->iov_len);
+
+ in_iov[0].iov_base = (char *)io_req;
+ in_iov[0].iov_len = io_req_len;
+ in_iov[1] = *reparse_iov;
+ rc = SendReceive2(xid, tcon->ses, in_iov, ARRAY_SIZE(in_iov), &buf_type,
+ CIFS_NO_RSP_BUF, &out_iov);
+
+ cifs_buf_release(io_req);
+
+ if (!rc)
+ rc = cifs_get_inode_info(&new, full_path, data, sb, xid, NULL);
+
+out_close:
+ CIFSSMBClose(xid, tcon, fid.netfid);
+
+ /*
+ * If CREATE was successful but FSCTL_SET_REPARSE_POINT failed then
+ * remove the intermediate object created by CREATE. Otherwise an
+ * empty object would stay on the server when the reparse call failed.
+ */
+ if (rc)
+ CIFSSMBDelFile(xid, tcon, full_path, cifs_sb, NULL);
+
+ return rc ? ERR_PTR(rc) : new;
+}
+
int
CIFSSMB_set_compression(const unsigned int xid, struct cifs_tcon *tcon,
__u16 fid)
@@ -3981,6 +4148,12 @@ findFirstRetry:
pSMB->FileName[name_len] = 0;
pSMB->FileName[name_len+1] = 0;
name_len += 2;
+ } else if (!searchName[0]) {
+ pSMB->FileName[0] = CIFS_DIR_SEP(cifs_sb);
+ pSMB->FileName[1] = 0;
+ pSMB->FileName[2] = 0;
+ pSMB->FileName[3] = 0;
+ name_len = 4;
}
} else {
name_len = copy_path_name(pSMB->FileName, searchName);
@@ -3992,6 +4165,10 @@ findFirstRetry:
pSMB->FileName[name_len] = '*';
pSMB->FileName[name_len+1] = 0;
name_len += 2;
+ } else if (!searchName[0]) {
+ pSMB->FileName[0] = CIFS_DIR_SEP(cifs_sb);
+ pSMB->FileName[1] = 0;
+ name_len = 2;
}
}
@@ -4018,7 +4195,7 @@ findFirstRetry:
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
ATTR_DIRECTORY);
- pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
+ pSMB->SearchCount = cpu_to_le16(msearch ? CIFSMaxBufSize/sizeof(FILE_UNIX_INFO) : 1);
pSMB->SearchFlags = cpu_to_le16(search_flags);
pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
@@ -5171,6 +5348,63 @@ CIFSSMBSetFileSize(const unsigned int xid, struct cifs_tcon *tcon,
return rc;
}
+int
+SMBSetInformation(const unsigned int xid, struct cifs_tcon *tcon,
+ const char *fileName, __le32 attributes, __le64 write_time,
+ const struct nls_table *nls_codepage,
+ struct cifs_sb_info *cifs_sb)
+{
+ SETATTR_REQ *pSMB;
+ SETATTR_RSP *pSMBr;
+ struct timespec64 ts;
+ int bytes_returned;
+ int name_len;
+ int rc;
+
+ cifs_dbg(FYI, "In %s path %s\n", __func__, fileName);
+
+retry:
+ rc = smb_init(SMB_COM_SETATTR, 8, tcon, (void **) &pSMB,
+ (void **) &pSMBr);
+ if (rc)
+ return rc;
+
+ if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
+ name_len =
+ cifsConvertToUTF16((__le16 *) pSMB->fileName,
+ fileName, PATH_MAX, nls_codepage,
+ cifs_remap(cifs_sb));
+ name_len++; /* trailing null */
+ name_len *= 2;
+ } else {
+ name_len = copy_path_name(pSMB->fileName, fileName);
+ }
+ /* Only a few attributes can be set by this command; others are not accepted by Win9x. */
+ pSMB->attr = cpu_to_le16(le32_to_cpu(attributes) &
+ (ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | ATTR_ARCHIVE));
+ /* Zero write time value (in both NT and SETATTR formats) means to not change it. */
+ if (le64_to_cpu(write_time) != 0) {
+ ts = cifs_NTtimeToUnix(write_time);
+ pSMB->last_write_time = cpu_to_le32(ts.tv_sec);
+ }
+ pSMB->BufferFormat = 0x04;
+ name_len++; /* account for buffer type byte */
+ inc_rfc1001_len(pSMB, (__u16)name_len);
+ pSMB->ByteCount = cpu_to_le16(name_len);
+
+ rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
+ (struct smb_hdr *) pSMBr, &bytes_returned, 0);
+ if (rc)
+ cifs_dbg(FYI, "Send error in %s = %d\n", __func__, rc);
+
+ cifs_buf_release(pSMB);
+
+ if (rc == -EAGAIN)
+ goto retry;
+
+ return rc;
+}
+
/* Some legacy servers such as NT4 require that the file times be set on
an open handle, rather than by pathname - this is awkward due to
potential access conflicts on the open, but it is unavoidable for these
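
Among the cifssmb.c additions above, SMBSetInformation() gives the SMB1 code a
path-based SMB_COM_SETATTR helper. A hypothetical caller sketch (the real call
sites live in smb1ops.c, which this excerpt does not include):

    /* set DOS attributes by path; a zero write_time means "do not change it" */
    rc = SMBSetInformation(xid, tcon, full_path,
                           cpu_to_le32(ATTR_HIDDEN | ATTR_ARCHIVE),
                           0, cifs_sb->local_nls, cifs_sb);
    if (rc)
            cifs_dbg(FYI, "SMB_COM_SETATTR failed: %d\n", rc);
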
diff --git a/fs/smb/client/cifstransport.c b/fs/smb/client/cifstransport.c
new file mode 100644
index 000000000000..352dafb888dd
--- /dev/null
+++ b/fs/smb/client/cifstransport.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: LGPL-2.1
+/*
+ *
+ * Copyright (C) International Business Machines Corp., 2002,2008
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ * Jeremy Allison (jra@samba.org) 2006.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/gfp.h>
+#include <linux/wait.h>
+#include <linux/net.h>
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/tcp.h>
+#include <linux/bvec.h>
+#include <linux/highmem.h>
+#include <linux/uaccess.h>
+#include <linux/processor.h>
+#include <linux/mempool.h>
+#include <linux/sched/signal.h>
+#include <linux/task_io_accounting_ops.h>
+#include "cifspdu.h"
+#include "cifsglob.h"
+#include "cifsproto.h"
+#include "cifs_debug.h"
+#include "smb2proto.h"
+#include "smbdirect.h"
+#include "compress.h"
+
+/* Max number of iovectors we can use off the stack when sending requests. */
+#define CIFS_MAX_IOV_SIZE 8
+
+static struct mid_q_entry *
+alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
+{
+ struct mid_q_entry *temp;
+
+ if (server == NULL) {
+ cifs_dbg(VFS, "%s: null TCP session\n", __func__);
+ return NULL;
+ }
+
+ temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
+ memset(temp, 0, sizeof(struct mid_q_entry));
+ kref_init(&temp->refcount);
+ temp->mid = get_mid(smb_buffer);
+ temp->pid = current->pid;
+ temp->command = cpu_to_le16(smb_buffer->Command);
+ cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
+ /* easier to use jiffies */
+ /* when mid allocated can be before when sent */
+ temp->when_alloc = jiffies;
+ temp->server = server;
+
+ /*
+ * The default is for the mid to be synchronous, so the
+ * default callback just wakes up the current task.
+ */
+ get_task_struct(current);
+ temp->creator = current;
+ temp->callback = cifs_wake_up_task;
+ temp->callback_data = current;
+
+ atomic_inc(&mid_count);
+ temp->mid_state = MID_REQUEST_ALLOCATED;
+ return temp;
+}
+
+int
+smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
+ unsigned int smb_buf_length)
+{
+ struct kvec iov[2];
+ struct smb_rqst rqst = { .rq_iov = iov,
+ .rq_nvec = 2 };
+
+ iov[0].iov_base = smb_buffer;
+ iov[0].iov_len = 4;
+ iov[1].iov_base = (char *)smb_buffer + 4;
+ iov[1].iov_len = smb_buf_length;
+
+ return __smb_send_rqst(server, 1, &rqst);
+}
+
+static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
+ struct mid_q_entry **ppmidQ)
+{
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_NEW) {
+ if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
+ (in_buf->Command != SMB_COM_NEGOTIATE)) {
+ spin_unlock(&ses->ses_lock);
+ return -EAGAIN;
+ }
+ /* else ok - we are setting up session */
+ }
+
+ if (ses->ses_status == SES_EXITING) {
+ /* check if SMB session is bad because we are setting it up */
+ if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
+ spin_unlock(&ses->ses_lock);
+ return -EAGAIN;
+ }
+ /* else ok - we are shutting down session */
+ }
+ spin_unlock(&ses->ses_lock);
+
+ *ppmidQ = alloc_mid(in_buf, ses->server);
+ if (*ppmidQ == NULL)
+ return -ENOMEM;
+ spin_lock(&ses->server->mid_queue_lock);
+ list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
+ spin_unlock(&ses->server->mid_queue_lock);
+ return 0;
+}
+
+struct mid_q_entry *
+cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+ int rc;
+ struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
+ struct mid_q_entry *mid;
+
+ if (rqst->rq_iov[0].iov_len != 4 ||
+ rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
+ return ERR_PTR(-EIO);
+
+ /* enable signing if server requires it */
+ if (server->sign)
+ hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+
+ mid = alloc_mid(hdr, server);
+ if (mid == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
+ if (rc) {
+ release_mid(mid);
+ return ERR_PTR(rc);
+ }
+
+ return mid;
+}
+
+/*
+ *
+ * Send an SMB Request. No response info (other than return code)
+ * needs to be parsed.
+ *
+ * flags indicate the type of request buffer and how long to wait
+ * and whether to log NT STATUS code (error) before mapping it to POSIX error
+ *
+ */
+int
+SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
+ char *in_buf, int flags)
+{
+ int rc;
+ struct kvec iov[1];
+ struct kvec rsp_iov;
+ int resp_buf_type;
+
+ iov[0].iov_base = in_buf;
+ iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
+ flags |= CIFS_NO_RSP_BUF;
+ rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
+
+ return rc;
+}
+
+int
+cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
+ bool log_error)
+{
+ unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
+
+ dump_smb(mid->resp_buf, min_t(u32, 92, len));
+
+ /* convert the length into a more usable form */
+ if (server->sign) {
+ struct kvec iov[2];
+ int rc = 0;
+ struct smb_rqst rqst = { .rq_iov = iov,
+ .rq_nvec = 2 };
+
+ iov[0].iov_base = mid->resp_buf;
+ iov[0].iov_len = 4;
+ iov[1].iov_base = (char *)mid->resp_buf + 4;
+ iov[1].iov_len = len - 4;
+ /* FIXME: add code to kill session */
+ rc = cifs_verify_signature(&rqst, server,
+ mid->sequence_number);
+ if (rc)
+ cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
+ rc);
+ }
+
+ /* BB special case reconnect tid and uid here? */
+ return map_and_check_smb_error(mid, log_error);
+}
+
+struct mid_q_entry *
+cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
+ struct smb_rqst *rqst)
+{
+ int rc;
+ struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
+ struct mid_q_entry *mid;
+
+ if (rqst->rq_iov[0].iov_len != 4 ||
+ rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
+ return ERR_PTR(-EIO);
+
+ rc = allocate_mid(ses, hdr, &mid);
+ if (rc)
+ return ERR_PTR(rc);
+ rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
+ if (rc) {
+ delete_mid(mid);
+ return ERR_PTR(rc);
+ }
+ return mid;
+}
+
+int
+SendReceive2(const unsigned int xid, struct cifs_ses *ses,
+ struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
+ const int flags, struct kvec *resp_iov)
+{
+ struct smb_rqst rqst;
+ struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
+ int rc;
+
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
+ new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
+ GFP_KERNEL);
+ if (!new_iov) {
+ /* otherwise cifs_send_recv below sets resp_buf_type */
+ *resp_buf_type = CIFS_NO_BUFFER;
+ return -ENOMEM;
+ }
+ } else
+ new_iov = s_iov;
+
+ /* 1st iov is a RFC1001 length followed by the rest of the packet */
+ memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
+
+ new_iov[0].iov_base = new_iov[1].iov_base;
+ new_iov[0].iov_len = 4;
+ new_iov[1].iov_base += 4;
+ new_iov[1].iov_len -= 4;
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = new_iov;
+ rqst.rq_nvec = n_vec + 1;
+
+ rc = cifs_send_recv(xid, ses, ses->server,
+ &rqst, resp_buf_type, flags, resp_iov);
+ if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
+ kfree(new_iov);
+ return rc;
+}
+
+int
+SendReceive(const unsigned int xid, struct cifs_ses *ses,
+ struct smb_hdr *in_buf, struct smb_hdr *out_buf,
+ int *pbytes_returned, const int flags)
+{
+ int rc = 0;
+ struct mid_q_entry *midQ;
+ unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
+ struct kvec iov = { .iov_base = in_buf, .iov_len = len };
+ struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
+ struct cifs_credits credits = { .value = 1, .instance = 0 };
+ struct TCP_Server_Info *server;
+
+ if (ses == NULL) {
+ cifs_dbg(VFS, "Null smb session\n");
+ return -EIO;
+ }
+ server = ses->server;
+ if (server == NULL) {
+ cifs_dbg(VFS, "Null tcp session\n");
+ return -EIO;
+ }
+
+ spin_lock(&server->srv_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&server->srv_lock);
+ return -ENOENT;
+ }
+ spin_unlock(&server->srv_lock);
+
+ /* Ensure that we do not send more than 50 overlapping requests
+ to the same server. We may make this configurable later or
+ use ses->maxReq */
+
+ if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+ cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
+ len);
+ return -EIO;
+ }
+
+ rc = wait_for_free_request(server, flags, &credits.instance);
+ if (rc)
+ return rc;
+
+ /* make sure that we sign in the same order that we send on this socket
+ and avoid races inside tcp sendmsg code that could cause corruption
+ of smb data */
+
+ cifs_server_lock(server);
+
+ rc = allocate_mid(ses, in_buf, &midQ);
+ if (rc) {
+ cifs_server_unlock(server);
+ /* Update # of requests on wire to server */
+ add_credits(server, &credits, 0);
+ return rc;
+ }
+
+ rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
+ if (rc) {
+ cifs_server_unlock(server);
+ goto out;
+ }
+
+ midQ->mid_state = MID_REQUEST_SUBMITTED;
+
+ rc = smb_send(server, in_buf, len);
+ cifs_save_when_sent(midQ);
+
+ if (rc < 0)
+ server->sequence_number -= 2;
+
+ cifs_server_unlock(server);
+
+ if (rc < 0)
+ goto out;
+
+ rc = wait_for_response(server, midQ);
+ if (rc != 0) {
+ send_cancel(server, &rqst, midQ);
+ spin_lock(&server->mid_queue_lock);
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) {
+ /* no longer considered to be "in-flight" */
+ midQ->callback = release_mid;
+ spin_unlock(&server->mid_queue_lock);
+ add_credits(server, &credits, 0);
+ return rc;
+ }
+ spin_unlock(&server->mid_queue_lock);
+ }
+
+ rc = cifs_sync_mid_result(midQ, server);
+ if (rc != 0) {
+ add_credits(server, &credits, 0);
+ return rc;
+ }
+
+ if (!midQ->resp_buf || !out_buf ||
+ midQ->mid_state != MID_RESPONSE_READY) {
+ rc = -EIO;
+ cifs_server_dbg(VFS, "Bad MID state?\n");
+ goto out;
+ }
+
+ *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
+ memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
+ rc = cifs_check_receive(midQ, server, 0);
+out:
+ delete_mid(midQ);
+ add_credits(server, &credits, 0);
+
+ return rc;
+}
+
+/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
+ blocking lock to return. */
+
+static int
+send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
+ struct smb_hdr *in_buf,
+ struct smb_hdr *out_buf)
+{
+ int bytes_returned;
+ struct cifs_ses *ses = tcon->ses;
+ LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
+
+ /* We just modify the current in_buf to change
+ the type of lock from LOCKING_ANDX_SHARED_LOCK
+ or LOCKING_ANDX_EXCLUSIVE_LOCK to
+ LOCKING_ANDX_CANCEL_LOCK. */
+
+ pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
+ pSMB->Timeout = 0;
+ pSMB->hdr.Mid = get_next_mid(ses->server);
+
+ return SendReceive(xid, ses, in_buf, out_buf,
+ &bytes_returned, 0);
+}
+
+int
+SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
+ struct smb_hdr *in_buf, struct smb_hdr *out_buf,
+ int *pbytes_returned)
+{
+ int rc = 0;
+ int rstart = 0;
+ struct mid_q_entry *midQ;
+ struct cifs_ses *ses;
+ unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
+ struct kvec iov = { .iov_base = in_buf, .iov_len = len };
+ struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
+ unsigned int instance;
+ struct TCP_Server_Info *server;
+
+ if (tcon == NULL || tcon->ses == NULL) {
+ cifs_dbg(VFS, "Null smb session\n");
+ return -EIO;
+ }
+ ses = tcon->ses;
+ server = ses->server;
+
+ if (server == NULL) {
+ cifs_dbg(VFS, "Null tcp session\n");
+ return -EIO;
+ }
+
+ spin_lock(&server->srv_lock);
+ if (server->tcpStatus == CifsExiting) {
+ spin_unlock(&server->srv_lock);
+ return -ENOENT;
+ }
+ spin_unlock(&server->srv_lock);
+
+ /* Ensure that we do not send more than 50 overlapping requests
+ to the same server. We may make this configurable later or
+ use ses->maxReq */
+
+ if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+ cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
+ len);
+ return -EIO;
+ }
+
+ rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
+ if (rc)
+ return rc;
+
+ /* make sure that we sign in the same order that we send on this socket
+ and avoid races inside tcp sendmsg code that could cause corruption
+ of smb data */
+
+ cifs_server_lock(server);
+
+ rc = allocate_mid(ses, in_buf, &midQ);
+ if (rc) {
+ cifs_server_unlock(server);
+ return rc;
+ }
+
+ rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
+ if (rc) {
+ delete_mid(midQ);
+ cifs_server_unlock(server);
+ return rc;
+ }
+
+ midQ->mid_state = MID_REQUEST_SUBMITTED;
+ rc = smb_send(server, in_buf, len);
+ cifs_save_when_sent(midQ);
+
+ if (rc < 0)
+ server->sequence_number -= 2;
+
+ cifs_server_unlock(server);
+
+ if (rc < 0) {
+ delete_mid(midQ);
+ return rc;
+ }
+
+ /* Wait for a reply - allow signals to interrupt. */
+ rc = wait_event_interruptible(server->response_q,
+ (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
+ ((server->tcpStatus != CifsGood) &&
+ (server->tcpStatus != CifsNew)));
+
+ /* Were we interrupted by a signal ? */
+ spin_lock(&server->srv_lock);
+ if ((rc == -ERESTARTSYS) &&
+ (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) &&
+ ((server->tcpStatus == CifsGood) ||
+ (server->tcpStatus == CifsNew))) {
+ spin_unlock(&server->srv_lock);
+
+ if (in_buf->Command == SMB_COM_TRANSACTION2) {
+ /* POSIX lock. We send a NT_CANCEL SMB to cause the
+ blocking lock to return. */
+ rc = send_cancel(server, &rqst, midQ);
+ if (rc) {
+ delete_mid(midQ);
+ return rc;
+ }
+ } else {
+ /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
+ to cause the blocking lock to return. */
+
+ rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
+
+ /* If we get -ENOLCK back the lock may have
+ already been removed. Don't exit in this case. */
+ if (rc && rc != -ENOLCK) {
+ delete_mid(midQ);
+ return rc;
+ }
+ }
+
+ rc = wait_for_response(server, midQ);
+ if (rc) {
+ send_cancel(server, &rqst, midQ);
+ spin_lock(&server->mid_queue_lock);
+ if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
+ midQ->mid_state == MID_RESPONSE_RECEIVED) {
+ /* no longer considered to be "in-flight" */
+ midQ->callback = release_mid;
+ spin_unlock(&server->mid_queue_lock);
+ return rc;
+ }
+ spin_unlock(&server->mid_queue_lock);
+ }
+
+ /* We got the response - restart system call. */
+ rstart = 1;
+ spin_lock(&server->srv_lock);
+ }
+ spin_unlock(&server->srv_lock);
+
+ rc = cifs_sync_mid_result(midQ, server);
+ if (rc != 0)
+ return rc;
+
+ /* rcvd frame is ok */
+ if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
+ rc = -EIO;
+ cifs_tcon_dbg(VFS, "Bad MID state?\n");
+ goto out;
+ }
+
+ *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
+ memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
+ rc = cifs_check_receive(midQ, server, 0);
+out:
+ delete_mid(midQ);
+ if (rstart && rc == -EACCES)
+ return -ERESTARTSYS;
+ return rc;
+}
diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
index df976ce6aed9..587845a2452d 100644
--- a/fs/smb/client/connect.c
+++ b/fs/smb/client/connect.c
@@ -97,7 +97,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
return rc;
}
-static void smb2_query_server_interfaces(struct work_struct *work)
+void smb2_query_server_interfaces(struct work_struct *work)
{
int rc;
int xid;
@@ -116,18 +116,22 @@ static void smb2_query_server_interfaces(struct work_struct *work)
rc = server->ops->query_server_interfaces(xid, tcon, false);
free_xid(xid);
- if (rc) {
- if (rc == -EOPNOTSUPP)
- return;
-
+ if (rc)
cifs_dbg(FYI, "%s: failed to query server interfaces: %d\n",
__func__, rc);
- }
queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
(SMB_INTERFACE_POLL_INTERVAL * HZ));
}
+#define set_need_reco(server) \
+do { \
+ spin_lock(&server->srv_lock); \
+ if (server->tcpStatus != CifsExiting) \
+ server->tcpStatus = CifsNeedReconnect; \
+ spin_unlock(&server->srv_lock); \
+} while (0)
+
/*
* Update the tcpStatus for the server.
* This is used to signal the cifsd thread to call cifs_reconnect
@@ -141,39 +145,45 @@ void
cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
bool all_channels)
{
- struct TCP_Server_Info *pserver;
+ struct TCP_Server_Info *nserver;
struct cifs_ses *ses;
+ LIST_HEAD(reco);
int i;
- /* If server is a channel, select the primary channel */
- pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
-
/* if we need to signal just this channel */
if (!all_channels) {
- spin_lock(&server->srv_lock);
- if (server->tcpStatus != CifsExiting)
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&server->srv_lock);
+ set_need_reco(server);
return;
}
- spin_lock(&cifs_tcp_ses_lock);
- list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
- if (cifs_ses_exiting(ses))
- continue;
- spin_lock(&ses->chan_lock);
- for (i = 0; i < ses->chan_count; i++) {
- if (!ses->chans[i].server)
+ if (SERVER_IS_CHAN(server))
+ server = server->primary_server;
+ scoped_guard(spinlock, &cifs_tcp_ses_lock) {
+ set_need_reco(server);
+ list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
+ spin_lock(&ses->ses_lock);
+ if (ses->ses_status == SES_EXITING) {
+ spin_unlock(&ses->ses_lock);
continue;
-
- spin_lock(&ses->chans[i].server->srv_lock);
- if (ses->chans[i].server->tcpStatus != CifsExiting)
- ses->chans[i].server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&ses->chans[i].server->srv_lock);
+ }
+ spin_lock(&ses->chan_lock);
+ for (i = 1; i < ses->chan_count; i++) {
+ nserver = ses->chans[i].server;
+ if (!nserver)
+ continue;
+ nserver->srv_count++;
+ list_add(&nserver->rlist, &reco);
+ }
+ spin_unlock(&ses->chan_lock);
+ spin_unlock(&ses->ses_lock);
}
- spin_unlock(&ses->chan_lock);
}
- spin_unlock(&cifs_tcp_ses_lock);
+
+ list_for_each_entry_safe(server, nserver, &reco, rlist) {
+ list_del_init(&server->rlist);
+ set_need_reco(server);
+ cifs_put_tcp_session(server, 0);
+ }
}
/*
@@ -311,15 +321,15 @@ cifs_abort_connection(struct TCP_Server_Info *server)
/* mark submitted MIDs for retry and issue callback */
INIT_LIST_HEAD(&retry_list);
cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
kref_get(&mid->refcount);
if (mid->mid_state == MID_REQUEST_SUBMITTED)
mid->mid_state = MID_RETRY_NEEDED;
list_move(&mid->qhead, &retry_list);
- mid->mid_flags |= MID_DELETED;
+ mid->deleted_from_q = true;
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
cifs_server_unlock(server);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
@@ -348,7 +358,7 @@ static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num
}
cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
- trace_smb3_reconnect(server->CurrentMid, server->conn_id,
+ trace_smb3_reconnect(server->current_mid, server->conn_id,
server->hostname);
server->tcpStatus = CifsNeedReconnect;
@@ -377,6 +387,13 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
if (!cifs_tcp_ses_needs_reconnect(server, 1))
return 0;
+ /*
+ * If the SMB session has been marked for reconnect, also reconnect all
+ * of its connections so the other channels do not end up in a bad state.
+ */
+ if (mark_smb_session)
+ cifs_signal_cifsd_for_reconnect(server, mark_smb_session);
+
cifs_mark_tcp_ses_conns_for_reconnect(server, mark_smb_session);
cifs_abort_connection(server);
@@ -385,7 +402,8 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
try_to_freeze();
cifs_server_lock(server);
- if (!cifs_swn_set_server_dstaddr(server)) {
+ if (!cifs_swn_set_server_dstaddr(server) &&
+ !SERVER_IS_CHAN(server)) {
/* resolve the hostname again to make sure that IP address is up-to-date */
rc = reconn_set_ipaddr_from_hostname(server);
cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
@@ -661,12 +679,12 @@ server_unresponsive(struct TCP_Server_Info *server)
/*
* If we're in the process of mounting a share or reconnecting a session
* and the server abruptly shut down (e.g. socket wasn't closed, packet
- * had been ACK'ed but no SMB response), don't wait longer than 20s to
- * negotiate protocol.
+ * had been ACK'ed but no SMB response), don't wait longer than 20s from
+ * when negotiate actually started.
*/
spin_lock(&server->srv_lock);
if (server->tcpStatus == CifsInNegotiate &&
- time_after(jiffies, server->lstrp + 20 * HZ)) {
+ time_after(jiffies, server->neg_start + 20 * HZ)) {
spin_unlock(&server->srv_lock);
cifs_reconnect(server, false);
return true;
@@ -866,13 +884,13 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
* server there should be exactly one pending mid
* corresponding to SMB1/SMB2 Negotiate packet.
*/
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
kref_get(&mid->refcount);
list_move(&mid->qhead, &dispose_list);
- mid->mid_flags |= MID_DELETED;
+ mid->deleted_from_q = true;
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
/* Now try to reconnect once with NetBIOS session. */
server->with_rfc1001 = true;
@@ -939,7 +957,7 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
- spin_lock(&mid->server->mid_lock);
+ spin_lock(&mid->server->mid_queue_lock);
if (!malformed)
mid->mid_state = MID_RESPONSE_RECEIVED;
else
@@ -948,13 +966,13 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
* Trying to handle/dequeue a mid after the send_recv()
* function has finished processing it is a bug.
*/
- if (mid->mid_flags & MID_DELETED) {
- spin_unlock(&mid->server->mid_lock);
+ if (mid->deleted_from_q == true) {
+ spin_unlock(&mid->server->mid_queue_lock);
pr_warn_once("trying to dequeue a deleted mid\n");
} else {
list_del_init(&mid->qhead);
- mid->mid_flags |= MID_DELETED;
- spin_unlock(&mid->server->mid_lock);
+ mid->deleted_from_q = true;
+ spin_unlock(&mid->server->mid_queue_lock);
}
}
@@ -1083,16 +1101,16 @@ clean_demultiplex_info(struct TCP_Server_Info *server)
struct list_head *tmp, *tmp2;
LIST_HEAD(dispose_list);
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid);
kref_get(&mid_entry->refcount);
mid_entry->mid_state = MID_SHUTDOWN;
list_move(&mid_entry->qhead, &dispose_list);
- mid_entry->mid_flags |= MID_DELETED;
+ mid_entry->deleted_from_q = true;
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
/* now walk dispose list and issue callbacks */
list_for_each_safe(tmp, tmp2, &dispose_list) {
@@ -1224,7 +1242,7 @@ smb2_add_credits_from_hdr(char *buffer, struct TCP_Server_Info *server)
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- trace_smb3_hdr_credits(server->CurrentMid,
+ trace_smb3_hdr_credits(server->current_mid,
server->conn_id, server->hostname, scredits,
le16_to_cpu(shdr->CreditRequest), in_flight);
cifs_server_dbg(FYI, "%s: added %u credits total=%d\n",
@@ -1804,7 +1822,8 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx,
tcp_ses->compression.requested = ctx->compress;
spin_lock_init(&tcp_ses->req_lock);
spin_lock_init(&tcp_ses->srv_lock);
- spin_lock_init(&tcp_ses->mid_lock);
+ spin_lock_init(&tcp_ses->mid_queue_lock);
+ spin_lock_init(&tcp_ses->mid_counter_lock);
INIT_LIST_HEAD(&tcp_ses->tcp_ses_list);
INIT_LIST_HEAD(&tcp_ses->smb_ses_list);
INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request);
@@ -2862,20 +2881,14 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
tcon->max_cached_dirs = ctx->max_cached_dirs;
tcon->nodelete = ctx->nodelete;
tcon->local_lease = ctx->local_lease;
- INIT_LIST_HEAD(&tcon->pending_opens);
tcon->status = TID_GOOD;
- INIT_DELAYED_WORK(&tcon->query_interfaces,
- smb2_query_server_interfaces);
if (ses->server->dialect >= SMB30_PROT_ID &&
(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
/* schedule query interfaces poll */
queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
(SMB_INTERFACE_POLL_INTERVAL * HZ));
}
-#ifdef CONFIG_CIFS_DFS_UPCALL
- INIT_DELAYED_WORK(&tcon->dfs_cache_work, dfs_cache_refresh);
-#endif
spin_lock(&cifs_tcp_ses_lock);
list_add(&tcon->tcon_list, &ses->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
@@ -3350,18 +3363,15 @@ generic_ip_connect(struct TCP_Server_Info *server)
struct net *net = cifs_net_ns(server);
struct sock *sk;
- rc = __sock_create(net, sfamily, SOCK_STREAM,
- IPPROTO_TCP, &server->ssocket, 1);
+ rc = sock_create_kern(net, sfamily, SOCK_STREAM,
+ IPPROTO_TCP, &server->ssocket);
if (rc < 0) {
cifs_server_dbg(VFS, "Error %d creating socket\n", rc);
return rc;
}
sk = server->ssocket->sk;
- __netns_tracker_free(net, &sk->ns_tracker, false);
- sk->sk_net_refcnt = 1;
- get_net_track(net, &sk->ns_tracker, GFP_KERNEL);
- sock_inuse_add(net, 1);
+ sk_net_refcnt_upgrade(sk);
/* BB other socket options to set KEEPALIVE, NODELAY? */
cifs_dbg(FYI, "Socket created\n");
@@ -3714,9 +3724,15 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
goto out;
}
- /* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
- if (tcon->posix_extensions)
+ /*
+ * if new SMB3.11 POSIX extensions are supported, do not change anything in the
+ * path (i.e., do not remap / and \ and do not map any special characters)
+ */
+ if (tcon->posix_extensions) {
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
+ cifs_sb->mnt_cifs_flags &= ~(CIFS_MOUNT_MAP_SFM_CHR |
+ CIFS_MOUNT_MAP_SPECIAL_CHR);
+ }
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
/* tell server which Unix caps we support */
@@ -3753,28 +3769,7 @@ int cifs_mount_get_tcon(struct cifs_mount_ctx *mnt_ctx)
}
}
- /*
- * Clamp the rsize/wsize mount arguments if they are too big for the server
- * and set the rsize/wsize to the negotiated values if not passed in by
- * the user on mount
- */
- if ((cifs_sb->ctx->wsize == 0) ||
- (cifs_sb->ctx->wsize > server->ops->negotiate_wsize(tcon, ctx))) {
- cifs_sb->ctx->wsize =
- round_down(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE);
- /*
- * in the very unlikely event that the server sent a max write size under PAGE_SIZE,
- * (which would get rounded down to 0) then reset wsize to absolute minimum eg 4096
- */
- if (cifs_sb->ctx->wsize == 0) {
- cifs_sb->ctx->wsize = PAGE_SIZE;
- cifs_dbg(VFS, "wsize too small, reset to minimum ie PAGE_SIZE, usually 4096\n");
- }
- }
- if ((cifs_sb->ctx->rsize == 0) ||
- (cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
- cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
-
+ cifs_negotiate_iosize(server, cifs_sb->ctx, tcon);
/*
* The cookie is initialized from volume info returned above.
* Inside cifs_fscache_get_super_cookie it checks
@@ -4210,7 +4205,9 @@ retry:
return 0;
}
+ server->lstrp = jiffies;
server->tcpStatus = CifsInNegotiate;
+ server->neg_start = jiffies;
spin_unlock(&server->srv_lock);
rc = server->ops->negotiate(xid, ses, server);
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index d1e95632ac54..5223edf6d11a 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -23,6 +23,7 @@
#include "fs_context.h"
#include "cifs_ioctl.h"
#include "fscache.h"
+#include "cached_dir.h"
static void
renew_parental_timestamps(struct dentry *direntry)
@@ -189,7 +190,9 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
int disposition;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
+ struct cached_fid *parent_cfid = NULL;
int rdwr_for_fscache = 0;
+ __le32 lease_flags = 0;
*oplock = 0;
if (tcon->ses->server->oplocks)
@@ -311,7 +314,28 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
+
retry_open:
+ if (tcon->cfids && direntry->d_parent && server->dialect >= SMB30_PROT_ID) {
+ parent_cfid = NULL;
+ spin_lock(&tcon->cfids->cfid_list_lock);
+ list_for_each_entry(parent_cfid, &tcon->cfids->entries, entry) {
+ if (parent_cfid->dentry == direntry->d_parent) {
+ cifs_dbg(FYI, "found a parent cached file handle\n");
+ if (parent_cfid->has_lease && parent_cfid->time) {
+ lease_flags
+ |= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
+ memcpy(fid->parent_lease_key,
+ parent_cfid->fid.lease_key,
+ SMB2_LEASE_KEY_SIZE);
+ parent_cfid->dirents.is_valid = false;
+ }
+ break;
+ }
+ }
+ spin_unlock(&tcon->cfids->cfid_list_lock);
+ }
+
oparms = (struct cifs_open_parms) {
.tcon = tcon,
.cifs_sb = cifs_sb,
@@ -320,6 +344,7 @@ retry_open:
.disposition = disposition,
.path = full_path,
.fid = fid,
+ .lease_flags = lease_flags,
.mode = mode,
};
rc = server->ops->open(xid, &oparms, oplock, buf);
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index 9e8f404b9e56..186e061068be 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -52,6 +52,7 @@ static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
struct netfs_io_stream *stream = &req->rreq.io_streams[subreq->stream_nr];
struct TCP_Server_Info *server;
struct cifsFileInfo *open_file = req->cfile;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(wdata->rreq->inode->i_sb);
size_t wsize = req->rreq.wsize;
int rc;
@@ -63,6 +64,10 @@ static void cifs_prepare_write(struct netfs_io_subrequest *subreq)
server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses);
wdata->server = server;
+ if (cifs_sb->ctx->wsize == 0)
+ cifs_negotiate_wsize(server, cifs_sb->ctx,
+ tlink_tcon(req->cfile->tlink));
+
retry:
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, false);
@@ -130,7 +135,7 @@ fail:
else
trace_netfs_sreq(subreq, netfs_sreq_trace_fail);
add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
- cifs_write_subrequest_terminated(wdata, rc, false);
+ cifs_write_subrequest_terminated(wdata, rc);
goto out;
}
@@ -161,9 +166,8 @@ static int cifs_prepare_read(struct netfs_io_subrequest *subreq)
rdata->server = server;
if (cifs_sb->ctx->rsize == 0)
- cifs_sb->ctx->rsize =
- server->ops->negotiate_rsize(tlink_tcon(req->cfile->tlink),
- cifs_sb->ctx);
+ cifs_negotiate_rsize(server, cifs_sb->ctx,
+ tlink_tcon(req->cfile->tlink));
rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize,
&size, &rdata->credits);
@@ -219,7 +223,8 @@ static void cifs_issue_read(struct netfs_io_subrequest *subreq)
goto failed;
}
- if (subreq->rreq->origin != NETFS_DIO_READ)
+ if (subreq->rreq->origin != NETFS_UNBUFFERED_READ &&
+ subreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
@@ -998,15 +1003,18 @@ int cifs_open(struct inode *inode, struct file *file)
rc = cifs_get_readable_path(tcon, full_path, &cfile);
}
if (rc == 0) {
- if (file->f_flags == cfile->f_flags) {
+ unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
+ unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
+
+ if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
+ (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
file->private_data = cfile;
spin_lock(&CIFS_I(inode)->deferred_lock);
cifs_del_deferred_close(cfile);
spin_unlock(&CIFS_I(inode)->deferred_lock);
goto use_cache;
- } else {
- _cifsFileInfo_put(cfile, true, false);
}
+ _cifsFileInfo_put(cfile, true, false);
} else {
/* hard link on the deferred close file */
rc = cifs_get_hardlink_path(tcon, inode, file);
@@ -2423,8 +2431,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
return rc;
}
-void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
- bool was_async)
+void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result)
{
struct netfs_io_request *wreq = wdata->rreq;
struct netfs_inode *ictx = netfs_inode(wreq->inode);
@@ -2441,7 +2448,7 @@ void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t
netfs_resize_file(ictx, wrend, true);
}
- netfs_write_subrequest_terminated(&wdata->subreq, result, was_async);
+ netfs_write_subrequest_terminated(&wdata->subreq, result);
}
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
@@ -2992,38 +2999,38 @@ static const struct vm_operations_struct cifs_file_vm_ops = {
.page_mkwrite = cifs_page_mkwrite,
};
-int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
+int cifs_file_strict_mmap_prepare(struct vm_area_desc *desc)
{
int xid, rc = 0;
- struct inode *inode = file_inode(file);
+ struct inode *inode = file_inode(desc->file);
xid = get_xid();
if (!CIFS_CACHE_READ(CIFS_I(inode)))
rc = cifs_zap_mapping(inode);
if (!rc)
- rc = generic_file_mmap(file, vma);
+ rc = generic_file_mmap_prepare(desc);
if (!rc)
- vma->vm_ops = &cifs_file_vm_ops;
+ desc->vm_ops = &cifs_file_vm_ops;
free_xid(xid);
return rc;
}
-int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
+int cifs_file_mmap_prepare(struct vm_area_desc *desc)
{
int rc, xid;
xid = get_xid();
- rc = cifs_revalidate_file(file);
+ rc = cifs_revalidate_file(desc->file);
if (rc)
cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
rc);
if (!rc)
- rc = generic_file_mmap(file, vma);
+ rc = generic_file_mmap_prepare(desc);
if (!rc)
- vma->vm_ops = &cifs_file_vm_ops;
+ desc->vm_ops = &cifs_file_vm_ops;
free_xid(xid);
return rc;
@@ -3081,7 +3088,8 @@ void cifs_oplock_break(struct work_struct *work)
struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
oplock_break);
struct inode *inode = d_inode(cfile->dentry);
- struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct super_block *sb = inode->i_sb;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
@@ -3091,6 +3099,12 @@ void cifs_oplock_break(struct work_struct *work)
__u64 persistent_fid, volatile_fid;
__u16 net_fid;
+ /*
+ * Hold a reference to the superblock to prevent it and its inodes from
+ * being freed while we are accessing cinode. Otherwise, _cifsFileInfo_put()
+ * may release the last reference to the sb and trigger inode eviction.
+ */
+ cifs_sb_active(sb);
wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
TASK_UNINTERRUPTIBLE);
@@ -3163,6 +3177,7 @@ oplock_break_ack:
cifs_put_tlink(tlink);
out:
cifs_done_oplock_break(cinode);
+ cifs_sb_deactive(sb);
}
static int cifs_swap_activate(struct swap_info_struct *sis,
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 2980941b9667..072383899e81 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1021,6 +1021,7 @@ static int smb3_reconfigure(struct fs_context *fc)
struct dentry *root = fc->root;
struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
struct cifs_ses *ses = cifs_sb_master_tcon(cifs_sb)->ses;
+ unsigned int rsize = ctx->rsize, wsize = ctx->wsize;
char *new_password = NULL, *new_password2 = NULL;
bool need_recon = false;
int rc;
@@ -1103,11 +1104,8 @@ static int smb3_reconfigure(struct fs_context *fc)
STEAL_STRING(cifs_sb, ctx, iocharset);
/* if rsize or wsize not passed in on remount, use previous values */
- if (ctx->rsize == 0)
- ctx->rsize = cifs_sb->ctx->rsize;
- if (ctx->wsize == 0)
- ctx->wsize = cifs_sb->ctx->wsize;
-
+ ctx->rsize = rsize ? CIFS_ALIGN_RSIZE(fc, rsize) : cifs_sb->ctx->rsize;
+ ctx->wsize = wsize ? CIFS_ALIGN_WSIZE(fc, wsize) : cifs_sb->ctx->wsize;
smb3_cleanup_fs_context_contents(cifs_sb->ctx);
rc = smb3_fs_context_dup(cifs_sb->ctx, ctx);
@@ -1312,7 +1310,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
__func__);
goto cifs_parse_mount_err;
}
- ctx->bsize = result.uint_32;
+ ctx->bsize = CIFS_ALIGN_BSIZE(fc, result.uint_32);
ctx->got_bsize = true;
break;
case Opt_rasize:
@@ -1336,24 +1334,13 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
ctx->rasize = result.uint_32;
break;
case Opt_rsize:
- ctx->rsize = result.uint_32;
+ ctx->rsize = CIFS_ALIGN_RSIZE(fc, result.uint_32);
ctx->got_rsize = true;
ctx->vol_rsize = ctx->rsize;
break;
case Opt_wsize:
- ctx->wsize = result.uint_32;
+ ctx->wsize = CIFS_ALIGN_WSIZE(fc, result.uint_32);
ctx->got_wsize = true;
- if (ctx->wsize % PAGE_SIZE != 0) {
- ctx->wsize = round_down(ctx->wsize, PAGE_SIZE);
- if (ctx->wsize == 0) {
- ctx->wsize = PAGE_SIZE;
- cifs_dbg(VFS, "wsize too small, reset to minimum %ld\n", PAGE_SIZE);
- } else {
- cifs_dbg(VFS,
- "wsize rounded down to %d to multiple of PAGE_SIZE %ld\n",
- ctx->wsize, PAGE_SIZE);
- }
- }
ctx->vol_wsize = ctx->wsize;
break;
case Opt_acregmax:
@@ -1488,35 +1475,21 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
pr_warn("username too long\n");
goto cifs_parse_mount_err;
}
- ctx->username = kstrdup(param->string, GFP_KERNEL);
- if (ctx->username == NULL) {
- cifs_errorf(fc, "OOM when copying username string\n");
- goto cifs_parse_mount_err;
- }
+ ctx->username = no_free_ptr(param->string);
break;
case Opt_pass:
kfree_sensitive(ctx->password);
ctx->password = NULL;
if (strlen(param->string) == 0)
break;
-
- ctx->password = kstrdup(param->string, GFP_KERNEL);
- if (ctx->password == NULL) {
- cifs_errorf(fc, "OOM when copying password string\n");
- goto cifs_parse_mount_err;
- }
+ ctx->password = no_free_ptr(param->string);
break;
case Opt_pass2:
kfree_sensitive(ctx->password2);
ctx->password2 = NULL;
if (strlen(param->string) == 0)
break;
-
- ctx->password2 = kstrdup(param->string, GFP_KERNEL);
- if (ctx->password2 == NULL) {
- cifs_errorf(fc, "OOM when copying password2 string\n");
- goto cifs_parse_mount_err;
- }
+ ctx->password2 = no_free_ptr(param->string);
break;
case Opt_ip:
if (strlen(param->string) == 0) {
@@ -1539,11 +1512,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
}
kfree(ctx->domainname);
- ctx->domainname = kstrdup(param->string, GFP_KERNEL);
- if (ctx->domainname == NULL) {
- cifs_errorf(fc, "OOM when copying domainname string\n");
- goto cifs_parse_mount_err;
- }
+ ctx->domainname = no_free_ptr(param->string);
cifs_dbg(FYI, "Domain name set\n");
break;
case Opt_srcaddr:
@@ -1563,11 +1532,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
if (strncasecmp(param->string, "default", 7) != 0) {
kfree(ctx->iocharset);
- ctx->iocharset = kstrdup(param->string, GFP_KERNEL);
- if (ctx->iocharset == NULL) {
- cifs_errorf(fc, "OOM when copying iocharset string\n");
- goto cifs_parse_mount_err;
- }
+ ctx->iocharset = no_free_ptr(param->string);
}
/* if iocharset not set then load_nls_default
* is used by caller
@@ -1687,6 +1652,7 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
pr_warn_once("conflicting posix mount options specified\n");
ctx->linux_ext = 1;
ctx->no_linux_ext = 0;
+ ctx->nonativesocket = 1; /* POSIX mounts use NFS style reparse points */
}
break;
case Opt_nocase:
@@ -1837,10 +1803,14 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
cifs_errorf(fc, "symlinkroot mount options must be absolute path\n");
goto cifs_parse_mount_err;
}
- kfree(ctx->symlinkroot);
- ctx->symlinkroot = kstrdup(param->string, GFP_KERNEL);
- if (!ctx->symlinkroot)
+ if (strnlen(param->string, PATH_MAX) == PATH_MAX) {
+ cifs_errorf(fc, "symlinkroot path too long (max path length: %u)\n",
+ PATH_MAX - 1);
goto cifs_parse_mount_err;
+ }
+ kfree(ctx->symlinkroot);
+ ctx->symlinkroot = param->string;
+ param->string = NULL;
break;
}
/* case Opt_ignore: - is ignored as expected ... */
@@ -1850,13 +1820,6 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
goto cifs_parse_mount_err;
}
- /*
- * By default resolve all native absolute symlinks relative to "/mnt/".
- * Same default has drvfs driver running in WSL for resolving SMB shares.
- */
- if (!ctx->symlinkroot)
- ctx->symlinkroot = kstrdup("/mnt/", GFP_KERNEL);
-
return 0;
cifs_parse_mount_err:
@@ -1867,24 +1830,6 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
return -EINVAL;
}
-enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb)
-{
- if (cifs_sb->ctx->symlink_type == CIFS_SYMLINK_TYPE_DEFAULT) {
- if (cifs_sb->ctx->mfsymlinks)
- return CIFS_SYMLINK_TYPE_MFSYMLINKS;
- else if (cifs_sb->ctx->sfu_emul)
- return CIFS_SYMLINK_TYPE_SFU;
- else if (cifs_sb->ctx->linux_ext && !cifs_sb->ctx->no_linux_ext)
- return CIFS_SYMLINK_TYPE_UNIX;
- else if (cifs_sb->ctx->reparse_type != CIFS_REPARSE_TYPE_NONE)
- return CIFS_SYMLINK_TYPE_NATIVE;
- else
- return CIFS_SYMLINK_TYPE_NONE;
- } else {
- return cifs_sb->ctx->symlink_type;
- }
-}
-
int smb3_init_fs_context(struct fs_context *fc)
{
struct smb3_fs_context *ctx;
diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
index d1d29249bcdb..b0fec6b9a23b 100644
--- a/fs/smb/client/fs_context.h
+++ b/fs/smb/client/fs_context.h
@@ -20,6 +20,21 @@
cifs_dbg(VFS, fmt, ## __VA_ARGS__); \
} while (0)
+static inline size_t cifs_io_align(struct fs_context *fc,
+ const char *name, size_t size)
+{
+ if (!size || !IS_ALIGNED(size, PAGE_SIZE)) {
+ cifs_errorf(fc, "unaligned %s, making it a multiple of %lu bytes\n",
+ name, PAGE_SIZE);
+ size = umax(round_down(size, PAGE_SIZE), PAGE_SIZE);
+ }
+ return size;
+}
+
+#define CIFS_ALIGN_WSIZE(_fc, _size) cifs_io_align(_fc, "wsize", _size)
+#define CIFS_ALIGN_RSIZE(_fc, _size) cifs_io_align(_fc, "rsize", _size)
+#define CIFS_ALIGN_BSIZE(_fc, _size) cifs_io_align(_fc, "bsize", _size)
+
enum smb_version {
Smb_1 = 1,
Smb_20,
@@ -326,7 +341,23 @@ struct smb3_fs_context {
extern const struct fs_parameter_spec smb3_fs_parameters[];
-extern enum cifs_symlink_type get_cifs_symlink_type(struct cifs_sb_info *cifs_sb);
+static inline enum cifs_symlink_type cifs_symlink_type(struct cifs_sb_info *cifs_sb)
+{
+ bool posix = cifs_sb_master_tcon(cifs_sb)->posix_extensions;
+
+ if (cifs_sb->ctx->symlink_type != CIFS_SYMLINK_TYPE_DEFAULT)
+ return cifs_sb->ctx->symlink_type;
+
+ if (cifs_sb->ctx->mfsymlinks)
+ return CIFS_SYMLINK_TYPE_MFSYMLINKS;
+ else if (cifs_sb->ctx->sfu_emul)
+ return CIFS_SYMLINK_TYPE_SFU;
+ else if (cifs_sb->ctx->linux_ext && !cifs_sb->ctx->no_linux_ext)
+ return posix ? CIFS_SYMLINK_TYPE_NATIVE : CIFS_SYMLINK_TYPE_UNIX;
+ else if (cifs_sb->ctx->reparse_type != CIFS_REPARSE_TYPE_NONE)
+ return CIFS_SYMLINK_TYPE_NATIVE;
+ return CIFS_SYMLINK_TYPE_NONE;
+}
extern int smb3_init_fs_context(struct fs_context *fc);
extern void smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx);
@@ -361,4 +392,36 @@ static inline void cifs_mount_unlock(void)
mutex_unlock(&cifs_mount_mutex);
}
+static inline void cifs_negotiate_rsize(struct TCP_Server_Info *server,
+ struct smb3_fs_context *ctx,
+ struct cifs_tcon *tcon)
+{
+ unsigned int size;
+
+ size = umax(server->ops->negotiate_rsize(tcon, ctx), PAGE_SIZE);
+ if (ctx->rsize)
+ size = umax(umin(ctx->rsize, size), PAGE_SIZE);
+ ctx->rsize = round_down(size, PAGE_SIZE);
+}
+
+static inline void cifs_negotiate_wsize(struct TCP_Server_Info *server,
+ struct smb3_fs_context *ctx,
+ struct cifs_tcon *tcon)
+{
+ unsigned int size;
+
+ size = umax(server->ops->negotiate_wsize(tcon, ctx), PAGE_SIZE);
+ if (ctx->wsize)
+ size = umax(umin(ctx->wsize, size), PAGE_SIZE);
+ ctx->wsize = round_down(size, PAGE_SIZE);
+}
+
+static inline void cifs_negotiate_iosize(struct TCP_Server_Info *server,
+ struct smb3_fs_context *ctx,
+ struct cifs_tcon *tcon)
+{
+ cifs_negotiate_rsize(server, ctx, tcon);
+ cifs_negotiate_wsize(server, ctx, tcon);
+}
+
#endif
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index 56439da4f119..0a9935ce05a5 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -506,7 +506,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
le16_to_cpu(tcon->ses->server->cipher_type);
pkey_inf.Suid = tcon->ses->Suid;
memcpy(pkey_inf.auth_key, tcon->ses->auth_key.response,
- 16 /* SMB2_NTLMV2_SESSKEY_SIZE */);
+ SMB2_NTLMV2_SESSKEY_SIZE);
memcpy(pkey_inf.smb3decryptionkey,
tcon->ses->smb3decryptionkey, SMB3_SIGN_KEY_SIZE);
memcpy(pkey_inf.smb3encryptionkey,
diff --git a/fs/smb/client/link.c b/fs/smb/client/link.c
index 769752ad2c5c..fe80e711cd75 100644
--- a/fs/smb/client/link.c
+++ b/fs/smb/client/link.c
@@ -19,6 +19,7 @@
#include "smb2proto.h"
#include "cifs_ioctl.h"
#include "fs_context.h"
+#include "reparse.h"
/*
* M-F Symlink Functions - Begin
@@ -570,7 +571,6 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
int rc = -EOPNOTSUPP;
unsigned int xid;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
- struct TCP_Server_Info *server;
struct tcon_link *tlink;
struct cifs_tcon *pTcon;
const char *full_path;
@@ -593,7 +593,6 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
goto symlink_exit;
}
pTcon = tlink_tcon(tlink);
- server = cifs_pick_channel(pTcon->ses);
full_path = build_path_from_dentry(direntry, page);
if (IS_ERR(full_path)) {
@@ -606,14 +605,7 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
/* BB what if DFS and this volume is on different share? BB */
rc = -EOPNOTSUPP;
- switch (get_cifs_symlink_type(cifs_sb)) {
- case CIFS_SYMLINK_TYPE_DEFAULT:
- /* should not happen, get_cifs_symlink_type() resolves the default */
- break;
-
- case CIFS_SYMLINK_TYPE_NONE:
- break;
-
+ switch (cifs_symlink_type(cifs_sb)) {
case CIFS_SYMLINK_TYPE_UNIX:
#ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY
if (pTcon->unix_ext) {
@@ -643,16 +635,14 @@ cifs_symlink(struct mnt_idmap *idmap, struct inode *inode,
case CIFS_SYMLINK_TYPE_NATIVE:
case CIFS_SYMLINK_TYPE_NFS:
case CIFS_SYMLINK_TYPE_WSL:
- if (server->ops->create_reparse_symlink &&
- (le32_to_cpu(pTcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS)) {
- rc = server->ops->create_reparse_symlink(xid, inode,
- direntry,
- pTcon,
- full_path,
- symname);
+ if (CIFS_REPARSE_SUPPORT(pTcon)) {
+ rc = create_reparse_symlink(xid, inode, direntry, pTcon,
+ full_path, symname);
goto symlink_exit;
}
break;
+ default:
+ break;
}
if (rc == 0) {
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 7b6ed9b23e71..da23cc12a52c 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -151,6 +151,12 @@ tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
#ifdef CONFIG_CIFS_DFS_UPCALL
INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif
+ INIT_LIST_HEAD(&ret_buf->pending_opens);
+ INIT_DELAYED_WORK(&ret_buf->query_interfaces,
+ smb2_query_server_interfaces);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ INIT_DELAYED_WORK(&ret_buf->dfs_cache_work, dfs_cache_refresh);
+#endif
return ret_buf;
}
@@ -326,6 +332,14 @@ check_smb_hdr(struct smb_hdr *smb)
if (smb->Command == SMB_COM_LOCKING_ANDX)
return 0;
+ /*
+ * Windows NT server returns an error response (e.g. STATUS_DELETE_PENDING
+ * or STATUS_OBJECT_NAME_NOT_FOUND or ERRDOS/ERRbadfile or any other)
+ * for some TRANS2 requests without the RESPONSE flag set in the header.
+ */
+ if (smb->Command == SMB_COM_TRANSACTION2 && smb->Status.CifsError != 0)
+ return 0;
+
cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
get_mid(smb));
return 1;
diff --git a/fs/smb/client/namespace.c b/fs/smb/client/namespace.c
index e3f9213131c4..52a520349cb7 100644
--- a/fs/smb/client/namespace.c
+++ b/fs/smb/client/namespace.c
@@ -146,6 +146,9 @@ static char *automount_fullpath(struct dentry *dentry, void *page)
}
spin_unlock(&tcon->tc_lock);
+ if (unlikely(!page))
+ return ERR_PTR(-ENOMEM);
+
s = dentry_path_raw(dentry, page, PATH_MAX);
if (IS_ERR(s))
return s;
@@ -283,7 +286,6 @@ struct vfsmount *cifs_d_automount(struct path *path)
return newmnt;
}
- mntget(newmnt); /* prevent immediate expiration */
mnt_set_expiry(newmnt, &cifs_automount_list);
schedule_delayed_work(&cifs_automount_task,
cifs_mountpoint_expiry_timeout);
diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
index 50f96259d9ad..4e5460206397 100644
--- a/fs/smb/client/readdir.c
+++ b/fs/smb/client/readdir.c
@@ -9,6 +9,7 @@
*
*/
#include <linux/fs.h>
+#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/stat.h>
@@ -78,7 +79,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
cifs_dbg(FYI, "%s: for %s\n", __func__, name->name);
- dentry = d_hash_and_lookup(parent, name);
+ dentry = try_lookup_noperm(name, parent);
if (!dentry) {
/*
* If we know that the inode will need to be revalidated
@@ -263,7 +264,7 @@ cifs_posix_to_fattr(struct cifs_fattr *fattr, struct smb2_posix_info *info,
/* The Mode field in the response can now include the file type as well */
fattr->cf_mode = wire_mode_to_posix(le32_to_cpu(info->Mode),
fattr->cf_cifsattrs & ATTR_DIRECTORY);
- fattr->cf_dtype = S_DT(le32_to_cpu(info->Mode));
+ fattr->cf_dtype = S_DT(fattr->cf_mode);
switch (fattr->cf_mode & S_IFMT) {
case S_IFLNK:
@@ -733,7 +734,10 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
else
cifs_buf_release(cfile->srch_inf.
ntwrk_buf_start);
+ /* Reset all pointers to the network buffer to prevent stale references */
cfile->srch_inf.ntwrk_buf_start = NULL;
+ cfile->srch_inf.srch_entries_start = NULL;
+ cfile->srch_inf.last_entry = NULL;
}
rc = initiate_cifs_search(xid, file, full_path);
if (rc) {
@@ -756,11 +760,11 @@ find_cifs_entry(const unsigned int xid, struct cifs_tcon *tcon, loff_t pos,
rc = server->ops->query_dir_next(xid, tcon, &cfile->fid,
search_flags,
&cfile->srch_inf);
+ if (rc)
+ return -ENOENT;
/* FindFirst/Next set last_entry to NULL on malformed reply */
if (cfile->srch_inf.last_entry)
cifs_save_resume_key(cfile->srch_inf.last_entry, cfile);
- if (rc)
- return -ENOENT;
}
if (index_to_find < cfile->srch_inf.index_of_last_entry) {
/* we found the buffer that contains the entry */
@@ -847,9 +851,9 @@ static bool emit_cached_dirents(struct cached_dirents *cde,
}
static void update_cached_dirents_count(struct cached_dirents *cde,
- struct dir_context *ctx)
+ struct file *file)
{
- if (cde->ctx != ctx)
+ if (cde->file != file)
return;
if (cde->is_valid || cde->is_failed)
return;
@@ -858,9 +862,9 @@ static void update_cached_dirents_count(struct cached_dirents *cde,
}
static void finished_cached_dirents_count(struct cached_dirents *cde,
- struct dir_context *ctx)
+ struct dir_context *ctx, struct file *file)
{
- if (cde->ctx != ctx)
+ if (cde->file != file)
return;
if (cde->is_valid || cde->is_failed)
return;
@@ -873,11 +877,12 @@ static void finished_cached_dirents_count(struct cached_dirents *cde,
static void add_cached_dirent(struct cached_dirents *cde,
struct dir_context *ctx,
const char *name, int namelen,
- struct cifs_fattr *fattr)
+ struct cifs_fattr *fattr,
+ struct file *file)
{
struct cached_dirent *de;
- if (cde->ctx != ctx)
+ if (cde->file != file)
return;
if (cde->is_valid || cde->is_failed)
return;
@@ -907,7 +912,8 @@ static void add_cached_dirent(struct cached_dirents *cde,
static bool cifs_dir_emit(struct dir_context *ctx,
const char *name, int namelen,
struct cifs_fattr *fattr,
- struct cached_fid *cfid)
+ struct cached_fid *cfid,
+ struct file *file)
{
bool rc;
ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
@@ -919,7 +925,7 @@ static bool cifs_dir_emit(struct dir_context *ctx,
if (cfid) {
mutex_lock(&cfid->dirents.de_mutex);
add_cached_dirent(&cfid->dirents, ctx, name, namelen,
- fattr);
+ fattr, file);
mutex_unlock(&cfid->dirents.de_mutex);
}
@@ -1019,7 +1025,7 @@ static int cifs_filldir(char *find_entry, struct file *file,
cifs_prime_dcache(file_dentry(file), &name, &fattr);
return !cifs_dir_emit(ctx, name.name, name.len,
- &fattr, cfid);
+ &fattr, cfid, file);
}
@@ -1070,8 +1076,8 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
* we need to initialize scanning and storing the
* directory content.
*/
- if (ctx->pos == 0 && cfid->dirents.ctx == NULL) {
- cfid->dirents.ctx = ctx;
+ if (ctx->pos == 0 && cfid->dirents.file == NULL) {
+ cfid->dirents.file = file;
cfid->dirents.pos = 2;
}
/*
@@ -1139,7 +1145,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
} else {
if (cfid) {
mutex_lock(&cfid->dirents.de_mutex);
- finished_cached_dirents_count(&cfid->dirents, ctx);
+ finished_cached_dirents_count(&cfid->dirents, ctx, file);
mutex_unlock(&cfid->dirents.de_mutex);
}
cifs_dbg(FYI, "Could not find entry\n");
@@ -1180,7 +1186,7 @@ int cifs_readdir(struct file *file, struct dir_context *ctx)
ctx->pos++;
if (cfid) {
mutex_lock(&cfid->dirents.de_mutex);
- update_cached_dirents_count(&cfid->dirents, ctx);
+ update_cached_dirents_count(&cfid->dirents, file);
mutex_unlock(&cfid->dirents.de_mutex);
}
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index bb25e77c5540..7869cec58f52 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -34,11 +34,11 @@ static int detect_directory_symlink_target(struct cifs_sb_info *cifs_sb,
const char *symname,
bool *directory);
-int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
+int create_reparse_symlink(const unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, const char *symname)
{
- switch (get_cifs_symlink_type(CIFS_SB(inode->i_sb))) {
+ switch (cifs_symlink_type(CIFS_SB(inode->i_sb))) {
case CIFS_SYMLINK_TYPE_NATIVE:
return create_native_symlink(xid, inode, dentry, tcon, full_path, symname);
case CIFS_SYMLINK_TYPE_NFS:
@@ -57,6 +57,7 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
struct reparse_symlink_data_buffer *buf = NULL;
struct cifs_open_info_data data = {};
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ const char *symroot = cifs_sb->ctx->symlinkroot;
struct inode *new;
struct kvec iov;
__le16 *path = NULL;
@@ -82,7 +83,8 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
.symlink_target = symlink_target,
};
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && symname[0] == '/') {
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) &&
+ symroot && symname[0] == '/') {
/*
* This is a request to create an absolute symlink on the server
* which does not support POSIX paths, and expects symlink in
@@ -92,7 +94,7 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
* ensure compatibility of this symlink stored in absolute form
* on the SMB server.
*/
- if (!strstarts(symname, cifs_sb->ctx->symlinkroot)) {
+ if (!strstarts(symname, symroot)) {
/*
* If the absolute Linux symlink target path is not
* inside "symlinkroot" location then there is no way
@@ -101,12 +103,12 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
cifs_dbg(VFS,
"absolute symlink '%s' cannot be converted to NT format "
"because it is outside of symlinkroot='%s'\n",
- symname, cifs_sb->ctx->symlinkroot);
+ symname, symroot);
rc = -EINVAL;
goto out;
}
- len = strlen(cifs_sb->ctx->symlinkroot);
- if (cifs_sb->ctx->symlinkroot[len-1] != '/')
+ len = strlen(symroot);
+ if (symroot[len - 1] != '/')
len++;
if (symname[len] >= 'a' && symname[len] <= 'z' &&
(symname[len+1] == '/' || symname[len+1] == '\0')) {
@@ -225,7 +227,8 @@ static int create_native_symlink(const unsigned int xid, struct inode *inode,
iov.iov_base = buf;
iov.iov_len = len;
- new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
+ new = tcon->ses->server->ops->create_reparse_inode(
+ &data, inode->i_sb, xid,
tcon, full_path, directory,
&iov, NULL);
if (!IS_ERR(new))
@@ -397,7 +400,8 @@ static int create_native_socket(const unsigned int xid, struct inode *inode,
struct inode *new;
int rc = 0;
- new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
+ new = tcon->ses->server->ops->create_reparse_inode(
+ &data, inode->i_sb, xid,
tcon, full_path, false, &iov, NULL);
if (!IS_ERR(new))
d_instantiate(dentry, new);
@@ -490,7 +494,8 @@ static int mknod_nfs(unsigned int xid, struct inode *inode,
.symlink_target = kstrdup(symname, GFP_KERNEL),
};
- new = smb2_get_reparse_inode(&data, inode->i_sb, xid,
+ new = tcon->ses->server->ops->create_reparse_inode(
+ &data, inode->i_sb, xid,
tcon, full_path, false, &iov, NULL);
if (!IS_ERR(new))
d_instantiate(dentry, new);
@@ -683,7 +688,8 @@ static int mknod_wsl(unsigned int xid, struct inode *inode,
memcpy(data.wsl.eas, &cc->ea, len);
data.wsl.eas_len = len;
- new = smb2_get_reparse_inode(&data, inode->i_sb,
+ new = tcon->ses->server->ops->create_reparse_inode(
+ &data, inode->i_sb,
xid, tcon, full_path, false,
&reparse_iov, &xattr_iov);
if (!IS_ERR(new))
@@ -696,7 +702,7 @@ static int mknod_wsl(unsigned int xid, struct inode *inode,
return rc;
}
-int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
+int mknod_reparse(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev)
{
@@ -782,6 +788,7 @@ int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len,
const char *full_path,
struct cifs_sb_info *cifs_sb)
{
+ const char *symroot = cifs_sb->ctx->symlinkroot;
char sep = CIFS_DIR_SEP(cifs_sb);
char *linux_target = NULL;
char *smb_target = NULL;
@@ -815,7 +822,8 @@ int smb2_parse_native_symlink(char **target, const char *buf, unsigned int len,
goto out;
}
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) && !relative) {
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) &&
+ symroot && !relative) {
/*
* This is an absolute symlink from the server which does not
* support POSIX paths, so the symlink is in NT-style path.
@@ -875,15 +883,8 @@ globalroot:
abs_path += sizeof("\\DosDevices\\")-1;
else if (strstarts(abs_path, "\\GLOBAL??\\"))
abs_path += sizeof("\\GLOBAL??\\")-1;
- else {
- /* Unhandled absolute symlink, points outside of DOS/Win32 */
- cifs_dbg(VFS,
- "absolute symlink '%s' cannot be converted from NT format "
- "because points to unknown target\n",
- smb_target);
- rc = -EIO;
- goto out;
- }
+ else
+ goto out_unhandled_target;
/* Sometimes path separator after \?? is double backslash */
if (abs_path[0] == '\\')
@@ -910,25 +911,19 @@ globalroot:
abs_path++;
abs_path[0] = drive_letter;
} else {
- /* Unhandled absolute symlink. Report an error. */
- cifs_dbg(VFS,
- "absolute symlink '%s' cannot be converted from NT format "
- "because points to unknown target\n",
- smb_target);
- rc = -EIO;
- goto out;
+ goto out_unhandled_target;
}
abs_path_len = strlen(abs_path)+1;
- symlinkroot_len = strlen(cifs_sb->ctx->symlinkroot);
- if (cifs_sb->ctx->symlinkroot[symlinkroot_len-1] == '/')
+ symlinkroot_len = strlen(symroot);
+ if (symroot[symlinkroot_len - 1] == '/')
symlinkroot_len--;
linux_target = kmalloc(symlinkroot_len + 1 + abs_path_len, GFP_KERNEL);
if (!linux_target) {
rc = -ENOMEM;
goto out;
}
- memcpy(linux_target, cifs_sb->ctx->symlinkroot, symlinkroot_len);
+ memcpy(linux_target, symroot, symlinkroot_len);
linux_target[symlinkroot_len] = '/';
memcpy(linux_target + symlinkroot_len + 1, abs_path, abs_path_len);
} else if (smb_target[0] == sep && relative) {
@@ -966,6 +961,7 @@ globalroot:
* These paths have same format as Linux symlinks, so no
* conversion is needed.
*/
+out_unhandled_target:
linux_target = smb_target;
smb_target = NULL;
}
@@ -1172,7 +1168,6 @@ out:
if (!have_xattr_dev && (tag == IO_REPARSE_TAG_LX_CHR || tag == IO_REPARSE_TAG_LX_BLK))
return false;
- fattr->cf_dtype = S_DT(fattr->cf_mode);
return true;
}
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index 08de853b36a8..66269c10beba 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -129,10 +129,10 @@ static inline bool cifs_open_data_reparse(struct cifs_open_info_data *data)
bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr,
struct cifs_open_info_data *data);
-int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
+int create_reparse_symlink(const unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, const char *symname);
-int smb2_mknod_reparse(unsigned int xid, struct inode *inode,
+int mknod_reparse(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev);
struct reparse_data_buffer *smb2_get_reparse_point_buffer(const struct kvec *rsp_iov, u32 *len);
diff --git a/fs/smb/client/sess.c b/fs/smb/client/sess.c
index b3fa9ee26912..0a8c2fcc9ded 100644
--- a/fs/smb/client/sess.c
+++ b/fs/smb/client/sess.c
@@ -332,6 +332,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
struct cifs_server_iface *old_iface = NULL;
struct cifs_server_iface *last_iface = NULL;
struct sockaddr_storage ss;
+ int retry = 0;
spin_lock(&ses->chan_lock);
chan_index = cifs_ses_get_chan_index(ses, server);
@@ -360,6 +361,7 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
return;
}
+try_again:
last_iface = list_last_entry(&ses->iface_list, struct cifs_server_iface,
iface_head);
iface_min_speed = last_iface->speed;
@@ -397,6 +399,13 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
}
if (list_entry_is_head(iface, &ses->iface_list, iface_head)) {
+ list_for_each_entry(iface, &ses->iface_list, iface_head)
+ iface->weight_fulfilled = 0;
+
+ /* see if it can be satisfied in second attempt */
+ if (!retry++)
+ goto try_again;
+
iface = NULL;
cifs_dbg(FYI, "unable to find a suitable iface\n");
}
@@ -445,6 +454,10 @@ cifs_chan_update_iface(struct cifs_ses *ses, struct TCP_Server_Info *server)
ses->chans[chan_index].iface = iface;
spin_unlock(&ses->chan_lock);
+
+ spin_lock(&server->srv_lock);
+ memcpy(&server->dstaddr, &iface->sockaddr, sizeof(server->dstaddr));
+ spin_unlock(&server->srv_lock);
}
static int
@@ -494,8 +507,7 @@ cifs_ses_add_channel(struct cifs_ses *ses,
ctx->domainauto = ses->domainAuto;
ctx->domainname = ses->domainName;
- /* no hostname for extra channels */
- ctx->server_hostname = "";
+ ctx->server_hostname = ses->server->hostname;
ctx->username = ses->user_name;
ctx->password = ses->password;
@@ -628,6 +640,7 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses,
USHRT_MAX));
pSMB->req.MaxMpxCount = cpu_to_le16(server->maxReq);
pSMB->req.VcNumber = cpu_to_le16(1);
+ pSMB->req.SessionKey = server->session_key_id;
/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
@@ -1684,22 +1697,22 @@ _sess_auth_rawntlmssp_assemble_req(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
capabilities = cifs_ssetup_hdr(ses, server, pSMB);
- if ((pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) == 0) {
- cifs_dbg(VFS, "NTLMSSP requires Unicode support\n");
- return -ENOSYS;
- }
-
pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
capabilities |= CAP_EXTENDED_SECURITY;
pSMB->req.Capabilities |= cpu_to_le32(capabilities);
bcc_ptr = sess_data->iov[2].iov_base;
- /* unicode strings must be word aligned */
- if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
- *bcc_ptr = 0;
- bcc_ptr++;
+
+ if (pSMB->req.hdr.Flags2 & SMBFLG2_UNICODE) {
+ /* unicode strings must be word aligned */
+ if (!IS_ALIGNED(sess_data->iov[0].iov_len + sess_data->iov[1].iov_len, 2)) {
+ *bcc_ptr = 0;
+ bcc_ptr++;
+ }
+ unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
+ } else {
+ ascii_oslm_strings(&bcc_ptr, sess_data->nls_cp);
}
- unicode_oslm_strings(&bcc_ptr, sess_data->nls_cp);
sess_data->iov[2].iov_len = (long) bcc_ptr -
(long) sess_data->iov[2].iov_base;
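A small sketch of the word-alignment rule applied above: UCS-2 (Unicode) strings in the SMB byte-count area must start on an even offset, so one pad byte is written when the running length is odd, while ASCII strings need no padding. The helper name and the caller-supplied length below are assumptions for illustration only.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t append_oslm_strings(uint8_t *bcc, size_t bytes_before_bcc,
				  int unicode)
{
	uint8_t *p = bcc;

	if (unicode) {
		if (bytes_before_bcc & 1)	/* same test as !IS_ALIGNED(len, 2) */
			*p++ = 0;		/* pad byte before UTF-16LE strings */
		/* ... emit UTF-16LE OS and LanMan strings here ... */
	} else {
		/* ASCII strings have no alignment requirement */
		memcpy(p, "Linux\0CIFS\0", 11);
		p += 11;
	}
	return (size_t)(p - bcc);
}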
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index 0adeec652dc1..893a1ea8c000 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -16,6 +16,7 @@
#include "fs_context.h"
#include "nterr.h"
#include "smberr.h"
+#include "reparse.h"
/*
* An NT cancel request header looks just like the original request except:
@@ -94,17 +95,17 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
struct smb_hdr *buf = (struct smb_hdr *)buffer;
struct mid_q_entry *mid;
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if (compare_mid(mid->mid, buf) &&
mid->mid_state == MID_REQUEST_SUBMITTED &&
le16_to_cpu(mid->command) == buf->Command) {
kref_get(&mid->refcount);
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
return mid;
}
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
return NULL;
}
@@ -168,10 +169,9 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
__u16 last_mid, cur_mid;
bool collision, reconnect = false;
- spin_lock(&server->mid_lock);
-
+ spin_lock(&server->mid_counter_lock);
/* mid is 16 bit only for CIFS/SMB */
- cur_mid = (__u16)((server->CurrentMid) & 0xffff);
+ cur_mid = (__u16)((server->current_mid) & 0xffff);
/* we do not want to loop forever */
last_mid = cur_mid;
cur_mid++;
@@ -197,6 +197,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
cur_mid++;
num_mids = 0;
+ spin_lock(&server->mid_queue_lock);
list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
++num_mids;
if (mid_entry->mid == cur_mid &&
@@ -206,6 +207,7 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
break;
}
}
+ spin_unlock(&server->mid_queue_lock);
/*
* if we have more than 32k mids in the list, then something
@@ -222,12 +224,12 @@ cifs_get_next_mid(struct TCP_Server_Info *server)
if (!collision) {
mid = (__u64)cur_mid;
- server->CurrentMid = mid;
+ server->current_mid = mid;
break;
}
cur_mid++;
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_counter_lock);
if (reconnect) {
cifs_signal_cifsd_for_reconnect(server, false);
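The hunks above split the old mid_lock into two narrower locks. A userspace mock-up of the resulting pattern, with pthread mutexes standing in for spinlocks and illustrative names throughout: the counter lock only serialises multiplex-id allocation, while the queue lock protects walks and insertions on the pending list.

#include <pthread.h>
#include <stdint.h>
#include <stddef.h>

struct mid_entry_mock { uint64_t mid; struct mid_entry_mock *next; };

static pthread_mutex_t mid_counter_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mid_queue_lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t current_mid;
static struct mid_entry_mock *pending_mid_q;

static uint64_t get_next_mid(void)
{
	uint64_t mid;

	pthread_mutex_lock(&mid_counter_lock);	/* counter only */
	mid = current_mid++;
	pthread_mutex_unlock(&mid_counter_lock);
	return mid;
}

static void queue_mid(struct mid_entry_mock *e)
{
	pthread_mutex_lock(&mid_queue_lock);	/* pending list only */
	e->next = pending_mid_q;
	pending_mid_q = e;
	pthread_mutex_unlock(&mid_queue_lock);
}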
@@ -432,7 +434,7 @@ cifs_negotiate(const unsigned int xid,
}
static unsigned int
-cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+smb1_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -467,7 +469,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
}
static unsigned int
-cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+smb1_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
{
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
struct TCP_Server_Info *server = tcon->ses->server;
@@ -543,24 +545,104 @@ static int cifs_query_path_info(const unsigned int xid,
const char *full_path,
struct cifs_open_info_data *data)
{
- int rc;
+ int rc = -EOPNOTSUPP;
FILE_ALL_INFO fi = {};
+ struct cifs_search_info search_info = {};
+ bool non_unicode_wildcard = false;
data->reparse_point = false;
data->adjust_tz = false;
- /* could do find first instead but this returns more info */
- rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */, cifs_sb->local_nls,
- cifs_remap(cifs_sb));
/*
- * BB optimize code so we do not make the above call when server claims
- * no NT SMB support and the above call failed at least once - set flag
- * in tcon or mount.
+	 * First try the CIFSSMBQPathInfo() function, which returns more info
+	 * (NumberOfLinks) than the CIFSFindFirst() fallback.
+	 * Some servers like Win9x do not support SMB_QUERY_FILE_ALL_INFO over
+	 * TRANS2_QUERY_PATH_INFORMATION, but do support it with a filehandle
+	 * over TRANS2_QUERY_FILE_INFORMATION (function CIFSSMBQFileInfo()).
+	 * However, the SMB Open command on non-NT servers works only for
+	 * files, not for directories, and the Win9x SMB server returns bogus
+	 * data in the SMB_QUERY_FILE_ALL_INFO Attributes field. So for non-NT
+	 * servers, do not use CIFSSMBQPathInfo() or CIFSSMBQFileInfo() at all.
*/
- if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) {
+ if (tcon->ses->capabilities & CAP_NT_SMBS)
+ rc = CIFSSMBQPathInfo(xid, tcon, full_path, &fi, 0 /* not legacy */,
+ cifs_sb->local_nls, cifs_remap(cifs_sb));
+
+ /*
+	 * The non-UNICODE variants of the fallback functions below expand wildcards,
+	 * so they cannot be used for querying paths with wildcard characters.
+ */
+ if (rc && !(tcon->ses->capabilities & CAP_UNICODE) && strpbrk(full_path, "*?\"><"))
+ non_unicode_wildcard = true;
+
+ /*
+	 * Then fall back to CIFSFindFirst(), which also works with non-NT
+	 * servers but does not provide NumberOfLinks.
+ */
+ if ((rc == -EOPNOTSUPP || rc == -EINVAL) &&
+ !non_unicode_wildcard) {
+ if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find))
+ search_info.info_level = SMB_FIND_FILE_INFO_STANDARD;
+ else
+ search_info.info_level = SMB_FIND_FILE_FULL_DIRECTORY_INFO;
+ rc = CIFSFindFirst(xid, tcon, full_path, cifs_sb, NULL,
+ CIFS_SEARCH_CLOSE_ALWAYS | CIFS_SEARCH_CLOSE_AT_END,
+ &search_info, false);
+ if (rc == 0) {
+ if (!(tcon->ses->capabilities & tcon->ses->server->vals->cap_nt_find)) {
+ FIND_FILE_STANDARD_INFO *di;
+ int offset = tcon->ses->server->timeAdj;
+
+ di = (FIND_FILE_STANDARD_INFO *)search_info.srch_entries_start;
+ fi.CreationTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
+ di->CreationDate, di->CreationTime, offset)));
+ fi.LastAccessTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
+ di->LastAccessDate, di->LastAccessTime, offset)));
+ fi.LastWriteTime = cpu_to_le64(cifs_UnixTimeToNT(cnvrtDosUnixTm(
+ di->LastWriteDate, di->LastWriteTime, offset)));
+ fi.ChangeTime = fi.LastWriteTime;
+ fi.Attributes = cpu_to_le32(le16_to_cpu(di->Attributes));
+ fi.AllocationSize = cpu_to_le64(le32_to_cpu(di->AllocationSize));
+ fi.EndOfFile = cpu_to_le64(le32_to_cpu(di->DataSize));
+ } else {
+ FILE_FULL_DIRECTORY_INFO *di;
+
+ di = (FILE_FULL_DIRECTORY_INFO *)search_info.srch_entries_start;
+ fi.CreationTime = di->CreationTime;
+ fi.LastAccessTime = di->LastAccessTime;
+ fi.LastWriteTime = di->LastWriteTime;
+ fi.ChangeTime = di->ChangeTime;
+ fi.Attributes = di->ExtFileAttributes;
+ fi.AllocationSize = di->AllocationSize;
+ fi.EndOfFile = di->EndOfFile;
+ fi.EASize = di->EaSize;
+ }
+ fi.NumberOfLinks = cpu_to_le32(1);
+ fi.DeletePending = 0;
+ fi.Directory = !!(le32_to_cpu(fi.Attributes) & ATTR_DIRECTORY);
+ cifs_buf_release(search_info.ntwrk_buf_start);
+ } else if (!full_path[0]) {
+ /*
+			 * CIFSFindFirst() does not work on the root path if the
+			 * root path was exported on the server from a top-level
+			 * path (drive letter).
+ */
+ rc = -EOPNOTSUPP;
+ }
+ }
+
+ /*
+	 * If everything failed then fall back to the legacy SMB command
+	 * SMB_COM_QUERY_INFORMATION, which works with all servers but
+	 * provides only limited information.
+ */
+ if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !non_unicode_wildcard) {
rc = SMBQueryInformation(xid, tcon, full_path, &fi, cifs_sb->local_nls,
cifs_remap(cifs_sb));
data->adjust_tz = true;
+ } else if ((rc == -EOPNOTSUPP || rc == -EINVAL) && non_unicode_wildcard) {
+		/* A path with non-UNICODE wildcard characters cannot exist. */
+ rc = -ENOENT;
}
if (!rc) {
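A compact sketch of the three-level fallback implemented above, with the actual SMB1 calls (CIFSSMBQPathInfo, CIFSFindFirst, SMBQueryInformation) replaced by stubs; only the decision flow is shown and all names are placeholders.

#include <errno.h>
#include <stdbool.h>

static int qpathinfo_stub(void) { return -EOPNOTSUPP; }  /* NT-only, richest info */
static int findfirst_stub(void) { return -EOPNOTSUPP; }  /* works on most servers */
static int queryinfo_stub(void) { return 0; }            /* legacy, minimal info */

static int query_path_info_sketch(bool nt_smbs, bool wildcard_unsafe)
{
	int rc = -EOPNOTSUPP;

	if (nt_smbs)					/* 1. SMB_QUERY_FILE_ALL_INFO */
		rc = qpathinfo_stub();

	if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !wildcard_unsafe)
		rc = findfirst_stub();			/* 2. FindFirst fallback */

	if ((rc == -EOPNOTSUPP || rc == -EINVAL) && !wildcard_unsafe)
		rc = queryinfo_stub();			/* 3. SMB_COM_QUERY_INFORMATION */
	else if ((rc == -EOPNOTSUPP || rc == -EINVAL) && wildcard_unsafe)
		rc = -ENOENT;				/* wildcard path cannot exist */

	return rc;
}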
@@ -639,6 +721,13 @@ static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon,
int rc;
FILE_ALL_INFO fi = {};
+ /*
+	 * CIFSSMBQFileInfo() on non-NT servers returns bogus data in the
+	 * Attributes field, so do not use this command for non-NT servers.
+ */
+ if (!(tcon->ses->capabilities & CAP_NT_SMBS))
+ return -EOPNOTSUPP;
+
if (cfile->symlink_target) {
data->symlink_target = kstrdup(cfile->symlink_target, GFP_KERNEL);
if (!data->symlink_target)
@@ -809,6 +898,9 @@ smb_set_file_info(struct inode *inode, const char *full_path,
struct cifs_fid fid;
struct cifs_open_parms oparms;
struct cifsFileInfo *open_file;
+ FILE_BASIC_INFO new_buf;
+ struct cifs_open_info_data query_data;
+ __le64 write_time = buf->LastWriteTime;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = NULL;
@@ -816,20 +908,58 @@ smb_set_file_info(struct inode *inode, const char *full_path,
/* if the file is already open for write, just use that fileid */
open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+
if (open_file) {
fid.netfid = open_file->fid.netfid;
netpid = open_file->pid;
tcon = tlink_tcon(open_file->tlink);
- goto set_via_filehandle;
+ } else {
+ tlink = cifs_sb_tlink(cifs_sb);
+ if (IS_ERR(tlink)) {
+ rc = PTR_ERR(tlink);
+ tlink = NULL;
+ goto out;
+ }
+ tcon = tlink_tcon(tlink);
}
- tlink = cifs_sb_tlink(cifs_sb);
- if (IS_ERR(tlink)) {
- rc = PTR_ERR(tlink);
- tlink = NULL;
- goto out;
+ /*
+	 * Non-NT servers interpret a zero time value in SMB_SET_FILE_BASIC_INFO
+	 * over TRANS2_SET_FILE_INFORMATION as a valid time value, while NT
+	 * servers interpret zero as "do not change the existing value". The
+	 * ->set_file_info() callback API expects the NT meaning - do not
+	 * change. Therefore, if the server is non-NT and some time values in
+	 * "buf" are zero, fetch the missing time values first.
+ */
+ if (!(tcon->ses->capabilities & CAP_NT_SMBS) &&
+ (!buf->CreationTime || !buf->LastAccessTime ||
+ !buf->LastWriteTime || !buf->ChangeTime)) {
+ rc = cifs_query_path_info(xid, tcon, cifs_sb, full_path, &query_data);
+ if (rc) {
+ if (open_file) {
+ cifsFileInfo_put(open_file);
+ open_file = NULL;
+ }
+ goto out;
+ }
+ /*
+		 * The original write_time from buf->LastWriteTime is preserved,
+		 * as SMBSetInformation() interprets zero as "do not change".
+ */
+ new_buf = *buf;
+ buf = &new_buf;
+ if (!buf->CreationTime)
+ buf->CreationTime = query_data.fi.CreationTime;
+ if (!buf->LastAccessTime)
+ buf->LastAccessTime = query_data.fi.LastAccessTime;
+ if (!buf->LastWriteTime)
+ buf->LastWriteTime = query_data.fi.LastWriteTime;
+ if (!buf->ChangeTime)
+ buf->ChangeTime = query_data.fi.ChangeTime;
}
- tcon = tlink_tcon(tlink);
+
+ if (open_file)
+ goto set_via_filehandle;
rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls,
cifs_sb);
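A short sketch of the "fill in missing timestamps" step above: on servers that treat a zero time literally, each zero field is replaced with the value already stored on the server before the set-info request goes out. Structure and field names are illustrative only.

#include <stdint.h>

struct basic_times_mock { uint64_t creation, access, write, change; };

static void fill_missing_times(struct basic_times_mock *want,
			       const struct basic_times_mock *on_server)
{
	if (!want->creation)
		want->creation = on_server->creation;
	if (!want->access)
		want->access = on_server->access;
	if (!want->write)
		want->write = on_server->write;
	if (!want->change)
		want->change = on_server->change;
}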
@@ -850,8 +980,45 @@ smb_set_file_info(struct inode *inode, const char *full_path,
.fid = &fid,
};
- cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n");
- rc = CIFS_open(xid, &oparms, &oplock, NULL);
+ if (S_ISDIR(inode->i_mode) && !(tcon->ses->capabilities & CAP_NT_SMBS)) {
+		/* Opening a directory path is not possible on non-NT servers. */
+ rc = -EOPNOTSUPP;
+ } else {
+ /*
+			 * Use cifs_open_file() instead of CIFS_open(), as
+			 * cifs_open_file() selects the correct open function,
+			 * which also works on non-NT servers.
+ */
+ rc = cifs_open_file(xid, &oparms, &oplock, NULL);
+ /*
+			 * Opening a path for writing on non-NT servers is not
+			 * possible when the read-only attribute is already set;
+			 * such a server returns -EACCES in this case. For those
+			 * servers the only way to clear the read-only bit is via
+			 * the SMB_COM_SETATTR command.
+ */
+ if (rc == -EACCES &&
+ (cinode->cifsAttrs & ATTR_READONLY) &&
+ le32_to_cpu(buf->Attributes) != 0 && /* 0 = do not change attrs */
+ !(le32_to_cpu(buf->Attributes) & ATTR_READONLY) &&
+ !(tcon->ses->capabilities & CAP_NT_SMBS))
+ rc = -EOPNOTSUPP;
+ }
+
+	/* Fall back to the SMB_COM_SETATTR command only when absolutely needed. */
+ if (rc == -EOPNOTSUPP) {
+ cifs_dbg(FYI, "calling SetInformation since SetPathInfo for attrs/times not supported by this server\n");
+ rc = SMBSetInformation(xid, tcon, full_path,
+ buf->Attributes != 0 ? buf->Attributes : cpu_to_le32(cinode->cifsAttrs),
+ write_time,
+ cifs_sb->local_nls, cifs_sb);
+ if (rc == 0)
+ cinode->cifsAttrs = le32_to_cpu(buf->Attributes);
+ else
+ rc = -EACCES;
+ goto out;
+ }
+
if (rc != 0) {
if (rc == -EIO)
rc = -EINVAL;
@@ -859,6 +1026,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
}
netpid = current->tgid;
+ cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for attrs/times not supported by this server\n");
set_via_filehandle:
rc = CIFSSMBSetFileInfo(xid, tcon, buf, fid.netfid, netpid);
@@ -869,6 +1037,21 @@ set_via_filehandle:
CIFSSMBClose(xid, tcon, fid.netfid);
else
cifsFileInfo_put(open_file);
+
+ /*
+	 * Setting the read-only bit is not honored on non-NT servers when done
+	 * via open semantics, so use the SMB_COM_SETATTR command to set it.
+	 * This command works only after the file is closed, so use it only when
+	 * the operation was called without a filehandle.
+ */
+ if (open_file == NULL &&
+ !(tcon->ses->capabilities & CAP_NT_SMBS) &&
+ le32_to_cpu(buf->Attributes) & ATTR_READONLY) {
+ SMBSetInformation(xid, tcon, full_path,
+ buf->Attributes,
+ 0 /* do not change write time */,
+ cifs_sb->local_nls, cifs_sb);
+ }
out:
if (tlink != NULL)
cifs_put_tlink(tlink);
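A sketch of the ordering rule behind the SMB_COM_SETATTR fallback above, assuming a non-NT server: attributes written through an open handle do not reliably latch the read-only bit, so a final path-based SETATTR is issued once no handle is held. All names below are hypothetical placeholders.

#include <stdbool.h>
#include <stdint.h>

#define MOCK_ATTR_READONLY 0x01u

static int set_file_info_sketch(bool nt_smbs, bool had_open_handle,
				uint32_t attrs,
				int (*set_via_handle)(uint32_t),
				int (*setattr_by_path)(uint32_t))
{
	int rc = set_via_handle(attrs);

	/* read-only only sticks via SETATTR once the file is closed */
	if (!rc && !had_open_handle && !nt_smbs && (attrs & MOCK_ATTR_READONLY))
		rc = setattr_by_path(attrs);
	return rc;
}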
@@ -1082,17 +1265,26 @@ cifs_make_node(unsigned int xid, struct inode *inode,
if (rc == 0)
d_instantiate(dentry, newinode);
return rc;
+ } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+ /*
+		 * Check if mounted with the 'sfu' mount option.
+		 * SFU emulation should work with all servers
+		 * and was used by default in earlier versions of Windows.
+ */
+ return cifs_sfu_make_node(xid, inode, dentry, tcon,
+ full_path, mode, dev);
+ } else if (CIFS_REPARSE_SUPPORT(tcon)) {
+ /*
+		 * mknod via reparse points requires server support for
+		 * storing reparse points, which has been available since
+		 * Windows 2000 but was not widely used until the Windows
+		 * NFS server shipped with Windows Server 2012.
+ */
+ return mknod_reparse(xid, inode, dentry, tcon,
+ full_path, mode, dev);
+ } else {
+ return -EOPNOTSUPP;
}
- /*
- * Check if mounted with mount parm 'sfu' mount parm.
- * SFU emulation should work with all servers, but only
- * supports block and char device, socket & fifo,
- * and was used by default in earlier versions of Windows
- */
- if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
- return -EPERM;
- return cifs_sfu_make_node(xid, inode, dentry, tcon,
- full_path, mode, dev);
}
static bool
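The rewritten cifs_make_node() above is essentially a three-way dispatch. A stubbed sketch of that flow, with placeholder callbacks standing in for the real helpers:

#include <errno.h>
#include <stdbool.h>

static int make_node_sketch(bool unix_ext, bool sfu_mount, bool reparse_ok,
			    int (*posix_mknod)(void),
			    int (*sfu_mknod)(void),
			    int (*reparse_mknod)(void))
{
	if (unix_ext)
		return posix_mknod();	/* CIFS POSIX extensions */
	if (sfu_mount)
		return sfu_mknod();	/* SFU-style special files */
	if (reparse_ok)
		return reparse_mknod();	/* NFS/WSL reparse points */
	return -EOPNOTSUPP;
}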
@@ -1161,8 +1353,8 @@ struct smb_version_operations smb1_operations = {
.check_trans2 = cifs_check_trans2,
.need_neg = cifs_need_neg,
.negotiate = cifs_negotiate,
- .negotiate_wsize = cifs_negotiate_wsize,
- .negotiate_rsize = cifs_negotiate_rsize,
+ .negotiate_wsize = smb1_negotiate_wsize,
+ .negotiate_rsize = smb1_negotiate_rsize,
.sess_setup = CIFS_SessSetup,
.logoff = CIFSSMBLogoff,
.tree_connect = CIFSTCon,
@@ -1189,6 +1381,7 @@ struct smb_version_operations smb1_operations = {
.create_hardlink = CIFSCreateHardLink,
.query_symlink = cifs_query_symlink,
.get_reparse_point_buffer = cifs_get_reparse_point_buffer,
+ .create_reparse_inode = cifs_create_reparse_inode,
.open = cifs_open_file,
.set_fid = cifs_set_fid,
.close = cifs_close_file,
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 57d9bfbadd97..2a0316c514e4 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -666,6 +666,8 @@ finished:
/* smb2_parse_contexts() fills idata->fi.IndexNumber */
rc = smb2_parse_contexts(server, &rsp_iov[0], &oparms->fid->epoch,
oparms->fid->lease_key, &oplock, &idata->fi, NULL);
+ if (rc)
+ cifs_dbg(VFS, "rc: %d parsing context of compound op\n", rc);
}
for (i = 0; i < num_cmds; i++) {
@@ -1056,10 +1058,11 @@ int smb2_query_path_info(const unsigned int xid,
* Skip SMB2_OP_GET_REPARSE if symlink already parsed in create
* response.
*/
- if (data->reparse.tag != IO_REPARSE_TAG_SYMLINK)
+ if (data->reparse.tag != IO_REPARSE_TAG_SYMLINK) {
cmds[num_cmds++] = SMB2_OP_GET_REPARSE;
- if (!tcon->posix_extensions)
- cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
+ if (!tcon->posix_extensions)
+ cmds[num_cmds++] = SMB2_OP_QUERY_WSL_EA;
+ }
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
FILE_READ_ATTRIBUTES |
@@ -1318,7 +1321,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
return rc;
}
-struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
+struct inode *smb2_create_reparse_inode(struct cifs_open_info_data *data,
struct super_block *sb,
const unsigned int xid,
struct cifs_tcon *tcon,
@@ -1343,7 +1346,7 @@ struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
* attempt to create reparse point. This will prevent creating unusable
* empty object on the server.
*/
- if (!(le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS))
+ if (!CIFS_REPARSE_SUPPORT(tcon))
return ERR_PTR(-EOPNOTSUPP);
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 2fe8eeb98535..ad8947434b71 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -91,7 +91,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
if (*val > 65000) {
*val = 65000; /* Don't get near 64K credits, avoid srv bugs */
pr_warn_once("server overflowed SMB3 credits\n");
- trace_smb3_overflow_credits(server->CurrentMid,
+ trace_smb3_overflow_credits(server->current_mid,
server->conn_id, server->hostname, *val,
add, server->in_flight);
}
@@ -136,7 +136,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
wake_up(&server->request_q);
if (reconnect_detected) {
- trace_smb3_reconnect_detected(server->CurrentMid,
+ trace_smb3_reconnect_detected(server->current_mid,
server->conn_id, server->hostname, scredits, add, in_flight);
cifs_dbg(FYI, "trying to put %d credits from the old server instance %d\n",
@@ -144,7 +144,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
}
if (reconnect_with_invalid_credits) {
- trace_smb3_reconnect_with_invalid_credits(server->CurrentMid,
+ trace_smb3_reconnect_with_invalid_credits(server->current_mid,
server->conn_id, server->hostname, scredits, add, in_flight);
cifs_dbg(FYI, "Negotiate operation when server credits is non-zero. Optype: %d, server credits: %d, credits added: %d\n",
optype, scredits, add);
@@ -176,7 +176,7 @@ smb2_add_credits(struct TCP_Server_Info *server,
break;
}
- trace_smb3_add_credits(server->CurrentMid,
+ trace_smb3_add_credits(server->current_mid,
server->conn_id, server->hostname, scredits, add, in_flight);
cifs_dbg(FYI, "%s: added %u credits total=%d\n", __func__, add, scredits);
}
@@ -203,7 +203,7 @@ smb2_set_credits(struct TCP_Server_Info *server, const int val)
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
- trace_smb3_set_credits(server->CurrentMid,
+ trace_smb3_set_credits(server->current_mid,
server->conn_id, server->hostname, scredits, val, in_flight);
cifs_dbg(FYI, "%s: set %u credits\n", __func__, val);
@@ -288,7 +288,7 @@ smb2_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
- trace_smb3_wait_credits(server->CurrentMid,
+ trace_smb3_wait_credits(server->current_mid,
server->conn_id, server->hostname, scredits, -(credits->value), in_flight);
cifs_dbg(FYI, "%s: removed %u credits total=%d\n",
__func__, credits->value, scredits);
@@ -316,7 +316,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
server->credits, server->in_flight,
new_val - credits->value,
cifs_trace_rw_credits_no_adjust_up);
- trace_smb3_too_many_credits(server->CurrentMid,
+ trace_smb3_too_many_credits(server->current_mid,
server->conn_id, server->hostname, 0, credits->value - new_val, 0);
cifs_server_dbg(VFS, "R=%x[%x] request has less credits (%d) than required (%d)",
subreq->rreq->debug_id, subreq->subreq.debug_index,
@@ -338,7 +338,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
server->credits, server->in_flight,
new_val - credits->value,
cifs_trace_rw_credits_old_session);
- trace_smb3_reconnect_detected(server->CurrentMid,
+ trace_smb3_reconnect_detected(server->current_mid,
server->conn_id, server->hostname, scredits,
credits->value - new_val, in_flight);
cifs_server_dbg(VFS, "R=%x[%x] trying to return %d credits to old session\n",
@@ -358,7 +358,7 @@ smb2_adjust_credits(struct TCP_Server_Info *server,
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- trace_smb3_adj_credits(server->CurrentMid,
+ trace_smb3_adj_credits(server->current_mid,
server->conn_id, server->hostname, scredits,
credits->value - new_val, in_flight);
cifs_dbg(FYI, "%s: adjust added %u credits total=%d\n",
@@ -374,19 +374,19 @@ smb2_get_next_mid(struct TCP_Server_Info *server)
{
__u64 mid;
/* for SMB2 we need the current value */
- spin_lock(&server->mid_lock);
- mid = server->CurrentMid++;
- spin_unlock(&server->mid_lock);
+ spin_lock(&server->mid_counter_lock);
+ mid = server->current_mid++;
+ spin_unlock(&server->mid_counter_lock);
return mid;
}
static void
smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val)
{
- spin_lock(&server->mid_lock);
- if (server->CurrentMid >= val)
- server->CurrentMid -= val;
- spin_unlock(&server->mid_lock);
+ spin_lock(&server->mid_counter_lock);
+ if (server->current_mid >= val)
+ server->current_mid -= val;
+ spin_unlock(&server->mid_counter_lock);
}
static struct mid_q_entry *
@@ -401,7 +401,7 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
return NULL;
}
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
@@ -409,13 +409,13 @@ __smb2_find_mid(struct TCP_Server_Info *server, char *buf, bool dequeue)
kref_get(&mid->refcount);
if (dequeue) {
list_del_init(&mid->qhead);
- mid->mid_flags |= MID_DELETED;
+ mid->deleted_from_q = true;
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
return mid;
}
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
return NULL;
}
@@ -460,9 +460,9 @@ smb2_negotiate(const unsigned int xid,
{
int rc;
- spin_lock(&server->mid_lock);
- server->CurrentMid = 0;
- spin_unlock(&server->mid_lock);
+ spin_lock(&server->mid_counter_lock);
+ server->current_mid = 0;
+ spin_unlock(&server->mid_counter_lock);
rc = SMB2_negotiate(xid, ses, server);
return rc;
}
@@ -504,6 +504,9 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
wsize = min_t(unsigned int, wsize, server->max_write);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
+ struct smbdirect_socket_parameters *sp =
+ &server->smbd_conn->socket.parameters;
+
if (server->sign)
/*
* Account for SMB2 data transfer packet header and
@@ -511,12 +514,12 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
*/
wsize = min_t(unsigned int,
wsize,
- server->smbd_conn->max_fragmented_send_size -
+ sp->max_fragmented_send_size -
SMB2_READWRITE_PDU_HEADER_SIZE -
sizeof(struct smb2_transform_hdr));
else
wsize = min_t(unsigned int,
- wsize, server->smbd_conn->max_readwrite_size);
+ wsize, sp->max_read_write_size);
}
#endif
if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
@@ -552,6 +555,9 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
rsize = min_t(unsigned int, rsize, server->max_read);
#ifdef CONFIG_CIFS_SMB_DIRECT
if (server->rdma) {
+ struct smbdirect_socket_parameters *sp =
+ &server->smbd_conn->socket.parameters;
+
if (server->sign)
/*
* Account for SMB2 data transfer packet header and
@@ -559,12 +565,12 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
*/
rsize = min_t(unsigned int,
rsize,
- server->smbd_conn->max_fragmented_recv_size -
+ sp->max_fragmented_recv_size -
SMB2_READWRITE_PDU_HEADER_SIZE -
sizeof(struct smb2_transform_hdr));
else
rsize = min_t(unsigned int,
- rsize, server->smbd_conn->max_readwrite_size);
+ rsize, sp->max_read_write_size);
}
#endif
@@ -2492,7 +2498,7 @@ smb2_is_status_pending(char *buf, struct TCP_Server_Info *server)
spin_unlock(&server->req_lock);
wake_up(&server->request_q);
- trace_smb3_pend_credits(server->CurrentMid,
+ trace_smb3_pend_credits(server->current_mid,
server->conn_id, server->hostname, scredits,
le16_to_cpu(shdr->CreditRequest), in_flight);
cifs_dbg(FYI, "%s: status pending add %u credits total=%d\n",
@@ -4069,7 +4075,7 @@ map_oplock_to_lease(u8 oplock)
}
static char *
-smb2_create_lease_buf(u8 *lease_key, u8 oplock)
+smb2_create_lease_buf(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 flags)
{
struct create_lease *buf;
@@ -4095,7 +4101,7 @@ smb2_create_lease_buf(u8 *lease_key, u8 oplock)
}
static char *
-smb3_create_lease_buf(u8 *lease_key, u8 oplock)
+smb3_create_lease_buf(u8 *lease_key, u8 oplock, u8 *parent_lease_key, __le32 flags)
{
struct create_lease_v2 *buf;
@@ -4105,6 +4111,9 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock)
memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
+ buf->lcontext.LeaseFlags = flags;
+ if (flags & SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE)
+ memcpy(&buf->lcontext.ParentLeaseKey, parent_lease_key, SMB2_LEASE_KEY_SIZE);
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_lease_v2, lcontext));
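A sketch of the lease v2 context fill shown above: the new parent_lease_key and flags parameters are only consumed when the "parent lease key set" flag is present. The structure layout and constants below are illustrative, not the wire format.

#include <stdint.h>
#include <string.h>

#define MOCK_LEASE_KEY_SIZE 16
#define MOCK_FLAG_PARENT_KEY_SET 0x4u

struct lease_ctx_v2_mock {
	uint8_t  key[MOCK_LEASE_KEY_SIZE];
	uint32_t state;
	uint32_t flags;
	uint8_t  parent_key[MOCK_LEASE_KEY_SIZE];
};

static void fill_lease_v2(struct lease_ctx_v2_mock *ctx, const uint8_t *key,
			  uint32_t state, uint32_t flags,
			  const uint8_t *parent_key)
{
	memset(ctx, 0, sizeof(*ctx));
	memcpy(ctx->key, key, MOCK_LEASE_KEY_SIZE);
	ctx->state = state;
	ctx->flags = flags;
	if (flags & MOCK_FLAG_PARENT_KEY_SET)
		memcpy(ctx->parent_key, parent_key, MOCK_LEASE_KEY_SIZE);
}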
@@ -4307,6 +4316,7 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
u8 key[SMB3_ENC_DEC_KEY_SIZE];
struct aead_request *req;
u8 *iv;
+ DECLARE_CRYPTO_WAIT(wait);
unsigned int crypt_len = le32_to_cpu(tr_hdr->OriginalMessageSize);
void *creq;
size_t sensitive_size;
@@ -4357,7 +4367,11 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
aead_request_set_crypt(req, sg, sg, crypt_len, iv);
aead_request_set_ad(req, assoc_data_len);
- rc = enc ? crypto_aead_encrypt(req) : crypto_aead_decrypt(req);
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ crypto_req_done, &wait);
+
+ rc = crypto_wait_req(enc ? crypto_aead_encrypt(req)
+ : crypto_aead_decrypt(req), &wait);
if (!rc && enc)
memcpy(&tr_hdr->Signature, sign, SMB2_SIGNATURE_SIZE);
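The hunk above moves crypt_message() from calling the AEAD operation directly to the standard kernel idiom for waiting on a possibly asynchronous crypto request. A minimal sketch of that idiom in kernel context (not standalone code):

#include <linux/crypto.h>
#include <crypto/aead.h>

static int aead_op_sync(struct aead_request *req, bool enc)
{
	DECLARE_CRYPTO_WAIT(wait);

	/* crypto_req_done() completes 'wait' when the driver finishes */
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);

	/* turns -EINPROGRESS/-EBUSY into a sleep until completion */
	return crypto_wait_req(enc ? crypto_aead_encrypt(req)
				   : crypto_aead_decrypt(req), &wait);
}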
@@ -4795,18 +4809,18 @@ static void smb2_decrypt_offload(struct work_struct *work)
} else {
spin_lock(&dw->server->srv_lock);
if (dw->server->tcpStatus == CifsNeedReconnect) {
- spin_lock(&dw->server->mid_lock);
+ spin_lock(&dw->server->mid_queue_lock);
mid->mid_state = MID_RETRY_NEEDED;
- spin_unlock(&dw->server->mid_lock);
+ spin_unlock(&dw->server->mid_queue_lock);
spin_unlock(&dw->server->srv_lock);
mid->callback(mid);
} else {
- spin_lock(&dw->server->mid_lock);
+ spin_lock(&dw->server->mid_queue_lock);
mid->mid_state = MID_REQUEST_SUBMITTED;
- mid->mid_flags &= ~(MID_DELETED);
+ mid->deleted_from_q = false;
list_add_tail(&mid->qhead,
&dw->server->pending_mid_q);
- spin_unlock(&dw->server->mid_lock);
+ spin_unlock(&dw->server->mid_queue_lock);
spin_unlock(&dw->server->srv_lock);
}
}
@@ -5246,9 +5260,9 @@ static int smb2_make_node(unsigned int xid, struct inode *inode,
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
rc = cifs_sfu_make_node(xid, inode, dentry, tcon,
full_path, mode, dev);
- } else if (le32_to_cpu(tcon->fsAttrInfo.Attributes) & FILE_SUPPORTS_REPARSE_POINTS) {
- rc = smb2_mknod_reparse(xid, inode, dentry, tcon,
- full_path, mode, dev);
+ } else if (CIFS_REPARSE_SUPPORT(tcon)) {
+ rc = mknod_reparse(xid, inode, dentry, tcon,
+ full_path, mode, dev);
}
return rc;
}
@@ -5306,7 +5320,7 @@ struct smb_version_operations smb20_operations = {
.get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
- .create_reparse_symlink = smb2_create_reparse_symlink,
+ .create_reparse_inode = smb2_create_reparse_inode,
.open = smb2_open_file,
.set_fid = smb2_set_fid,
.close = smb2_close_file,
@@ -5409,7 +5423,7 @@ struct smb_version_operations smb21_operations = {
.get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
- .create_reparse_symlink = smb2_create_reparse_symlink,
+ .create_reparse_inode = smb2_create_reparse_inode,
.open = smb2_open_file,
.set_fid = smb2_set_fid,
.close = smb2_close_file,
@@ -5516,7 +5530,7 @@ struct smb_version_operations smb30_operations = {
.get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
- .create_reparse_symlink = smb2_create_reparse_symlink,
+ .create_reparse_inode = smb2_create_reparse_inode,
.open = smb2_open_file,
.set_fid = smb2_set_fid,
.close = smb2_close_file,
@@ -5632,7 +5646,7 @@ struct smb_version_operations smb311_operations = {
.get_reparse_point_buffer = smb2_get_reparse_point_buffer,
.query_mf_symlink = smb3_query_mf_symlink,
.create_mf_symlink = smb3_create_mf_symlink,
- .create_reparse_symlink = smb2_create_reparse_symlink,
+ .create_reparse_inode = smb2_create_reparse_inode,
.open = smb2_open_file,
.set_fid = smb2_set_fid,
.close = smb2_close_file,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index c4d52bebd37d..2df93a75e3b8 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -36,6 +36,7 @@
#include "smb2glob.h"
#include "cifspdu.h"
#include "cifs_spnego.h"
+#include "../common/smbdirect/smbdirect.h"
#include "smbdirect.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
@@ -411,14 +412,23 @@ skip_sess_setup:
if (!rc &&
(server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL) &&
server->ops->query_server_interfaces) {
- mutex_unlock(&ses->session_mutex);
-
/*
- * query server network interfaces, in case they change
+ * query server network interfaces, in case they change.
+ * Also mark the session as pending this update while the query
+ * is in progress. This will be used to avoid calling
+ * smb2_reconnect recursively.
*/
+ ses->flags |= CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
xid = get_xid();
rc = server->ops->query_server_interfaces(xid, tcon, false);
free_xid(xid);
+ ses->flags &= ~CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES;
+
+ if (!tcon->ipc && !tcon->dummy)
+ queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
+ (SMB_INTERFACE_POLL_INTERVAL * HZ));
+
+ mutex_unlock(&ses->session_mutex);
if (rc == -EOPNOTSUPP && ses->chan_count > 1) {
/*
@@ -438,11 +448,8 @@ skip_sess_setup:
if (ses->chan_max > ses->chan_count &&
ses->iface_count &&
!SERVER_IS_CHAN(server)) {
- if (ses->chan_count == 1) {
+ if (ses->chan_count == 1)
cifs_server_dbg(VFS, "supports multichannel now\n");
- queue_delayed_work(cifsiod_wq, &tcon->query_interfaces,
- (SMB_INTERFACE_POLL_INTERVAL * HZ));
- }
cifs_try_adding_channels(ses);
}
@@ -560,11 +567,18 @@ static int smb2_ioctl_req_init(u32 opcode, struct cifs_tcon *tcon,
struct TCP_Server_Info *server,
void **request_buf, unsigned int *total_len)
{
- /* Skip reconnect only for FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs */
- if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) {
+ /*
+ * Skip reconnect in one of the following cases:
+ * 1. For FSCTL_VALIDATE_NEGOTIATE_INFO IOCTLs
+	 * 2. For the FSCTL_QUERY_NETWORK_INTERFACE_INFO IOCTL when called from
+	 *    smb2_reconnect (indicated by the CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES ses flag)
+ */
+ if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO ||
+ (opcode == FSCTL_QUERY_NETWORK_INTERFACE_INFO &&
+ (tcon->ses->flags & CIFS_SES_FLAGS_PENDING_QUERY_INTERFACES)))
return __smb2_plain_req_init(SMB2_IOCTL, tcon, server,
request_buf, total_len);
- }
+
return smb2_plain_req_init(SMB2_IOCTL, tcon, server,
request_buf, total_len);
}
@@ -2392,11 +2406,16 @@ static int
add_lease_context(struct TCP_Server_Info *server,
struct smb2_create_req *req,
struct kvec *iov,
- unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
+ unsigned int *num_iovec,
+ u8 *lease_key,
+ __u8 *oplock,
+ u8 *parent_lease_key,
+ __le32 flags)
{
unsigned int num = *num_iovec;
- iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
+ iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock,
+ parent_lease_key, flags);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = server->vals->create_lease_size;
@@ -2921,6 +2940,7 @@ replay_again:
req->CreateContextsOffset = cpu_to_le32(
sizeof(struct smb2_create_req) +
iov[1].iov_len);
+ le32_add_cpu(&req->CreateContextsLength, iov[n_iov-1].iov_len);
pc_buf = iov[n_iov-1].iov_base;
}
@@ -2967,7 +2987,7 @@ replay_again:
/* Eventually save off posix specific response info and timestamps */
err_free_rsp_buf:
- free_rsp_buf(resp_buftype, rsp);
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
kfree(pc_buf);
err_free_req:
cifs_small_buf_release(req);
@@ -3068,7 +3088,9 @@ SMB2_open_init(struct cifs_tcon *tcon, struct TCP_Server_Info *server,
req->RequestedOplockLevel = *oplock; /* no srv lease support */
else {
rc = add_lease_context(server, req, iov, &n_iov,
- oparms->fid->lease_key, oplock);
+ oparms->fid->lease_key, oplock,
+ oparms->fid->parent_lease_key,
+ oparms->lease_flags);
if (rc)
return rc;
}
@@ -4092,12 +4114,8 @@ static void cifs_renegotiate_iosize(struct TCP_Server_Info *server,
return;
spin_lock(&tcon->sb_list_lock);
- list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link) {
- cifs_sb->ctx->rsize =
- server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
- cifs_sb->ctx->wsize =
- server->ops->negotiate_wsize(tcon, cifs_sb->ctx);
- }
+ list_for_each_entry(cifs_sb, &tcon->cifs_sb_list, tcon_sb_link)
+ cifs_negotiate_iosize(server, cifs_sb->ctx, tcon);
spin_unlock(&tcon->sb_list_lock);
}
@@ -4211,10 +4229,8 @@ void smb2_reconnect_server(struct work_struct *work)
}
goto done;
}
-
tcon->status = TID_GOOD;
- tcon->retry = false;
- tcon->need_reconnect = false;
+ tcon->dummy = true;
/* now reconnect sessions for necessary channels */
list_for_each_entry_safe(ses, ses2, &tmp_ses_list, rlist) {
@@ -4445,10 +4461,10 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a RDMA write, fill in and append
- * smbd_buffer_descriptor_v1 to the end of read request
+ * smbdirect_buffer_descriptor_v1 to the end of read request
*/
if (rdata && smb3_use_rdma_offload(io_parms)) {
- struct smbd_buffer_descriptor_v1 *v1;
+ struct smbdirect_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
rdata->mr = smbd_register_mr(server->smbd_conn, &rdata->subreq.io_iter,
@@ -4462,8 +4478,8 @@ smb2_new_read_req(void **buf, unsigned int *total_len,
req->ReadChannelInfoOffset =
cpu_to_le16(offsetof(struct smb2_read_req, Buffer));
req->ReadChannelInfoLength =
- cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
- v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+ cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
+ v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
v1->offset = cpu_to_le64(rdata->mr->mr->iova);
v1->token = cpu_to_le32(rdata->mr->mr->rkey);
v1->length = cpu_to_le32(rdata->mr->mr->length);
@@ -4549,7 +4565,11 @@ smb2_readv_callback(struct mid_q_entry *mid)
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_REQUEST_SUBMITTED:
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_req_submitted);
+ goto do_retry;
case MID_RETRY_NEEDED:
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_retry_needed);
+do_retry:
__set_bit(NETFS_SREQ_NEED_RETRY, &rdata->subreq.flags);
rdata->result = -EAGAIN;
if (server->sign && rdata->got_bytes)
@@ -4560,11 +4580,15 @@ smb2_readv_callback(struct mid_q_entry *mid)
cifs_stats_bytes_read(tcon, rdata->got_bytes);
break;
case MID_RESPONSE_MALFORMED:
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_malformed);
credits.value = le16_to_cpu(shdr->CreditRequest);
credits.instance = server->reconnect_instance;
- fallthrough;
+ rdata->result = -EIO;
+ break;
default:
+ trace_netfs_sreq(&rdata->subreq, netfs_sreq_trace_io_unknown);
rdata->result = -EIO;
+ break;
}
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
@@ -4817,11 +4841,14 @@ smb2_writev_callback(struct mid_q_entry *mid)
switch (mid->mid_state) {
case MID_RESPONSE_RECEIVED:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
credits.instance = server->reconnect_instance;
result = smb2_check_receive(mid, server, 0);
- if (result != 0)
+ if (result != 0) {
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_bad);
break;
+ }
written = le32_to_cpu(rsp->DataLength);
/*
@@ -4843,14 +4870,23 @@ smb2_writev_callback(struct mid_q_entry *mid)
}
break;
case MID_REQUEST_SUBMITTED:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_req_submitted);
+ __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
+ result = -EAGAIN;
+ break;
case MID_RETRY_NEEDED:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_retry_needed);
+ __set_bit(NETFS_SREQ_NEED_RETRY, &wdata->subreq.flags);
result = -EAGAIN;
break;
case MID_RESPONSE_MALFORMED:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_malformed);
credits.value = le16_to_cpu(rsp->hdr.CreditRequest);
credits.instance = server->reconnect_instance;
- fallthrough;
+ result = -EIO;
+ break;
default:
+ trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_unknown);
result = -EIO;
break;
}
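Both the read and write completion callbacks above now map each mid state to an explicit result instead of falling through: submitted or retry-needed states flag the netfs subrequest for retry with -EAGAIN, while malformed or unknown states fail with -EIO. A small illustrative mapping (enum and flag names are placeholders):

#include <errno.h>
#include <stdbool.h>

enum mid_state_mock { MID_OK, MID_SUBMITTED, MID_RETRY, MID_MALFORMED, MID_OTHER };

static int map_mid_result(enum mid_state_mock st, bool *need_retry)
{
	*need_retry = false;
	switch (st) {
	case MID_OK:
		return 0;
	case MID_SUBMITTED:
	case MID_RETRY:
		*need_retry = true;	/* NETFS_SREQ_NEED_RETRY in the hunks */
		return -EAGAIN;
	case MID_MALFORMED:
	default:
		return -EIO;		/* explicit break, no fallthrough */
	}
}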
@@ -4890,8 +4926,7 @@ smb2_writev_callback(struct mid_q_entry *mid)
server->credits, server->in_flight,
0, cifs_trace_rw_credits_write_response_clear);
wdata->credits.value = 0;
- trace_netfs_sreq(&wdata->subreq, netfs_sreq_trace_io_progress);
- cifs_write_subrequest_terminated(wdata, result ?: written, true);
+ cifs_write_subrequest_terminated(wdata, result ?: written);
release_mid(mid);
trace_smb3_rw_credits(rreq_debug_id, subreq_debug_index, 0,
server->credits, server->in_flight,
@@ -4971,10 +5006,10 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
#ifdef CONFIG_CIFS_SMB_DIRECT
/*
* If we want to do a server RDMA read, fill in and append
- * smbd_buffer_descriptor_v1 to the end of write request
+ * smbdirect_buffer_descriptor_v1 to the end of write request
*/
if (smb3_use_rdma_offload(io_parms)) {
- struct smbd_buffer_descriptor_v1 *v1;
+ struct smbdirect_buffer_descriptor_v1 *v1;
bool need_invalidate = server->dialect == SMB30_PROT_ID;
wdata->mr = smbd_register_mr(server->smbd_conn, &wdata->subreq.io_iter,
@@ -4993,8 +5028,8 @@ smb2_async_writev(struct cifs_io_subrequest *wdata)
req->WriteChannelInfoOffset =
cpu_to_le16(offsetof(struct smb2_write_req, Buffer));
req->WriteChannelInfoLength =
- cpu_to_le16(sizeof(struct smbd_buffer_descriptor_v1));
- v1 = (struct smbd_buffer_descriptor_v1 *) &req->Buffer[0];
+ cpu_to_le16(sizeof(struct smbdirect_buffer_descriptor_v1));
+ v1 = (struct smbdirect_buffer_descriptor_v1 *) &req->Buffer[0];
v1->offset = cpu_to_le64(wdata->mr->mr->iova);
v1->token = cpu_to_le32(wdata->mr->mr->rkey);
v1->length = cpu_to_le32(wdata->mr->mr->length);
@@ -5064,7 +5099,7 @@ out:
-(int)wdata->credits.value,
cifs_trace_rw_credits_write_response_clear);
add_credits_and_wake_if(wdata->server, &wdata->credits, 0);
- cifs_write_subrequest_terminated(wdata, rc, true);
+ cifs_write_subrequest_terminated(wdata, rc);
}
}
@@ -5920,71 +5955,6 @@ posix_qfsinf_exit:
}
int
-SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
-{
- struct smb_rqst rqst;
- struct smb2_query_info_rsp *rsp = NULL;
- struct kvec iov;
- struct kvec rsp_iov;
- int rc = 0;
- int resp_buftype;
- struct cifs_ses *ses = tcon->ses;
- struct TCP_Server_Info *server;
- struct smb2_fs_full_size_info *info = NULL;
- int flags = 0;
- int retries = 0, cur_sleep = 1;
-
-replay_again:
- /* reinitialize for possible replay */
- flags = 0;
- server = cifs_pick_channel(ses);
-
- rc = build_qfs_info_req(&iov, tcon, server,
- FS_FULL_SIZE_INFORMATION,
- sizeof(struct smb2_fs_full_size_info),
- persistent_fid, volatile_fid);
- if (rc)
- return rc;
-
- if (smb3_encryption_required(tcon))
- flags |= CIFS_TRANSFORM_REQ;
-
- memset(&rqst, 0, sizeof(struct smb_rqst));
- rqst.rq_iov = &iov;
- rqst.rq_nvec = 1;
-
- if (retries)
- smb2_set_replay(server, &rqst);
-
- rc = cifs_send_recv(xid, ses, server,
- &rqst, &resp_buftype, flags, &rsp_iov);
- free_qfs_info_req(&iov);
- if (rc) {
- cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
- goto qfsinf_exit;
- }
- rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
-
- info = (struct smb2_fs_full_size_info *)(
- le16_to_cpu(rsp->OutputBufferOffset) + (char *)rsp);
- rc = smb2_validate_iov(le16_to_cpu(rsp->OutputBufferOffset),
- le32_to_cpu(rsp->OutputBufferLength), &rsp_iov,
- sizeof(struct smb2_fs_full_size_info));
- if (!rc)
- smb2_copy_fs_info_to_kstatfs(info, fsdata);
-
-qfsinf_exit:
- free_rsp_buf(resp_buftype, rsp_iov.iov_base);
-
- if (is_replayable_error(rc) &&
- smb2_should_replay(tcon, &retries, &cur_sleep))
- goto replay_again;
-
- return rc;
-}
-
-int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int level)
{
diff --git a/fs/smb/client/smb2proto.h b/fs/smb/client/smb2proto.h
index 4662c7e2d259..6e805ece6a7b 100644
--- a/fs/smb/client/smb2proto.h
+++ b/fs/smb/client/smb2proto.h
@@ -54,7 +54,7 @@ extern int smb3_handle_read_data(struct TCP_Server_Info *server,
extern int smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *path,
__u32 *reparse_tag);
-struct inode *smb2_get_reparse_inode(struct cifs_open_info_data *data,
+struct inode *smb2_create_reparse_inode(struct cifs_open_info_data *data,
struct super_block *sb,
const unsigned int xid,
struct cifs_tcon *tcon,
@@ -259,9 +259,6 @@ extern int smb2_handle_cancelled_close(struct cifs_tcon *tcon,
__u64 volatile_fid);
extern int smb2_handle_cancelled_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server);
void smb2_cancelled_close_fid(struct work_struct *work);
-extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
- u64 persistent_file_id, u64 volatile_file_id,
- struct kstatfs *FSData);
extern int SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct kstatfs *FSData);
@@ -317,9 +314,6 @@ int smb311_posix_query_path_info(const unsigned int xid,
int posix_info_parse(const void *beg, const void *end,
struct smb2_posix_info_parsed *out);
int posix_info_sid_size(const void *beg, const void *end);
-int smb2_create_reparse_symlink(const unsigned int xid, struct inode *inode,
- struct dentry *dentry, struct cifs_tcon *tcon,
- const char *full_path, const char *symname);
int smb2_make_nfs_node(unsigned int xid, struct inode *inode,
struct dentry *dentry, struct cifs_tcon *tcon,
const char *full_path, umode_t mode, dev_t dev);
diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
index 475b36c27f65..ff9ef7fcd010 100644
--- a/fs/smb/client/smb2transport.c
+++ b/fs/smb/client/smb2transport.c
@@ -840,9 +840,9 @@ smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server,
*mid = smb2_mid_entry_alloc(shdr, server);
if (*mid == NULL)
return -ENOMEM;
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
list_add_tail(&(*mid)->qhead, &server->pending_mid_q);
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
return 0;
}
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index b0b7254661e9..c628e91c328b 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -7,32 +7,29 @@
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/folio_queue.h>
+#include "../common/smbdirect/smbdirect_pdu.h"
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
#include "smb2proto.h"
-static struct smbd_response *get_empty_queue_buffer(
- struct smbd_connection *info);
-static struct smbd_response *get_receive_buffer(
+static struct smbdirect_recv_io *get_receive_buffer(
struct smbd_connection *info);
static void put_receive_buffer(
struct smbd_connection *info,
- struct smbd_response *response);
+ struct smbdirect_recv_io *response);
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
static void destroy_receive_buffers(struct smbd_connection *info);
-static void put_empty_packet(
- struct smbd_connection *info, struct smbd_response *response);
static void enqueue_reassembly(
struct smbd_connection *info,
- struct smbd_response *response, int data_length);
-static struct smbd_response *_get_first_reassembly(
+ struct smbdirect_recv_io *response, int data_length);
+static struct smbdirect_recv_io *_get_first_reassembly(
struct smbd_connection *info);
static int smbd_post_recv(
struct smbd_connection *info,
- struct smbd_response *response);
+ struct smbdirect_recv_io *response);
static int smbd_post_send_empty(struct smbd_connection *info);
@@ -50,9 +47,6 @@ struct smb_extract_to_rdma {
static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
struct smb_extract_to_rdma *rdma);
-/* SMBD version number */
-#define SMBD_V1 0x0100
-
/* Port numbers for SMBD transport */
#define SMB_PORT 445
#define SMBD_PORT 5445
@@ -165,10 +159,11 @@ static void smbd_disconnect_rdma_work(struct work_struct *work)
{
struct smbd_connection *info =
container_of(work, struct smbd_connection, disconnect_work);
+ struct smbdirect_socket *sc = &info->socket;
- if (info->transport_status == SMBD_CONNECTED) {
- info->transport_status = SMBD_DISCONNECTING;
- rdma_disconnect(info->id);
+ if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
+ rdma_disconnect(sc->rdma.cm_id);
}
}
@@ -182,9 +177,11 @@ static int smbd_conn_upcall(
struct rdma_cm_id *id, struct rdma_cm_event *event)
{
struct smbd_connection *info = id->context;
+ struct smbdirect_socket *sc = &info->socket;
+ const char *event_name = rdma_event_msg(event->event);
- log_rdma_event(INFO, "event=%d status=%d\n",
- event->event, event->status);
+ log_rdma_event(INFO, "event=%s status=%d\n",
+ event_name, event->status);
switch (event->event) {
case RDMA_CM_EVENT_ADDR_RESOLVED:
@@ -194,45 +191,50 @@ static int smbd_conn_upcall(
break;
case RDMA_CM_EVENT_ADDR_ERROR:
+ log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
info->ri_rc = -EHOSTUNREACH;
complete(&info->ri_done);
break;
case RDMA_CM_EVENT_ROUTE_ERROR:
+ log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
info->ri_rc = -ENETUNREACH;
complete(&info->ri_done);
break;
case RDMA_CM_EVENT_ESTABLISHED:
- log_rdma_event(INFO, "connected event=%d\n", event->event);
- info->transport_status = SMBD_CONNECTED;
- wake_up_interruptible(&info->conn_wait);
+ log_rdma_event(INFO, "connected event=%s\n", event_name);
+ sc->status = SMBDIRECT_SOCKET_CONNECTED;
+ wake_up_interruptible(&info->status_wait);
break;
case RDMA_CM_EVENT_CONNECT_ERROR:
case RDMA_CM_EVENT_UNREACHABLE:
case RDMA_CM_EVENT_REJECTED:
- log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
- info->transport_status = SMBD_DISCONNECTED;
- wake_up_interruptible(&info->conn_wait);
+ log_rdma_event(ERR, "connecting failed event=%s\n", event_name);
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ wake_up_interruptible(&info->status_wait);
break;
case RDMA_CM_EVENT_DEVICE_REMOVAL:
case RDMA_CM_EVENT_DISCONNECTED:
/* This happens when we fail the negotiation */
- if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
- info->transport_status = SMBD_DISCONNECTED;
- wake_up(&info->conn_wait);
+ if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
+ log_rdma_event(ERR, "event=%s during negotiation\n", event_name);
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ wake_up(&info->status_wait);
break;
}
- info->transport_status = SMBD_DISCONNECTED;
- wake_up_interruptible(&info->disconn_wait);
- wake_up_interruptible(&info->wait_reassembly_queue);
+ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ wake_up_interruptible(&info->status_wait);
+ wake_up_interruptible(&sc->recv_io.reassembly.wait_queue);
wake_up_interruptible_all(&info->wait_send_queue);
break;
default:
+ log_rdma_event(ERR, "unexpected event=%s status=%d\n",
+ event_name, event->status);
break;
}
@@ -259,12 +261,12 @@ smbd_qp_async_error_upcall(struct ib_event *event, void *context)
}
}
-static inline void *smbd_request_payload(struct smbd_request *request)
+static inline void *smbdirect_send_io_payload(struct smbdirect_send_io *request)
{
return (void *)request->packet;
}
-static inline void *smbd_response_payload(struct smbd_response *response)
+static inline void *smbdirect_recv_io_payload(struct smbdirect_recv_io *response)
{
return (void *)response->packet;
}
@@ -273,33 +275,38 @@ static inline void *smbd_response_payload(struct smbd_response *response)
static void send_done(struct ib_cq *cq, struct ib_wc *wc)
{
int i;
- struct smbd_request *request =
- container_of(wc->wr_cqe, struct smbd_request, cqe);
+ struct smbdirect_send_io *request =
+ container_of(wc->wr_cqe, struct smbdirect_send_io, cqe);
+ struct smbdirect_socket *sc = request->socket;
+ struct smbd_connection *info =
+ container_of(sc, struct smbd_connection, socket);
- log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+ log_rdma_send(INFO, "smbdirect_send_io 0x%p completed wc->status=%d\n",
request, wc->status);
- if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
- log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
- wc->status, wc->opcode);
- smbd_disconnect_rdma_connection(request->info);
- }
-
for (i = 0; i < request->num_sge; i++)
- ib_dma_unmap_single(request->info->id->device,
+ ib_dma_unmap_single(sc->ib.dev,
request->sge[i].addr,
request->sge[i].length,
DMA_TO_DEVICE);
- if (atomic_dec_and_test(&request->info->send_pending))
- wake_up(&request->info->wait_send_pending);
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+ log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
+ wc->status, wc->opcode);
+ mempool_free(request, sc->send_io.mem.pool);
+ smbd_disconnect_rdma_connection(info);
+ return;
+ }
- wake_up(&request->info->wait_post_send);
+ if (atomic_dec_and_test(&info->send_pending))
+ wake_up(&info->wait_send_pending);
- mempool_free(request, request->info->request_mempool);
+ wake_up(&info->wait_post_send);
+
+ mempool_free(request, sc->send_io.mem.pool);
}
-static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
+static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
{
log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
resp->min_version, resp->max_version,
@@ -315,18 +322,21 @@ static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
* return value: true if negotiation is a success, false if failed
*/
static bool process_negotiation_response(
- struct smbd_response *response, int packet_length)
+ struct smbdirect_recv_io *response, int packet_length)
{
- struct smbd_connection *info = response->info;
- struct smbd_negotiate_resp *packet = smbd_response_payload(response);
+ struct smbdirect_socket *sc = response->socket;
+ struct smbd_connection *info =
+ container_of(sc, struct smbd_connection, socket);
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
+ struct smbdirect_negotiate_resp *packet = smbdirect_recv_io_payload(response);
- if (packet_length < sizeof(struct smbd_negotiate_resp)) {
+ if (packet_length < sizeof(struct smbdirect_negotiate_resp)) {
log_rdma_event(ERR,
"error: packet_length=%d\n", packet_length);
return false;
}
- if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
+ if (le16_to_cpu(packet->negotiated_version) != SMBDIRECT_V1) {
log_rdma_event(ERR, "error: negotiated_version=%x\n",
le16_to_cpu(packet->negotiated_version));
return false;
@@ -347,20 +357,20 @@ static bool process_negotiation_response(
atomic_set(&info->receive_credits, 0);
- if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
+ if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) {
log_rdma_event(ERR, "error: preferred_send_size=%d\n",
le32_to_cpu(packet->preferred_send_size));
return false;
}
- info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
+ sp->max_recv_size = le32_to_cpu(packet->preferred_send_size);
if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
log_rdma_event(ERR, "error: max_receive_size=%d\n",
le32_to_cpu(packet->max_receive_size));
return false;
}
- info->max_send_size = min_t(int, info->max_send_size,
- le32_to_cpu(packet->max_receive_size));
+ sp->max_send_size = min_t(u32, sp->max_send_size,
+ le32_to_cpu(packet->max_receive_size));
if (le32_to_cpu(packet->max_fragmented_size) <
SMBD_MIN_FRAGMENTED_SIZE) {
@@ -368,33 +378,34 @@ static bool process_negotiation_response(
le32_to_cpu(packet->max_fragmented_size));
return false;
}
- info->max_fragmented_send_size =
+ sp->max_fragmented_send_size =
le32_to_cpu(packet->max_fragmented_size);
info->rdma_readwrite_threshold =
- rdma_readwrite_threshold > info->max_fragmented_send_size ?
- info->max_fragmented_send_size :
+ rdma_readwrite_threshold > sp->max_fragmented_send_size ?
+ sp->max_fragmented_send_size :
rdma_readwrite_threshold;
- info->max_readwrite_size = min_t(u32,
+ sp->max_read_write_size = min_t(u32,
le32_to_cpu(packet->max_readwrite_size),
info->max_frmr_depth * PAGE_SIZE);
- info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
+ info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
+ sc->recv_io.expected = SMBDIRECT_EXPECT_DATA_TRANSFER;
return true;
}
static void smbd_post_send_credits(struct work_struct *work)
{
int ret = 0;
- int use_receive_queue = 1;
int rc;
- struct smbd_response *response;
+ struct smbdirect_recv_io *response;
struct smbd_connection *info =
container_of(work, struct smbd_connection,
post_send_credits_work);
+ struct smbdirect_socket *sc = &info->socket;
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
wake_up(&info->wait_receive_queues);
return;
}
@@ -402,20 +413,10 @@ static void smbd_post_send_credits(struct work_struct *work)
if (info->receive_credit_target >
atomic_read(&info->receive_credits)) {
while (true) {
- if (use_receive_queue)
- response = get_receive_buffer(info);
- else
- response = get_empty_queue_buffer(info);
- if (!response) {
- /* now switch to empty packet queue */
- if (use_receive_queue) {
- use_receive_queue = 0;
- continue;
- } else
- break;
- }
+ response = get_receive_buffer(info);
+ if (!response)
+ break;
- response->type = SMBD_TRANSFER_DATA;
response->first_segment = false;
rc = smbd_post_recv(info, response);
if (rc) {
@@ -448,20 +449,21 @@ static void smbd_post_send_credits(struct work_struct *work)
/* Called from softirq, when recv is done */
static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
{
- struct smbd_data_transfer *data_transfer;
- struct smbd_response *response =
- container_of(wc->wr_cqe, struct smbd_response, cqe);
- struct smbd_connection *info = response->info;
+ struct smbdirect_data_transfer *data_transfer;
+ struct smbdirect_recv_io *response =
+ container_of(wc->wr_cqe, struct smbdirect_recv_io, cqe);
+ struct smbdirect_socket *sc = response->socket;
+ struct smbd_connection *info =
+ container_of(sc, struct smbd_connection, socket);
int data_length = 0;
log_rdma_recv(INFO, "response=0x%p type=%d wc status=%d wc opcode %d byte_len=%d pkey_index=%u\n",
- response, response->type, wc->status, wc->opcode,
+ response, sc->recv_io.expected, wc->status, wc->opcode,
wc->byte_len, wc->pkey_index);
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
wc->status, wc->opcode);
- smbd_disconnect_rdma_connection(info);
goto error;
}
@@ -471,43 +473,31 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
response->sge.length,
DMA_FROM_DEVICE);
- switch (response->type) {
+ switch (sc->recv_io.expected) {
/* SMBD negotiation response */
- case SMBD_NEGOTIATE_RESP:
- dump_smbd_negotiate_resp(smbd_response_payload(response));
- info->full_packet_received = true;
+ case SMBDIRECT_EXPECT_NEGOTIATE_REP:
+ dump_smbdirect_negotiate_resp(smbdirect_recv_io_payload(response));
+ sc->recv_io.reassembly.full_packet_received = true;
info->negotiate_done =
process_negotiation_response(response, wc->byte_len);
+ put_receive_buffer(info, response);
complete(&info->negotiate_completion);
- break;
+ return;
/* SMBD data transfer packet */
- case SMBD_TRANSFER_DATA:
- data_transfer = smbd_response_payload(response);
+ case SMBDIRECT_EXPECT_DATA_TRANSFER:
+ data_transfer = smbdirect_recv_io_payload(response);
data_length = le32_to_cpu(data_transfer->data_length);
- /*
- * If this is a packet with data playload place the data in
- * reassembly queue and wake up the reading thread
- */
if (data_length) {
- if (info->full_packet_received)
+ if (sc->recv_io.reassembly.full_packet_received)
response->first_segment = true;
if (le32_to_cpu(data_transfer->remaining_data_length))
- info->full_packet_received = false;
+ sc->recv_io.reassembly.full_packet_received = false;
else
- info->full_packet_received = true;
-
- enqueue_reassembly(
- info,
- response,
- data_length);
- } else
- put_empty_packet(info, response);
-
- if (data_length)
- wake_up_interruptible(&info->wait_reassembly_queue);
+ sc->recv_io.reassembly.full_packet_received = true;
+ }
atomic_dec(&info->receive_credits);
info->receive_credit_target =
@@ -531,19 +521,35 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
/* Send a KEEP_ALIVE response right away if requested */
info->keep_alive_requested = KEEP_ALIVE_NONE;
if (le16_to_cpu(data_transfer->flags) &
- SMB_DIRECT_RESPONSE_REQUESTED) {
+ SMBDIRECT_FLAG_RESPONSE_REQUESTED) {
info->keep_alive_requested = KEEP_ALIVE_PENDING;
}
+ /*
+	 * If this is a packet with data payload, place the data in
+ * reassembly queue and wake up the reading thread
+ */
+ if (data_length) {
+ enqueue_reassembly(info, response, data_length);
+ wake_up_interruptible(&sc->recv_io.reassembly.wait_queue);
+ } else
+ put_receive_buffer(info, response);
+
return;
- default:
- log_rdma_recv(ERR,
- "unexpected response type=%d\n", response->type);
+ case SMBDIRECT_EXPECT_NEGOTIATE_REQ:
+ /* Only server... */
+ break;
}
+ /*
+ * This is an internal error!
+ */
+ log_rdma_recv(ERR, "unexpected response type=%d\n", sc->recv_io.expected);
+ WARN_ON_ONCE(sc->recv_io.expected != SMBDIRECT_EXPECT_DATA_TRANSFER);
error:
put_receive_buffer(info, response);
+ smbd_disconnect_rdma_connection(info);
}
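
A condensed sketch of the receive-completion decision after this rework (illustrative, not the patch text): sc->recv_io.expected replaces the old per-buffer response->type, and with the empty-packet queue gone only payload-bearing packets are queued for reassembly.

        /* Sketch: expected-type transitions on the client side.
         *   SMBDIRECT_EXPECT_NEGOTIATE_REP  - set before the negotiate receive is posted
         *   SMBDIRECT_EXPECT_DATA_TRANSFER  - set once negotiation has been processed
         *   SMBDIRECT_EXPECT_NEGOTIATE_REQ  - server-only, treated as an internal error here
         */
        if (data_length) {
                enqueue_reassembly(info, response, data_length);
                wake_up_interruptible(&sc->recv_io.reassembly.wait_queue);
        } else {
                put_receive_buffer(info, response);  /* credit/keep-alive packet, no payload */
        }
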
static struct rdma_cm_id *smbd_create_id(
@@ -635,32 +641,34 @@ static int smbd_ia_open(
struct smbd_connection *info,
struct sockaddr *dstaddr, int port)
{
+ struct smbdirect_socket *sc = &info->socket;
int rc;
- info->id = smbd_create_id(info, dstaddr, port);
- if (IS_ERR(info->id)) {
- rc = PTR_ERR(info->id);
+ sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
+ if (IS_ERR(sc->rdma.cm_id)) {
+ rc = PTR_ERR(sc->rdma.cm_id);
goto out1;
}
+ sc->ib.dev = sc->rdma.cm_id->device;
- if (!frwr_is_supported(&info->id->device->attrs)) {
+ if (!frwr_is_supported(&sc->ib.dev->attrs)) {
log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
- info->id->device->attrs.device_cap_flags,
- info->id->device->attrs.max_fast_reg_page_list_len);
+ sc->ib.dev->attrs.device_cap_flags,
+ sc->ib.dev->attrs.max_fast_reg_page_list_len);
rc = -EPROTONOSUPPORT;
goto out2;
}
info->max_frmr_depth = min_t(int,
smbd_max_frmr_depth,
- info->id->device->attrs.max_fast_reg_page_list_len);
+ sc->ib.dev->attrs.max_fast_reg_page_list_len);
info->mr_type = IB_MR_TYPE_MEM_REG;
- if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
info->mr_type = IB_MR_TYPE_SG_GAPS;
- info->pd = ib_alloc_pd(info->id->device, 0);
- if (IS_ERR(info->pd)) {
- rc = PTR_ERR(info->pd);
+ sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
+ if (IS_ERR(sc->ib.pd)) {
+ rc = PTR_ERR(sc->ib.pd);
log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
goto out2;
}
@@ -668,8 +676,8 @@ static int smbd_ia_open(
return 0;
out2:
- rdma_destroy_id(info->id);
- info->id = NULL;
+ rdma_destroy_id(sc->rdma.cm_id);
+ sc->rdma.cm_id = NULL;
out1:
return rc;
@@ -683,41 +691,43 @@ out1:
*/
static int smbd_post_send_negotiate_req(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_send_wr send_wr;
int rc = -ENOMEM;
- struct smbd_request *request;
- struct smbd_negotiate_req *packet;
+ struct smbdirect_send_io *request;
+ struct smbdirect_negotiate_req *packet;
- request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+ request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
if (!request)
return rc;
- request->info = info;
+ request->socket = sc;
- packet = smbd_request_payload(request);
- packet->min_version = cpu_to_le16(SMBD_V1);
- packet->max_version = cpu_to_le16(SMBD_V1);
+ packet = smbdirect_send_io_payload(request);
+ packet->min_version = cpu_to_le16(SMBDIRECT_V1);
+ packet->max_version = cpu_to_le16(SMBDIRECT_V1);
packet->reserved = 0;
- packet->credits_requested = cpu_to_le16(info->send_credit_target);
- packet->preferred_send_size = cpu_to_le32(info->max_send_size);
- packet->max_receive_size = cpu_to_le32(info->max_receive_size);
+ packet->credits_requested = cpu_to_le16(sp->send_credit_target);
+ packet->preferred_send_size = cpu_to_le32(sp->max_send_size);
+ packet->max_receive_size = cpu_to_le32(sp->max_recv_size);
packet->max_fragmented_size =
- cpu_to_le32(info->max_fragmented_recv_size);
+ cpu_to_le32(sp->max_fragmented_recv_size);
request->num_sge = 1;
request->sge[0].addr = ib_dma_map_single(
- info->id->device, (void *)packet,
+ sc->ib.dev, (void *)packet,
sizeof(*packet), DMA_TO_DEVICE);
- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
rc = -EIO;
goto dma_mapping_failed;
}
request->sge[0].length = sizeof(*packet);
- request->sge[0].lkey = info->pd->local_dma_lkey;
+ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
ib_dma_sync_single_for_device(
- info->id->device, request->sge[0].addr,
+ sc->ib.dev, request->sge[0].addr,
request->sge[0].length, DMA_TO_DEVICE);
request->cqe.done = send_done;
@@ -734,20 +744,20 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
request->sge[0].length, request->sge[0].lkey);
atomic_inc(&info->send_pending);
- rc = ib_post_send(info->id->qp, &send_wr, NULL);
+ rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
if (!rc)
return 0;
/* if we reach here, post send failed */
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
atomic_dec(&info->send_pending);
- ib_dma_unmap_single(info->id->device, request->sge[0].addr,
+ ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
request->sge[0].length, DMA_TO_DEVICE);
smbd_disconnect_rdma_connection(info);
dma_mapping_failed:
- mempool_free(request, info->request_mempool);
+ mempool_free(request, sc->send_io.mem.pool);
return rc;
}
@@ -774,10 +784,10 @@ static int manage_credits_prior_sending(struct smbd_connection *info)
/*
* Check if we need to send a KEEP_ALIVE message
* The idle connection timer triggers a KEEP_ALIVE message when expires
- * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
+ * SMBDIRECT_FLAG_RESPONSE_REQUESTED is set in the message flag to have peer send
* back a response.
* return value:
- * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
+ * 1 if SMBDIRECT_FLAG_RESPONSE_REQUESTED needs to be set
* 0: otherwise
*/
static int manage_keep_alive_before_sending(struct smbd_connection *info)
@@ -791,8 +801,10 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
/* Post the send request */
static int smbd_post_send(struct smbd_connection *info,
- struct smbd_request *request)
+ struct smbdirect_send_io *request)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_send_wr send_wr;
int rc, i;
@@ -801,7 +813,7 @@ static int smbd_post_send(struct smbd_connection *info,
"rdma_request sge[%d] addr=0x%llx length=%u\n",
i, request->sge[i].addr, request->sge[i].length);
ib_dma_sync_single_for_device(
- info->id->device,
+ sc->ib.dev,
request->sge[i].addr,
request->sge[i].length,
DMA_TO_DEVICE);
@@ -816,7 +828,7 @@ static int smbd_post_send(struct smbd_connection *info,
send_wr.opcode = IB_WR_SEND;
send_wr.send_flags = IB_SEND_SIGNALED;
- rc = ib_post_send(info->id->qp, &send_wr, NULL);
+ rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
if (rc) {
log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
smbd_disconnect_rdma_connection(info);
@@ -824,7 +836,7 @@ static int smbd_post_send(struct smbd_connection *info,
} else
/* Reset timer for idle connection after packet is sent */
mod_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
+ msecs_to_jiffies(sp->keepalive_interval_msec));
return rc;
}
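
The timer re-arm above is where the units change from seconds to milliseconds; a sketch of the equivalence, assuming the module parameter is still expressed in seconds:

        /* before: seconds * HZ */
        mod_delayed_work(info->workqueue, &info->idle_timer_work,
                         info->keep_alive_interval * HZ);
        /* after: the same delay, carried as milliseconds in the socket parameters */
        mod_delayed_work(info->workqueue, &info->idle_timer_work,
                         msecs_to_jiffies(sp->keepalive_interval_msec));

Both arm the same deadline as long as keepalive_interval_msec is filled in as smbd_keep_alive_interval * 1000, which is what _smbd_get_connection() does later in this patch.
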
@@ -833,22 +845,24 @@ static int smbd_post_send_iter(struct smbd_connection *info,
struct iov_iter *iter,
int *_remaining_data_length)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
int i, rc;
int header_length;
int data_length;
- struct smbd_request *request;
- struct smbd_data_transfer *packet;
+ struct smbdirect_send_io *request;
+ struct smbdirect_data_transfer *packet;
int new_credits = 0;
wait_credit:
/* Wait for send credits. A SMBD packet needs one credit */
rc = wait_event_interruptible(info->wait_send_queue,
atomic_read(&info->send_credits) > 0 ||
- info->transport_status != SMBD_CONNECTED);
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (rc)
goto err_wait_credit;
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
log_outgoing(ERR, "disconnected not sending on wait_credit\n");
rc = -EAGAIN;
goto err_wait_credit;
@@ -860,42 +874,44 @@ wait_credit:
wait_send_queue:
wait_event(info->wait_post_send,
- atomic_read(&info->send_pending) < info->send_credit_target ||
- info->transport_status != SMBD_CONNECTED);
+ atomic_read(&info->send_pending) < sp->send_credit_target ||
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
rc = -EAGAIN;
goto err_wait_send_queue;
}
if (unlikely(atomic_inc_return(&info->send_pending) >
- info->send_credit_target)) {
+ sp->send_credit_target)) {
atomic_dec(&info->send_pending);
goto wait_send_queue;
}
- request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+ request = mempool_alloc(sc->send_io.mem.pool, GFP_KERNEL);
if (!request) {
rc = -ENOMEM;
goto err_alloc;
}
- request->info = info;
+ request->socket = sc;
memset(request->sge, 0, sizeof(request->sge));
/* Fill in the data payload to find out how much data we can add */
if (iter) {
struct smb_extract_to_rdma extract = {
.nr_sge = 1,
- .max_sge = SMBDIRECT_MAX_SEND_SGE,
+ .max_sge = SMBDIRECT_SEND_IO_MAX_SGE,
.sge = request->sge,
- .device = info->id->device,
- .local_dma_lkey = info->pd->local_dma_lkey,
+ .device = sc->ib.dev,
+ .local_dma_lkey = sc->ib.pd->local_dma_lkey,
.direction = DMA_TO_DEVICE,
};
+ size_t payload_len = umin(*_remaining_data_length,
+ sp->max_send_size - sizeof(*packet));
- rc = smb_extract_iter_to_rdma(iter, *_remaining_data_length,
+ rc = smb_extract_iter_to_rdma(iter, payload_len,
&extract);
if (rc < 0)
goto err_dma;
@@ -908,8 +924,8 @@ wait_send_queue:
}
/* Fill in the packet header */
- packet = smbd_request_payload(request);
- packet->credits_requested = cpu_to_le16(info->send_credit_target);
+ packet = smbdirect_send_io_payload(request);
+ packet->credits_requested = cpu_to_le16(sp->send_credit_target);
new_credits = manage_credits_prior_sending(info);
atomic_add(new_credits, &info->receive_credits);
@@ -919,7 +935,7 @@ wait_send_queue:
packet->flags = 0;
if (manage_keep_alive_before_sending(info))
- packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
+ packet->flags |= cpu_to_le16(SMBDIRECT_FLAG_RESPONSE_REQUESTED);
packet->reserved = 0;
if (!data_length)
@@ -938,23 +954,23 @@ wait_send_queue:
le32_to_cpu(packet->remaining_data_length));
/* Map the packet to DMA */
- header_length = sizeof(struct smbd_data_transfer);
+ header_length = sizeof(struct smbdirect_data_transfer);
/* If this is a packet without payload, don't send padding */
if (!data_length)
- header_length = offsetof(struct smbd_data_transfer, padding);
+ header_length = offsetof(struct smbdirect_data_transfer, padding);
- request->sge[0].addr = ib_dma_map_single(info->id->device,
+ request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
(void *)packet,
header_length,
DMA_TO_DEVICE);
- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
+ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
rc = -EIO;
request->sge[0].addr = 0;
goto err_dma;
}
request->sge[0].length = header_length;
- request->sge[0].lkey = info->pd->local_dma_lkey;
+ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
rc = smbd_post_send(info, request);
if (!rc)
@@ -963,11 +979,11 @@ wait_send_queue:
err_dma:
for (i = 0; i < request->num_sge; i++)
if (request->sge[i].addr)
- ib_dma_unmap_single(info->id->device,
+ ib_dma_unmap_single(sc->ib.dev,
request->sge[i].addr,
request->sge[i].length,
DMA_TO_DEVICE);
- mempool_free(request, info->request_mempool);
+ mempool_free(request, sc->send_io.mem.pool);
/* roll back receive credits and credits to be offered */
spin_lock(&info->lock_new_credits_offered);
@@ -1000,25 +1016,48 @@ static int smbd_post_send_empty(struct smbd_connection *info)
return smbd_post_send_iter(info, NULL, &remaining_data_length);
}
+static int smbd_post_send_full_iter(struct smbd_connection *info,
+ struct iov_iter *iter,
+ int *_remaining_data_length)
+{
+ int rc = 0;
+
+ /*
+ * smbd_post_send_iter() respects the
+ * negotiated max_send_size, so we need to
+ * loop until the full iter is posted
+ */
+
+ while (iov_iter_count(iter) > 0) {
+ rc = smbd_post_send_iter(info, iter, _remaining_data_length);
+ if (rc < 0)
+ break;
+ }
+
+ return rc;
+}
+
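
A hypothetical usage sketch of the new helper (the sizes are assumptions, not values from the patch): each smbd_post_send_iter() pass consumes at most max_send_size minus the SMBDirect header from the iterator, so a large request iterator is sent as several fragments.

        /* Assume max_send_size = 8192; the header is sizeof(struct smbdirect_data_transfer) = 24,
         * so each pass moves at most 8168 bytes.  A 64 KiB rq_iter therefore takes nine passes. */
        int remaining = 64 * 1024;

        rc = smbd_post_send_full_iter(info, &rqst->rq_iter, &remaining);
        if (rc < 0)
                return rc;      /* any failed pass aborts the whole send */
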
/*
* Post a receive request to the transport
* The remote peer can only send data when a receive request is posted
* The interaction is controlled by send/receive credit system
*/
static int smbd_post_recv(
- struct smbd_connection *info, struct smbd_response *response)
+ struct smbd_connection *info, struct smbdirect_recv_io *response)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct ib_recv_wr recv_wr;
int rc = -EIO;
response->sge.addr = ib_dma_map_single(
- info->id->device, response->packet,
- info->max_receive_size, DMA_FROM_DEVICE);
- if (ib_dma_mapping_error(info->id->device, response->sge.addr))
+ sc->ib.dev, response->packet,
+ sp->max_recv_size, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
return rc;
- response->sge.length = info->max_receive_size;
- response->sge.lkey = info->pd->local_dma_lkey;
+ response->sge.length = sp->max_recv_size;
+ response->sge.lkey = sc->ib.pd->local_dma_lkey;
response->cqe.done = recv_done;
@@ -1027,10 +1066,11 @@ static int smbd_post_recv(
recv_wr.sg_list = &response->sge;
recv_wr.num_sge = 1;
- rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
+ rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL);
if (rc) {
- ib_dma_unmap_single(info->id->device, response->sge.addr,
+ ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
response->sge.length, DMA_FROM_DEVICE);
+ response->sge.length = 0;
smbd_disconnect_rdma_connection(info);
log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
}
@@ -1041,10 +1081,11 @@ static int smbd_post_recv(
/* Perform SMBD negotiate according to [MS-SMBD] 3.1.5.2 */
static int smbd_negotiate(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
int rc;
- struct smbd_response *response = get_receive_buffer(info);
+ struct smbdirect_recv_io *response = get_receive_buffer(info);
- response->type = SMBD_NEGOTIATE_RESP;
+ sc->recv_io.expected = SMBDIRECT_EXPECT_NEGOTIATE_REP;
rc = smbd_post_recv(info, response);
log_rdma_event(INFO, "smbd_post_recv rc=%d iov.addr=0x%llx iov.length=%u iov.lkey=0x%x\n",
rc, response->sge.addr,
@@ -1075,17 +1116,6 @@ static int smbd_negotiate(struct smbd_connection *info)
return rc;
}
-static void put_empty_packet(
- struct smbd_connection *info, struct smbd_response *response)
-{
- spin_lock(&info->empty_packet_queue_lock);
- list_add_tail(&response->list, &info->empty_packet_queue);
- info->count_empty_packet_queue++;
- spin_unlock(&info->empty_packet_queue_lock);
-
- queue_work(info->workqueue, &info->post_send_credits_work);
-}
-
/*
* Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
* This is a queue for reassembling upper layer payload and present to upper
@@ -1098,12 +1128,14 @@ static void put_empty_packet(
*/
static void enqueue_reassembly(
struct smbd_connection *info,
- struct smbd_response *response,
+ struct smbdirect_recv_io *response,
int data_length)
{
- spin_lock(&info->reassembly_queue_lock);
- list_add_tail(&response->list, &info->reassembly_queue);
- info->reassembly_queue_length++;
+ struct smbdirect_socket *sc = &info->socket;
+
+ spin_lock(&sc->recv_io.reassembly.lock);
+ list_add_tail(&response->list, &sc->recv_io.reassembly.list);
+ sc->recv_io.reassembly.queue_length++;
/*
* Make sure reassembly_data_length is updated after list and
* reassembly_queue_length are updated. On the dequeue side
@@ -1111,8 +1143,8 @@ static void enqueue_reassembly(
* if reassembly_queue_length and list is up to date
*/
virt_wmb();
- info->reassembly_data_length += data_length;
- spin_unlock(&info->reassembly_queue_lock);
+ sc->recv_io.reassembly.data_length += data_length;
+ spin_unlock(&sc->recv_io.reassembly.lock);
info->count_reassembly_queue++;
info->count_enqueue_reassembly_queue++;
}
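
The barrier comment above pairs with the reader in smbd_recv() further down in this patch; a stripped-down sketch of the pairing (illustrative only):

        /* producer (this function): publish the list before the length */
        list_add_tail(&response->list, &sc->recv_io.reassembly.list);
        sc->recv_io.reassembly.queue_length++;
        virt_wmb();
        sc->recv_io.reassembly.data_length += data_length;

        /* consumer (smbd_recv): observe the length first, then trust the list */
        if (sc->recv_io.reassembly.data_length >= size) {
                virt_rmb();
                queue_length = sc->recv_io.reassembly.queue_length;
                /* ...walk sc->recv_io.reassembly.list without holding the lock... */
        }
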
@@ -1122,34 +1154,16 @@ static void enqueue_reassembly(
* Caller is responsible for locking
* return value: the first entry if any, NULL if queue is empty
*/
-static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
-{
- struct smbd_response *ret = NULL;
-
- if (!list_empty(&info->reassembly_queue)) {
- ret = list_first_entry(
- &info->reassembly_queue,
- struct smbd_response, list);
- }
- return ret;
-}
-
-static struct smbd_response *get_empty_queue_buffer(
- struct smbd_connection *info)
+static struct smbdirect_recv_io *_get_first_reassembly(struct smbd_connection *info)
{
- struct smbd_response *ret = NULL;
- unsigned long flags;
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_recv_io *ret = NULL;
- spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
- if (!list_empty(&info->empty_packet_queue)) {
+ if (!list_empty(&sc->recv_io.reassembly.list)) {
ret = list_first_entry(
- &info->empty_packet_queue,
- struct smbd_response, list);
- list_del(&ret->list);
- info->count_empty_packet_queue--;
+ &sc->recv_io.reassembly.list,
+ struct smbdirect_recv_io, list);
}
- spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
-
return ret;
}
@@ -1159,21 +1173,22 @@ static struct smbd_response *get_empty_queue_buffer(
* pre-allocated in advance.
* return value: the receive buffer, NULL if none is available
*/
-static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
+static struct smbdirect_recv_io *get_receive_buffer(struct smbd_connection *info)
{
- struct smbd_response *ret = NULL;
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_recv_io *ret = NULL;
unsigned long flags;
- spin_lock_irqsave(&info->receive_queue_lock, flags);
- if (!list_empty(&info->receive_queue)) {
+ spin_lock_irqsave(&sc->recv_io.free.lock, flags);
+ if (!list_empty(&sc->recv_io.free.list)) {
ret = list_first_entry(
- &info->receive_queue,
- struct smbd_response, list);
+ &sc->recv_io.free.list,
+ struct smbdirect_recv_io, list);
list_del(&ret->list);
info->count_receive_queue--;
info->count_get_receive_buffer++;
}
- spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+ spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);
return ret;
}
@@ -1185,18 +1200,24 @@ static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
* receive buffer is returned.
*/
static void put_receive_buffer(
- struct smbd_connection *info, struct smbd_response *response)
+ struct smbd_connection *info, struct smbdirect_recv_io *response)
{
+ struct smbdirect_socket *sc = &info->socket;
unsigned long flags;
- ib_dma_unmap_single(info->id->device, response->sge.addr,
- response->sge.length, DMA_FROM_DEVICE);
+ if (likely(response->sge.length != 0)) {
+ ib_dma_unmap_single(sc->ib.dev,
+ response->sge.addr,
+ response->sge.length,
+ DMA_FROM_DEVICE);
+ response->sge.length = 0;
+ }
- spin_lock_irqsave(&info->receive_queue_lock, flags);
- list_add_tail(&response->list, &info->receive_queue);
+ spin_lock_irqsave(&sc->recv_io.free.lock, flags);
+ list_add_tail(&response->list, &sc->recv_io.free.list);
info->count_receive_queue++;
info->count_put_receive_buffer++;
- spin_unlock_irqrestore(&info->receive_queue_lock, flags);
+ spin_unlock_irqrestore(&sc->recv_io.free.lock, flags);
queue_work(info->workqueue, &info->post_send_credits_work);
}
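
Returning a buffer is also what keeps credits flowing: the queue_work() above wakes smbd_post_send_credits(), sketched here in simplified form (credit bookkeeping and error handling trimmed, not the literal patch code):

        if (info->receive_credit_target > atomic_read(&info->receive_credits)) {
                while (true) {
                        response = get_receive_buffer(info);    /* single free list now */
                        if (!response)
                                break;
                        response->first_segment = false;
                        if (smbd_post_recv(info, response))
                                break;  /* smbd_post_recv() also disconnects on failure */
                }
        }
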
@@ -1204,58 +1225,54 @@ static void put_receive_buffer(
/* Preallocate all receive buffer on transport establishment */
static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_recv_io *response;
int i;
- struct smbd_response *response;
- INIT_LIST_HEAD(&info->reassembly_queue);
- spin_lock_init(&info->reassembly_queue_lock);
- info->reassembly_data_length = 0;
- info->reassembly_queue_length = 0;
+ INIT_LIST_HEAD(&sc->recv_io.reassembly.list);
+ spin_lock_init(&sc->recv_io.reassembly.lock);
+ sc->recv_io.reassembly.data_length = 0;
+ sc->recv_io.reassembly.queue_length = 0;
- INIT_LIST_HEAD(&info->receive_queue);
- spin_lock_init(&info->receive_queue_lock);
+ INIT_LIST_HEAD(&sc->recv_io.free.list);
+ spin_lock_init(&sc->recv_io.free.lock);
info->count_receive_queue = 0;
- INIT_LIST_HEAD(&info->empty_packet_queue);
- spin_lock_init(&info->empty_packet_queue_lock);
- info->count_empty_packet_queue = 0;
-
init_waitqueue_head(&info->wait_receive_queues);
for (i = 0; i < num_buf; i++) {
- response = mempool_alloc(info->response_mempool, GFP_KERNEL);
+ response = mempool_alloc(sc->recv_io.mem.pool, GFP_KERNEL);
if (!response)
goto allocate_failed;
- response->info = info;
- list_add_tail(&response->list, &info->receive_queue);
+ response->socket = sc;
+ response->sge.length = 0;
+ list_add_tail(&response->list, &sc->recv_io.free.list);
info->count_receive_queue++;
}
return 0;
allocate_failed:
- while (!list_empty(&info->receive_queue)) {
+ while (!list_empty(&sc->recv_io.free.list)) {
response = list_first_entry(
- &info->receive_queue,
- struct smbd_response, list);
+ &sc->recv_io.free.list,
+ struct smbdirect_recv_io, list);
list_del(&response->list);
info->count_receive_queue--;
- mempool_free(response, info->response_mempool);
+ mempool_free(response, sc->recv_io.mem.pool);
}
return -ENOMEM;
}
static void destroy_receive_buffers(struct smbd_connection *info)
{
- struct smbd_response *response;
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_recv_io *response;
while ((response = get_receive_buffer(info)))
- mempool_free(response, info->response_mempool);
-
- while ((response = get_empty_queue_buffer(info)))
- mempool_free(response, info->response_mempool);
+ mempool_free(response, sc->recv_io.mem.pool);
}
/* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
@@ -1264,6 +1281,8 @@ static void idle_connection_timer(struct work_struct *work)
struct smbd_connection *info = container_of(
work, struct smbd_connection,
idle_timer_work.work);
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
log_keep_alive(ERR,
@@ -1278,7 +1297,7 @@ static void idle_connection_timer(struct work_struct *work)
/* Setup the next idle timeout work */
queue_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
+ msecs_to_jiffies(sp->keepalive_interval_msec));
}
/*
@@ -1289,26 +1308,31 @@ static void idle_connection_timer(struct work_struct *work)
void smbd_destroy(struct TCP_Server_Info *server)
{
struct smbd_connection *info = server->smbd_conn;
- struct smbd_response *response;
+ struct smbdirect_socket *sc;
+ struct smbdirect_socket_parameters *sp;
+ struct smbdirect_recv_io *response;
unsigned long flags;
if (!info) {
log_rdma_event(INFO, "rdma session already destroyed\n");
return;
}
+ sc = &info->socket;
+ sp = &sc->parameters;
log_rdma_event(INFO, "destroying rdma session\n");
- if (info->transport_status != SMBD_DISCONNECTED) {
- rdma_disconnect(server->smbd_conn->id);
+ if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
+ rdma_disconnect(sc->rdma.cm_id);
log_rdma_event(INFO, "wait for transport being disconnected\n");
wait_event_interruptible(
- info->disconn_wait,
- info->transport_status == SMBD_DISCONNECTED);
+ info->status_wait,
+ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
}
log_rdma_event(INFO, "destroying qp\n");
- ib_drain_qp(info->id->qp);
- rdma_destroy_qp(info->id);
+ ib_drain_qp(sc->ib.qp);
+ rdma_destroy_qp(sc->rdma.cm_id);
+ sc->ib.qp = NULL;
log_rdma_event(INFO, "cancelling idle timer\n");
cancel_delayed_work_sync(&info->idle_timer_work);
@@ -1320,23 +1344,22 @@ void smbd_destroy(struct TCP_Server_Info *server)
/* It's not possible for upper layer to get to reassembly */
log_rdma_event(INFO, "drain the reassembly queue\n");
do {
- spin_lock_irqsave(&info->reassembly_queue_lock, flags);
+ spin_lock_irqsave(&sc->recv_io.reassembly.lock, flags);
response = _get_first_reassembly(info);
if (response) {
list_del(&response->list);
spin_unlock_irqrestore(
- &info->reassembly_queue_lock, flags);
+ &sc->recv_io.reassembly.lock, flags);
put_receive_buffer(info, response);
} else
spin_unlock_irqrestore(
- &info->reassembly_queue_lock, flags);
+ &sc->recv_io.reassembly.lock, flags);
} while (response);
- info->reassembly_data_length = 0;
+ sc->recv_io.reassembly.data_length = 0;
log_rdma_event(INFO, "free receive buffers\n");
wait_event(info->wait_receive_queues,
- info->count_receive_queue + info->count_empty_packet_queue
- == info->receive_credit_max);
+ info->count_receive_queue == sp->recv_credit_max);
destroy_receive_buffers(info);
/*
@@ -1355,19 +1378,19 @@ void smbd_destroy(struct TCP_Server_Info *server)
}
destroy_mr_list(info);
- ib_free_cq(info->send_cq);
- ib_free_cq(info->recv_cq);
- ib_dealloc_pd(info->pd);
- rdma_destroy_id(info->id);
+ ib_free_cq(sc->ib.send_cq);
+ ib_free_cq(sc->ib.recv_cq);
+ ib_dealloc_pd(sc->ib.pd);
+ rdma_destroy_id(sc->rdma.cm_id);
/* free mempools */
- mempool_destroy(info->request_mempool);
- kmem_cache_destroy(info->request_cache);
+ mempool_destroy(sc->send_io.mem.pool);
+ kmem_cache_destroy(sc->send_io.mem.cache);
- mempool_destroy(info->response_mempool);
- kmem_cache_destroy(info->response_cache);
+ mempool_destroy(sc->recv_io.mem.pool);
+ kmem_cache_destroy(sc->recv_io.mem.cache);
- info->transport_status = SMBD_DESTROYED;
+ sc->status = SMBDIRECT_SOCKET_DESTROYED;
destroy_workqueue(info->workqueue);
log_rdma_event(INFO, "rdma session destroyed\n");
@@ -1392,7 +1415,7 @@ int smbd_reconnect(struct TCP_Server_Info *server)
* This is possible if transport is disconnected and we haven't received
* notification from RDMA, but upper layer has detected timeout
*/
- if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
+ if (server->smbd_conn->socket.status == SMBDIRECT_SOCKET_CONNECTED) {
log_rdma_event(INFO, "disconnecting transport\n");
smbd_destroy(server);
}
@@ -1413,50 +1436,62 @@ create_conn:
static void destroy_caches_and_workqueue(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
+
destroy_receive_buffers(info);
destroy_workqueue(info->workqueue);
- mempool_destroy(info->response_mempool);
- kmem_cache_destroy(info->response_cache);
- mempool_destroy(info->request_mempool);
- kmem_cache_destroy(info->request_cache);
+ mempool_destroy(sc->recv_io.mem.pool);
+ kmem_cache_destroy(sc->recv_io.mem.cache);
+ mempool_destroy(sc->send_io.mem.pool);
+ kmem_cache_destroy(sc->send_io.mem.cache);
}
#define MAX_NAME_LEN 80
static int allocate_caches_and_workqueue(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
char name[MAX_NAME_LEN];
int rc;
- scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
- info->request_cache =
+ if (WARN_ON_ONCE(sp->max_recv_size < sizeof(struct smbdirect_data_transfer)))
+ return -ENOMEM;
+
+ scnprintf(name, MAX_NAME_LEN, "smbdirect_send_io_%p", info);
+ sc->send_io.mem.cache =
kmem_cache_create(
name,
- sizeof(struct smbd_request) +
- sizeof(struct smbd_data_transfer),
+ sizeof(struct smbdirect_send_io) +
+ sizeof(struct smbdirect_data_transfer),
0, SLAB_HWCACHE_ALIGN, NULL);
- if (!info->request_cache)
+ if (!sc->send_io.mem.cache)
return -ENOMEM;
- info->request_mempool =
- mempool_create(info->send_credit_target, mempool_alloc_slab,
- mempool_free_slab, info->request_cache);
- if (!info->request_mempool)
+ sc->send_io.mem.pool =
+ mempool_create(sp->send_credit_target, mempool_alloc_slab,
+ mempool_free_slab, sc->send_io.mem.cache);
+ if (!sc->send_io.mem.pool)
goto out1;
- scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
- info->response_cache =
- kmem_cache_create(
- name,
- sizeof(struct smbd_response) +
- info->max_receive_size,
- 0, SLAB_HWCACHE_ALIGN, NULL);
- if (!info->response_cache)
+ scnprintf(name, MAX_NAME_LEN, "smbdirect_recv_io_%p", info);
+
+ struct kmem_cache_args response_args = {
+ .align = __alignof__(struct smbdirect_recv_io),
+ .useroffset = (offsetof(struct smbdirect_recv_io, packet) +
+ sizeof(struct smbdirect_data_transfer)),
+ .usersize = sp->max_recv_size - sizeof(struct smbdirect_data_transfer),
+ };
+ sc->recv_io.mem.cache =
+ kmem_cache_create(name,
+ sizeof(struct smbdirect_recv_io) + sp->max_recv_size,
+ &response_args, SLAB_HWCACHE_ALIGN);
+ if (!sc->recv_io.mem.cache)
goto out2;
- info->response_mempool =
- mempool_create(info->receive_credit_max, mempool_alloc_slab,
- mempool_free_slab, info->response_cache);
- if (!info->response_mempool)
+ sc->recv_io.mem.pool =
+ mempool_create(sp->recv_credit_max, mempool_alloc_slab,
+ mempool_free_slab, sc->recv_io.mem.cache);
+ if (!sc->recv_io.mem.pool)
goto out3;
scnprintf(name, MAX_NAME_LEN, "smbd_%p", info);
@@ -1464,7 +1499,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
if (!info->workqueue)
goto out4;
- rc = allocate_receive_buffers(info, info->receive_credit_max);
+ rc = allocate_receive_buffers(info, sp->recv_credit_max);
if (rc) {
log_rdma_event(ERR, "failed to allocate receive buffers\n");
goto out5;
@@ -1475,13 +1510,13 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
out5:
destroy_workqueue(info->workqueue);
out4:
- mempool_destroy(info->response_mempool);
+ mempool_destroy(sc->recv_io.mem.pool);
out3:
- kmem_cache_destroy(info->response_cache);
+ kmem_cache_destroy(sc->recv_io.mem.cache);
out2:
- mempool_destroy(info->request_mempool);
+ mempool_destroy(sc->send_io.mem.pool);
out1:
- kmem_cache_destroy(info->request_cache);
+ kmem_cache_destroy(sc->send_io.mem.cache);
return -ENOMEM;
}
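
The receive cache above is the one place this patch switches to the args-based kmem_cache_create(); a standalone sketch with an assumed 8 KiB max_recv_size (the cache name and size are illustrative, not from the patch):

        struct kmem_cache *cache;
        struct kmem_cache_args args = {
                .align      = __alignof__(struct smbdirect_recv_io),
                /* whitelist only the payload bytes that copy_to_iter() may hand out */
                .useroffset = offsetof(struct smbdirect_recv_io, packet) +
                              sizeof(struct smbdirect_data_transfer),
                .usersize   = 8192 - sizeof(struct smbdirect_data_transfer),
        };

        cache = kmem_cache_create("smbdirect_recv_io_example",
                                  sizeof(struct smbdirect_recv_io) + 8192,
                                  &args, SLAB_HWCACHE_ALIGN);

With hardened usercopy enabled, copies out of the packet payload stay inside the whitelisted window while the smbdirect_recv_io bookkeeping in front of it stays off limits; this is presumably also why the WARN_ON_ONCE() guard on max_recv_size was added above, since usersize would otherwise underflow.
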
@@ -1491,6 +1526,8 @@ static struct smbd_connection *_smbd_get_connection(
{
int rc;
struct smbd_connection *info;
+ struct smbdirect_socket *sc;
+ struct smbdirect_socket_parameters *sp;
struct rdma_conn_param conn_param;
struct ib_qp_init_attr qp_attr;
struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
@@ -1500,101 +1537,102 @@ static struct smbd_connection *_smbd_get_connection(
info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
if (!info)
return NULL;
+ sc = &info->socket;
+ sp = &sc->parameters;
- info->transport_status = SMBD_CONNECTING;
+ sc->status = SMBDIRECT_SOCKET_CONNECTING;
rc = smbd_ia_open(info, dstaddr, port);
if (rc) {
log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
goto create_id_failed;
}
- if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
- smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
+ if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
+ smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
smbd_send_credit_target,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
goto config_failed;
}
- if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
- smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
+ if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
+ smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
smbd_receive_credit_max,
- info->id->device->attrs.max_cqe,
- info->id->device->attrs.max_qp_wr);
+ sc->ib.dev->attrs.max_cqe,
+ sc->ib.dev->attrs.max_qp_wr);
goto config_failed;
}
- info->receive_credit_max = smbd_receive_credit_max;
- info->send_credit_target = smbd_send_credit_target;
- info->max_send_size = smbd_max_send_size;
- info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
- info->max_receive_size = smbd_max_receive_size;
- info->keep_alive_interval = smbd_keep_alive_interval;
+ sp->recv_credit_max = smbd_receive_credit_max;
+ sp->send_credit_target = smbd_send_credit_target;
+ sp->max_send_size = smbd_max_send_size;
+ sp->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+ sp->max_recv_size = smbd_max_receive_size;
+ sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;
- if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
- info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+ if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_SEND_IO_MAX_SGE ||
+ sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_RECV_IO_MAX_SGE) {
log_rdma_event(ERR,
"device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
IB_DEVICE_NAME_MAX,
- info->id->device->name,
- info->id->device->attrs.max_send_sge,
- info->id->device->attrs.max_recv_sge);
+ sc->ib.dev->name,
+ sc->ib.dev->attrs.max_send_sge,
+ sc->ib.dev->attrs.max_recv_sge);
goto config_failed;
}
- info->send_cq = NULL;
- info->recv_cq = NULL;
- info->send_cq =
- ib_alloc_cq_any(info->id->device, info,
- info->send_credit_target, IB_POLL_SOFTIRQ);
- if (IS_ERR(info->send_cq)) {
- info->send_cq = NULL;
+ sc->ib.send_cq =
+ ib_alloc_cq_any(sc->ib.dev, info,
+ sp->send_credit_target, IB_POLL_SOFTIRQ);
+ if (IS_ERR(sc->ib.send_cq)) {
+ sc->ib.send_cq = NULL;
goto alloc_cq_failed;
}
- info->recv_cq =
- ib_alloc_cq_any(info->id->device, info,
- info->receive_credit_max, IB_POLL_SOFTIRQ);
- if (IS_ERR(info->recv_cq)) {
- info->recv_cq = NULL;
+ sc->ib.recv_cq =
+ ib_alloc_cq_any(sc->ib.dev, info,
+ sp->recv_credit_max, IB_POLL_SOFTIRQ);
+ if (IS_ERR(sc->ib.recv_cq)) {
+ sc->ib.recv_cq = NULL;
goto alloc_cq_failed;
}
memset(&qp_attr, 0, sizeof(qp_attr));
qp_attr.event_handler = smbd_qp_async_error_upcall;
qp_attr.qp_context = info;
- qp_attr.cap.max_send_wr = info->send_credit_target;
- qp_attr.cap.max_recv_wr = info->receive_credit_max;
- qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE;
- qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE;
+ qp_attr.cap.max_send_wr = sp->send_credit_target;
+ qp_attr.cap.max_recv_wr = sp->recv_credit_max;
+ qp_attr.cap.max_send_sge = SMBDIRECT_SEND_IO_MAX_SGE;
+ qp_attr.cap.max_recv_sge = SMBDIRECT_RECV_IO_MAX_SGE;
qp_attr.cap.max_inline_data = 0;
qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
qp_attr.qp_type = IB_QPT_RC;
- qp_attr.send_cq = info->send_cq;
- qp_attr.recv_cq = info->recv_cq;
+ qp_attr.send_cq = sc->ib.send_cq;
+ qp_attr.recv_cq = sc->ib.recv_cq;
qp_attr.port_num = ~0;
- rc = rdma_create_qp(info->id, info->pd, &qp_attr);
+ rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
if (rc) {
log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
goto create_qp_failed;
}
+ sc->ib.qp = sc->rdma.cm_id->qp;
memset(&conn_param, 0, sizeof(conn_param));
conn_param.initiator_depth = 0;
conn_param.responder_resources =
- min(info->id->device->attrs.max_qp_rd_atom,
+ min(sc->ib.dev->attrs.max_qp_rd_atom,
SMBD_CM_RESPONDER_RESOURCES);
info->responder_resources = conn_param.responder_resources;
log_rdma_mr(INFO, "responder_resources=%d\n",
info->responder_resources);
/* Need to send IRD/ORD in private data for iWARP */
- info->id->device->ops.get_port_immutable(
- info->id->device, info->id->port_num, &port_immutable);
+ sc->ib.dev->ops.get_port_immutable(
+ sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
ird_ord_hdr[0] = info->responder_resources;
ird_ord_hdr[1] = 1;
@@ -1612,19 +1650,20 @@ static struct smbd_connection *_smbd_get_connection(
log_rdma_event(INFO, "connecting to IP %pI4 port %d\n",
&addr_in->sin_addr, port);
- init_waitqueue_head(&info->conn_wait);
- init_waitqueue_head(&info->disconn_wait);
- init_waitqueue_head(&info->wait_reassembly_queue);
- rc = rdma_connect(info->id, &conn_param);
+ init_waitqueue_head(&info->status_wait);
+ init_waitqueue_head(&sc->recv_io.reassembly.wait_queue);
+ rc = rdma_connect(sc->rdma.cm_id, &conn_param);
if (rc) {
log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
goto rdma_connect_failed;
}
- wait_event_interruptible(
- info->conn_wait, info->transport_status != SMBD_CONNECTING);
+ wait_event_interruptible_timeout(
+ info->status_wait,
+ sc->status != SMBDIRECT_SOCKET_CONNECTING,
+ msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
goto rdma_connect_failed;
}
@@ -1640,7 +1679,7 @@ static struct smbd_connection *_smbd_get_connection(
init_waitqueue_head(&info->wait_send_queue);
INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
queue_delayed_work(info->workqueue, &info->idle_timer_work,
- info->keep_alive_interval*HZ);
+ msecs_to_jiffies(sp->keepalive_interval_msec));
init_waitqueue_head(&info->wait_send_pending);
atomic_set(&info->send_pending, 0);
@@ -1675,26 +1714,25 @@ allocate_mr_failed:
negotiation_failed:
cancel_delayed_work_sync(&info->idle_timer_work);
destroy_caches_and_workqueue(info);
- info->transport_status = SMBD_NEGOTIATE_FAILED;
- init_waitqueue_head(&info->conn_wait);
- rdma_disconnect(info->id);
- wait_event(info->conn_wait,
- info->transport_status == SMBD_DISCONNECTED);
+ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
+ rdma_disconnect(sc->rdma.cm_id);
+ wait_event(info->status_wait,
+ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
allocate_cache_failed:
rdma_connect_failed:
- rdma_destroy_qp(info->id);
+ rdma_destroy_qp(sc->rdma.cm_id);
create_qp_failed:
alloc_cq_failed:
- if (info->send_cq)
- ib_free_cq(info->send_cq);
- if (info->recv_cq)
- ib_free_cq(info->recv_cq);
+ if (sc->ib.send_cq)
+ ib_free_cq(sc->ib.send_cq);
+ if (sc->ib.recv_cq)
+ ib_free_cq(sc->ib.recv_cq);
config_failed:
- ib_dealloc_pd(info->pd);
- rdma_destroy_id(info->id);
+ ib_dealloc_pd(sc->ib.pd);
+ rdma_destroy_id(sc->rdma.cm_id);
create_id_failed:
kfree(info);
@@ -1719,36 +1757,41 @@ try_again:
}
/*
- * Receive data from receive reassembly queue
+ * Receive data from the transport's receive reassembly queue
* All the incoming data packets are placed in reassembly queue
- * buf: the buffer to read data into
+ * iter: the buffer to read data into
* size: the length of data to read
* return value: actual data read
- * Note: this implementation copies the data from reassebmly queue to receive
+ *
+ * Note: this implementation copies the data from reassembly queue to receive
* buffers used by upper layer. This is not the optimal code path. A better way
* to do it is to not have upper layer allocate its receive buffers but rather
* borrow the buffer from reassembly queue, and return it after data is
* consumed. But this will require more changes to upper layer code, and also
 * need to consider packet boundaries while they are still being reassembled.
*/
-static int smbd_recv_buf(struct smbd_connection *info, char *buf,
- unsigned int size)
+int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
{
- struct smbd_response *response;
- struct smbd_data_transfer *data_transfer;
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_recv_io *response;
+ struct smbdirect_data_transfer *data_transfer;
+ size_t size = iov_iter_count(&msg->msg_iter);
int to_copy, to_read, data_read, offset;
u32 data_length, remaining_data_length, data_offset;
int rc;
+ if (WARN_ON_ONCE(iov_iter_rw(&msg->msg_iter) == WRITE))
+ return -EINVAL; /* It's a bug in upper layer to get there */
+
again:
/*
* No need to hold the reassembly queue lock all the time as we are
* the only one reading from the front of the queue. The transport
* may add more entries to the back of the queue at the same time
*/
- log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
- info->reassembly_data_length);
- if (info->reassembly_data_length >= size) {
+ log_read(INFO, "size=%zd sc->recv_io.reassembly.data_length=%d\n", size,
+ sc->recv_io.reassembly.data_length);
+ if (sc->recv_io.reassembly.data_length >= size) {
int queue_length;
int queue_removed = 0;
@@ -1760,13 +1803,13 @@ again:
* updated in SOFTIRQ as more data is received
*/
virt_rmb();
- queue_length = info->reassembly_queue_length;
+ queue_length = sc->recv_io.reassembly.queue_length;
data_read = 0;
to_read = size;
- offset = info->first_entry_offset;
+ offset = sc->recv_io.reassembly.first_entry_offset;
while (data_read < size) {
response = _get_first_reassembly(info);
- data_transfer = smbd_response_payload(response);
+ data_transfer = smbdirect_recv_io_payload(response);
data_length = le32_to_cpu(data_transfer->data_length);
remaining_data_length =
le32_to_cpu(
@@ -1784,7 +1827,10 @@ again:
if (response->first_segment && size == 4) {
unsigned int rfc1002_len =
data_length + remaining_data_length;
- *((__be32 *)buf) = cpu_to_be32(rfc1002_len);
+ __be32 rfc1002_hdr = cpu_to_be32(rfc1002_len);
+ if (copy_to_iter(&rfc1002_hdr, sizeof(rfc1002_hdr),
+ &msg->msg_iter) != sizeof(rfc1002_hdr))
+ return -EFAULT;
data_read = 4;
response->first_segment = false;
log_read(INFO, "returning rfc1002 length %d\n",
@@ -1793,10 +1839,9 @@ again:
}
to_copy = min_t(int, data_length - offset, to_read);
- memcpy(
- buf + data_read,
- (char *)data_transfer + data_offset + offset,
- to_copy);
+ if (copy_to_iter((char *)data_transfer + data_offset + offset,
+ to_copy, &msg->msg_iter) != to_copy)
+ return -EFAULT;
/* move on to the next buffer? */
if (to_copy == data_length - offset) {
@@ -1809,10 +1854,10 @@ again:
list_del(&response->list);
else {
spin_lock_irq(
- &info->reassembly_queue_lock);
+ &sc->recv_io.reassembly.lock);
list_del(&response->list);
spin_unlock_irq(
- &info->reassembly_queue_lock);
+ &sc->recv_io.reassembly.lock);
}
queue_removed++;
info->count_reassembly_queue--;
@@ -1831,29 +1876,29 @@ again:
to_read, data_read, offset);
}
- spin_lock_irq(&info->reassembly_queue_lock);
- info->reassembly_data_length -= data_read;
- info->reassembly_queue_length -= queue_removed;
- spin_unlock_irq(&info->reassembly_queue_lock);
+ spin_lock_irq(&sc->recv_io.reassembly.lock);
+ sc->recv_io.reassembly.data_length -= data_read;
+ sc->recv_io.reassembly.queue_length -= queue_removed;
+ spin_unlock_irq(&sc->recv_io.reassembly.lock);
- info->first_entry_offset = offset;
+ sc->recv_io.reassembly.first_entry_offset = offset;
log_read(INFO, "returning to thread data_read=%d reassembly_data_length=%d first_entry_offset=%d\n",
- data_read, info->reassembly_data_length,
- info->first_entry_offset);
+ data_read, sc->recv_io.reassembly.data_length,
+ sc->recv_io.reassembly.first_entry_offset);
read_rfc1002_done:
return data_read;
}
log_read(INFO, "wait_event on more data\n");
rc = wait_event_interruptible(
- info->wait_reassembly_queue,
- info->reassembly_data_length >= size ||
- info->transport_status != SMBD_CONNECTED);
+ sc->recv_io.reassembly.wait_queue,
+ sc->recv_io.reassembly.data_length >= size ||
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
/* Don't return any data if interrupted */
if (rc)
return rc;
- if (info->transport_status != SMBD_CONNECTED) {
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
log_read(ERR, "disconnected\n");
return -ECONNABORTED;
}
@@ -1862,89 +1907,6 @@ read_rfc1002_done:
}
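
Copying through the caller's msg_iter is what lets a single smbd_recv() serve both ITER_KVEC and ITER_BVEC callers, which is why the smbd_recv_buf()/smbd_recv_page() split below can be removed. The short-copy pattern it relies on, sketched with a hypothetical source buffer and length:

        size_t copied = copy_to_iter(src, len, &msg->msg_iter);  /* advances the iterator */
        if (copied != len)
                return -EFAULT;         /* destination iterator too short, or faulted */
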
/*
- * Receive a page from receive reassembly queue
- * page: the page to read data into
- * to_read: the length of data to read
- * return value: actual data read
- */
-static int smbd_recv_page(struct smbd_connection *info,
- struct page *page, unsigned int page_offset,
- unsigned int to_read)
-{
- int ret;
- char *to_address;
- void *page_address;
-
- /* make sure we have the page ready for read */
- ret = wait_event_interruptible(
- info->wait_reassembly_queue,
- info->reassembly_data_length >= to_read ||
- info->transport_status != SMBD_CONNECTED);
- if (ret)
- return ret;
-
- /* now we can read from reassembly queue and not sleep */
- page_address = kmap_atomic(page);
- to_address = (char *) page_address + page_offset;
-
- log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
- page, to_address, to_read);
-
- ret = smbd_recv_buf(info, to_address, to_read);
- kunmap_atomic(page_address);
-
- return ret;
-}
-
-/*
- * Receive data from transport
- * msg: a msghdr point to the buffer, can be ITER_KVEC or ITER_BVEC
- * return: total bytes read, or 0. SMB Direct will not do partial read.
- */
-int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
-{
- char *buf;
- struct page *page;
- unsigned int to_read, page_offset;
- int rc;
-
- if (iov_iter_rw(&msg->msg_iter) == WRITE) {
- /* It's a bug in upper layer to get there */
- cifs_dbg(VFS, "Invalid msg iter dir %u\n",
- iov_iter_rw(&msg->msg_iter));
- rc = -EINVAL;
- goto out;
- }
-
- switch (iov_iter_type(&msg->msg_iter)) {
- case ITER_KVEC:
- buf = msg->msg_iter.kvec->iov_base;
- to_read = msg->msg_iter.kvec->iov_len;
- rc = smbd_recv_buf(info, buf, to_read);
- break;
-
- case ITER_BVEC:
- page = msg->msg_iter.bvec->bv_page;
- page_offset = msg->msg_iter.bvec->bv_offset;
- to_read = msg->msg_iter.bvec->bv_len;
- rc = smbd_recv_page(info, page, page_offset, to_read);
- break;
-
- default:
- /* It's a bug in upper layer to get there */
- cifs_dbg(VFS, "Invalid msg type %d\n",
- iov_iter_type(&msg->msg_iter));
- rc = -EINVAL;
- }
-
-out:
- /* SMBDirect will read it all or nothing */
- if (rc > 0)
- msg->msg_iter.count = 0;
- return rc;
-}
-
-/*
* Send data to transport
* Each rqst is transported as a SMBDirect payload
* rqst: the data to write
@@ -1954,12 +1916,14 @@ int smbd_send(struct TCP_Server_Info *server,
int num_rqst, struct smb_rqst *rqst_array)
{
struct smbd_connection *info = server->smbd_conn;
+ struct smbdirect_socket *sc = &info->socket;
+ struct smbdirect_socket_parameters *sp = &sc->parameters;
struct smb_rqst *rqst;
struct iov_iter iter;
unsigned int remaining_data_length, klen;
int rc, i, rqst_idx;
- if (info->transport_status != SMBD_CONNECTED)
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
return -EAGAIN;
/*
@@ -1971,10 +1935,10 @@ int smbd_send(struct TCP_Server_Info *server,
for (i = 0; i < num_rqst; i++)
remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
- if (unlikely(remaining_data_length > info->max_fragmented_send_size)) {
+ if (unlikely(remaining_data_length > sp->max_fragmented_send_size)) {
/* assertion: payload never exceeds negotiated maximum */
log_write(ERR, "payload size %d > max size %d\n",
- remaining_data_length, info->max_fragmented_send_size);
+ remaining_data_length, sp->max_fragmented_send_size);
return -EINVAL;
}
@@ -2000,14 +1964,14 @@ int smbd_send(struct TCP_Server_Info *server,
klen += rqst->rq_iov[i].iov_len;
iov_iter_kvec(&iter, ITER_SOURCE, rqst->rq_iov, rqst->rq_nvec, klen);
- rc = smbd_post_send_iter(info, &iter, &remaining_data_length);
+ rc = smbd_post_send_full_iter(info, &iter, &remaining_data_length);
if (rc < 0)
break;
if (iov_iter_count(&rqst->rq_iter) > 0) {
/* And then the data pages if there are any */
- rc = smbd_post_send_iter(info, &rqst->rq_iter,
- &remaining_data_length);
+ rc = smbd_post_send_full_iter(info, &rqst->rq_iter,
+ &remaining_data_length);
if (rc < 0)
break;
}
@@ -2053,6 +2017,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
{
struct smbd_connection *info =
container_of(work, struct smbd_connection, mr_recovery_work);
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_mr *smbdirect_mr;
int rc;
@@ -2070,7 +2035,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
}
smbdirect_mr->mr = ib_alloc_mr(
- info->pd, info->mr_type,
+ sc->ib.pd, info->mr_type,
info->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
@@ -2099,12 +2064,13 @@ static void smbd_mr_recovery_work(struct work_struct *work)
static void destroy_mr_list(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_mr *mr, *tmp;
cancel_work_sync(&info->mr_recovery_work);
list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
if (mr->state == MR_INVALIDATED)
- ib_dma_unmap_sg(info->id->device, mr->sgt.sgl,
+ ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
mr->sgt.nents, mr->dir);
ib_dereg_mr(mr->mr);
kfree(mr->sgt.sgl);
@@ -2121,6 +2087,7 @@ static void destroy_mr_list(struct smbd_connection *info)
*/
static int allocate_mr_list(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
int i;
struct smbd_mr *smbdirect_mr, *tmp;
@@ -2136,7 +2103,7 @@ static int allocate_mr_list(struct smbd_connection *info)
smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
if (!smbdirect_mr)
goto cleanup_entries;
- smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
+ smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
info->max_frmr_depth);
if (IS_ERR(smbdirect_mr->mr)) {
log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
@@ -2181,20 +2148,20 @@ cleanup_entries:
*/
static struct smbd_mr *get_mr(struct smbd_connection *info)
{
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_mr *ret;
int rc;
again:
rc = wait_event_interruptible(info->wait_mr,
atomic_read(&info->mr_ready_count) ||
- info->transport_status != SMBD_CONNECTED);
+ sc->status != SMBDIRECT_SOCKET_CONNECTED);
if (rc) {
log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
return NULL;
}
- if (info->transport_status != SMBD_CONNECTED) {
- log_rdma_mr(ERR, "info->transport_status=%x\n",
- info->transport_status);
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_rdma_mr(ERR, "sc->status=%x\n", sc->status);
return NULL;
}
@@ -2247,6 +2214,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
struct iov_iter *iter,
bool writing, bool need_invalidate)
{
+ struct smbdirect_socket *sc = &info->socket;
struct smbd_mr *smbdirect_mr;
int rc, num_pages;
enum dma_data_direction dir;
@@ -2276,7 +2244,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
num_pages, iov_iter_count(iter), info->max_frmr_depth);
smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
- rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl,
+ rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents, dir);
if (!rc) {
log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
@@ -2312,7 +2280,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
* on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
* on the next ib_post_send when we actually send I/O to remote peer
*/
- rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
+ rc = ib_post_send(sc->ib.qp, &reg_wr->wr, NULL);
if (!rc)
return smbdirect_mr;
@@ -2321,7 +2289,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
/* If all failed, attempt to recover this MR by setting it MR_ERROR*/
map_mr_error:
- ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl,
+ ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents, smbdirect_mr->dir);
dma_map_error:
@@ -2359,6 +2327,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
{
struct ib_send_wr *wr;
struct smbd_connection *info = smbdirect_mr->conn;
+ struct smbdirect_socket *sc = &info->socket;
int rc = 0;
if (smbdirect_mr->need_invalidate) {
@@ -2372,7 +2341,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
wr->send_flags = IB_SEND_SIGNALED;
init_completion(&smbdirect_mr->invalidate_done);
- rc = ib_post_send(info->id->qp, wr, NULL);
+ rc = ib_post_send(sc->ib.qp, wr, NULL);
if (rc) {
log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
smbd_disconnect_rdma_connection(info);
@@ -2389,7 +2358,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
if (smbdirect_mr->state == MR_INVALIDATED) {
ib_dma_unmap_sg(
- info->id->device, smbdirect_mr->sgt.sgl,
+ sc->ib.dev, smbdirect_mr->sgt.sgl,
smbdirect_mr->sgt.nents,
smbdirect_mr->dir);
smbdirect_mr->state = MR_READY;
@@ -2552,13 +2521,14 @@ static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
size_t fsize = folioq_folio_size(folioq, slot);
if (offset < fsize) {
- size_t part = umin(maxsize - ret, fsize - offset);
+ size_t part = umin(maxsize, fsize - offset);
if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part))
return -EIO;
offset += part;
ret += part;
+ maxsize -= part;
}
if (offset >= fsize) {
@@ -2573,7 +2543,7 @@ static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
slot = 0;
}
}
- } while (rdma->nr_sge < rdma->max_sge || maxsize > 0);
+ } while (rdma->nr_sge < rdma->max_sge && maxsize > 0);
iter->folioq = folioq;
iter->folioq_slot = slot;
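
A worked example of the loop fix just above, with assumed sizes (a 12 KiB extraction across two 8 KiB folios; none of these numbers are from the patch):

        /* pass 1: part = umin(12K, 8K) = 8K; ret = 8K;  maxsize -> 4K
         * pass 2: part = umin( 4K, 8K) = 4K; ret = 12K; maxsize -> 0  => loop stops
         *
         * The old "|| maxsize > 0" condition kept the loop running whenever either
         * side held, so it did not stop once the requested bytes were consumed;
         * with "&&" and the new "maxsize -= part" it stops as soon as either the
         * SGE array or the byte budget is exhausted.
         */
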
diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
index c08e3665150d..e45aa9ddd71d 100644
--- a/fs/smb/client/smbdirect.h
+++ b/fs/smb/client/smbdirect.h
@@ -15,6 +15,9 @@
#include <rdma/rdma_cm.h>
#include <linux/mempool.h>
+#include "../common/smbdirect/smbdirect.h"
+#include "../common/smbdirect/smbdirect_socket.h"
+
extern int rdma_readwrite_threshold;
extern int smbd_max_frmr_depth;
extern int smbd_keep_alive_interval;
@@ -30,16 +33,6 @@ enum keep_alive_status {
KEEP_ALIVE_SENT,
};
-enum smbd_connection_status {
- SMBD_CREATED,
- SMBD_CONNECTING,
- SMBD_CONNECTED,
- SMBD_NEGOTIATE_FAILED,
- SMBD_DISCONNECTING,
- SMBD_DISCONNECTED,
- SMBD_DESTROYED
-};
-
/*
* The context for the SMBDirect transport
* Everything related to the transport is here. It has several logical parts
@@ -50,18 +43,11 @@ enum smbd_connection_status {
* 5. mempools for allocating packets
*/
struct smbd_connection {
- enum smbd_connection_status transport_status;
+ struct smbdirect_socket socket;
- /* RDMA related */
- struct rdma_cm_id *id;
- struct ib_qp_init_attr qp_attr;
- struct ib_pd *pd;
- struct ib_cq *send_cq, *recv_cq;
- struct ib_device_attr dev_attr;
int ri_rc;
struct completion ri_done;
- wait_queue_head_t conn_wait;
- wait_queue_head_t disconn_wait;
+ wait_queue_head_t status_wait;
struct completion negotiate_completion;
bool negotiate_done;
@@ -72,21 +58,12 @@ struct smbd_connection {
spinlock_t lock_new_credits_offered;
int new_credits_offered;
- /* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
- int receive_credit_max;
- int send_credit_target;
- int max_send_size;
- int max_fragmented_recv_size;
- int max_fragmented_send_size;
- int max_receive_size;
- int keep_alive_interval;
- int max_readwrite_size;
+ /* dynamic connection parameters defined in [MS-SMBD] 3.1.1.1 */
enum keep_alive_status keep_alive_requested;
int protocol;
atomic_t send_credits;
atomic_t receive_credits;
int receive_credit_target;
- int fragment_reassembly_remaining;
/* Memory registrations */
/* Maximum number of RDMA read/write outstanding on this connection */
@@ -117,52 +94,16 @@ struct smbd_connection {
wait_queue_head_t wait_post_send;
/* Receive queue */
- struct list_head receive_queue;
int count_receive_queue;
- spinlock_t receive_queue_lock;
-
- struct list_head empty_packet_queue;
- int count_empty_packet_queue;
- spinlock_t empty_packet_queue_lock;
-
wait_queue_head_t wait_receive_queues;
- /* Reassembly queue */
- struct list_head reassembly_queue;
- spinlock_t reassembly_queue_lock;
- wait_queue_head_t wait_reassembly_queue;
-
- /* total data length of reassembly queue */
- int reassembly_data_length;
- int reassembly_queue_length;
- /* the offset to first buffer in reassembly queue */
- int first_entry_offset;
-
bool send_immediate;
wait_queue_head_t wait_send_queue;
- /*
- * Indicate if we have received a full packet on the connection
- * This is used to identify the first SMBD packet of a assembled
- * payload (SMB packet) in reassembly queue so we can return a
- * RFC1002 length to upper layer to indicate the length of the SMB
- * packet received
- */
- bool full_packet_received;
-
struct workqueue_struct *workqueue;
struct delayed_work idle_timer_work;
- /* Memory pool for preallocating buffers */
- /* request pool for RDMA send */
- struct kmem_cache *request_cache;
- mempool_t *request_mempool;
-
- /* response pool for RDMA receive */
- struct kmem_cache *response_cache;
- mempool_t *response_mempool;
-
/* for debug purposes */
unsigned int count_get_receive_buffer;
unsigned int count_put_receive_buffer;
@@ -172,96 +113,6 @@ struct smbd_connection {
unsigned int count_send_empty;
};
-enum smbd_message_type {
- SMBD_NEGOTIATE_RESP,
- SMBD_TRANSFER_DATA,
-};
-
-#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
-
-/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
-struct smbd_negotiate_req {
- __le16 min_version;
- __le16 max_version;
- __le16 reserved;
- __le16 credits_requested;
- __le32 preferred_send_size;
- __le32 max_receive_size;
- __le32 max_fragmented_size;
-} __packed;
-
-/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
-struct smbd_negotiate_resp {
- __le16 min_version;
- __le16 max_version;
- __le16 negotiated_version;
- __le16 reserved;
- __le16 credits_requested;
- __le16 credits_granted;
- __le32 status;
- __le32 max_readwrite_size;
- __le32 preferred_send_size;
- __le32 max_receive_size;
- __le32 max_fragmented_size;
-} __packed;
-
-/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
-struct smbd_data_transfer {
- __le16 credits_requested;
- __le16 credits_granted;
- __le16 flags;
- __le16 reserved;
- __le32 remaining_data_length;
- __le32 data_offset;
- __le32 data_length;
- __le32 padding;
- __u8 buffer[];
-} __packed;
-
-/* The packet fields for a registered RDMA buffer */
-struct smbd_buffer_descriptor_v1 {
- __le64 offset;
- __le32 token;
- __le32 length;
-} __packed;
-
-/* Maximum number of SGEs used by smbdirect.c in any send work request */
-#define SMBDIRECT_MAX_SEND_SGE 6
-
-/* The context for a SMBD request */
-struct smbd_request {
- struct smbd_connection *info;
- struct ib_cqe cqe;
-
- /* the SGE entries for this work request */
- struct ib_sge sge[SMBDIRECT_MAX_SEND_SGE];
- int num_sge;
-
- /* SMBD packet header follows this structure */
- u8 packet[];
-};
-
-/* Maximum number of SGEs used by smbdirect.c in any receive work request */
-#define SMBDIRECT_MAX_RECV_SGE 1
-
-/* The context for a SMBD response */
-struct smbd_response {
- struct smbd_connection *info;
- struct ib_cqe cqe;
- struct ib_sge sge;
-
- enum smbd_message_type type;
-
- /* Link to receive queue or reassembly queue */
- struct list_head list;
-
- /* Indicate if this is the 1st packet of a payload */
- bool first_segment;
-
- /* SMBD packet header and payload follows this structure */
- u8 packet[];
-};
-
/* Create a SMBDirect session */
struct smbd_connection *smbd_get_connection(
struct TCP_Server_Info *server, struct sockaddr *dstaddr);
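
The RDMA handles, connection status and negotiated limits that used to live directly in smbd_connection now sit in the embedded struct smbdirect_socket, so client and server can share one definition. A rough user-space analogue of the embedding pattern (all names hypothetical):

/*
 * User-space analogue of the refactor: per-transport code embeds a
 * common "socket" object instead of duplicating its fields.
 * All names below are illustrative, not the kernel definitions.
 */
#include <stdio.h>

enum sock_status { SOCK_CREATED, SOCK_CONNECTED, SOCK_DISCONNECTED };

struct common_socket {
	enum sock_status status;
	unsigned int max_send_size;	/* negotiated parameter */
};

struct client_connection {
	struct common_socket socket;	/* shared state lives here */
	int client_only_counter;	/* transport-specific state stays local */
};

int main(void)
{
	struct client_connection conn = {
		.socket = { .status = SOCK_CONNECTED, .max_send_size = 8192 },
	};

	/* callers reach shared state through the embedded member */
	printf("status=%d max_send=%u\n",
	       conn.socket.status, conn.socket.max_send_size);
	return 0;
}
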
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 52bcb55d9952..93e5b2bb9f28 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -140,7 +140,7 @@ DECLARE_EVENT_CLASS(smb3_rw_err_class,
__entry->len = len;
__entry->rc = rc;
),
- TP_printk("\tR=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
+ TP_printk("R=%08x[%x] xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
__entry->rreq_debug_id, __entry->rreq_debug_index,
__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
__entry->offset, __entry->len, __entry->rc)
@@ -190,7 +190,7 @@ DECLARE_EVENT_CLASS(smb3_other_err_class,
__entry->len = len;
__entry->rc = rc;
),
- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
+ TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx offset=0x%llx len=0x%x rc=%d",
__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
__entry->offset, __entry->len, __entry->rc)
)
@@ -247,7 +247,7 @@ DECLARE_EVENT_CLASS(smb3_copy_range_err_class,
__entry->len = len;
__entry->rc = rc;
),
- TP_printk("\txid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x rc=%d",
+ TP_printk("xid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x rc=%d",
__entry->xid, __entry->sesid, __entry->tid, __entry->target_fid,
__entry->src_offset, __entry->target_fid, __entry->target_offset, __entry->len, __entry->rc)
)
@@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(smb3_copy_range_done_class,
__entry->target_offset = target_offset;
__entry->len = len;
),
- TP_printk("\txid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x",
+ TP_printk("xid=%u sid=0x%llx tid=0x%x source fid=0x%llx source offset=0x%llx target fid=0x%llx target offset=0x%llx len=0x%x",
__entry->xid, __entry->sesid, __entry->tid, __entry->target_fid,
__entry->src_offset, __entry->target_fid, __entry->target_offset, __entry->len)
)
@@ -482,7 +482,7 @@ DECLARE_EVENT_CLASS(smb3_fd_class,
__entry->tid = tid;
__entry->sesid = sesid;
),
- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx",
+ TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx",
__entry->xid, __entry->sesid, __entry->tid, __entry->fid)
)
@@ -521,7 +521,7 @@ DECLARE_EVENT_CLASS(smb3_fd_err_class,
__entry->sesid = sesid;
__entry->rc = rc;
),
- TP_printk("\txid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
+ TP_printk("xid=%u sid=0x%llx tid=0x%x fid=0x%llx rc=%d",
__entry->xid, __entry->sesid, __entry->tid, __entry->fid,
__entry->rc)
)
@@ -794,7 +794,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_err_class,
__entry->status = status;
__entry->rc = rc;
),
- TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
+ TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu status=0x%x rc=%d",
__entry->sesid, __entry->tid, __entry->cmd, __entry->mid,
__entry->status, __entry->rc)
)
@@ -829,7 +829,7 @@ DECLARE_EVENT_CLASS(smb3_cmd_done_class,
__entry->cmd = cmd;
__entry->mid = mid;
),
- TP_printk("\tsid=0x%llx tid=0x%x cmd=%u mid=%llu",
+ TP_printk("sid=0x%llx tid=0x%x cmd=%u mid=%llu",
__entry->sesid, __entry->tid,
__entry->cmd, __entry->mid)
)
@@ -867,7 +867,7 @@ DECLARE_EVENT_CLASS(smb3_mid_class,
__entry->when_sent = when_sent;
__entry->when_received = when_received;
),
- TP_printk("\tcmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
+ TP_printk("cmd=%u mid=%llu pid=%u, when_sent=%lu when_rcv=%lu",
__entry->cmd, __entry->mid, __entry->pid, __entry->when_sent,
__entry->when_received)
)
@@ -898,7 +898,7 @@ DECLARE_EVENT_CLASS(smb3_exit_err_class,
__assign_str(func_name);
__entry->rc = rc;
),
- TP_printk("\t%s: xid=%u rc=%d",
+ TP_printk("%s: xid=%u rc=%d",
__get_str(func_name), __entry->xid, __entry->rc)
)
@@ -924,7 +924,7 @@ DECLARE_EVENT_CLASS(smb3_sync_err_class,
__entry->ino = ino;
__entry->rc = rc;
),
- TP_printk("\tino=%lu rc=%d",
+ TP_printk("ino=%lu rc=%d",
__entry->ino, __entry->rc)
)
@@ -950,7 +950,7 @@ DECLARE_EVENT_CLASS(smb3_enter_exit_class,
__entry->xid = xid;
__assign_str(func_name);
),
- TP_printk("\t%s: xid=%u",
+ TP_printk("%s: xid=%u",
__get_str(func_name), __entry->xid)
)
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index 266af17aa7d9..32d528b4dd83 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -30,9 +30,6 @@
#include "smbdirect.h"
#include "compress.h"
-/* Max number of iovectors we can use off the stack when sending requests. */
-#define CIFS_MAX_IOV_SIZE 8
-
void
cifs_wake_up_task(struct mid_q_entry *mid)
{
@@ -41,42 +38,6 @@ cifs_wake_up_task(struct mid_q_entry *mid)
wake_up_process(mid->callback_data);
}
-static struct mid_q_entry *
-alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
-{
- struct mid_q_entry *temp;
-
- if (server == NULL) {
- cifs_dbg(VFS, "%s: null TCP session\n", __func__);
- return NULL;
- }
-
- temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
- memset(temp, 0, sizeof(struct mid_q_entry));
- kref_init(&temp->refcount);
- temp->mid = get_mid(smb_buffer);
- temp->pid = current->pid;
- temp->command = cpu_to_le16(smb_buffer->Command);
- cifs_dbg(FYI, "For smb_command %d\n", smb_buffer->Command);
- /* easier to use jiffies */
- /* when mid allocated can be before when sent */
- temp->when_alloc = jiffies;
- temp->server = server;
-
- /*
- * The default is for the mid to be synchronous, so the
- * default callback just wakes up the current task.
- */
- get_task_struct(current);
- temp->creator = current;
- temp->callback = cifs_wake_up_task;
- temp->callback_data = current;
-
- atomic_inc(&mid_count);
- temp->mid_state = MID_REQUEST_ALLOCATED;
- return temp;
-}
-
void __release_mid(struct kref *refcount)
{
struct mid_q_entry *midEntry =
@@ -89,7 +50,7 @@ void __release_mid(struct kref *refcount)
#endif
struct TCP_Server_Info *server = midEntry->server;
- if (midEntry->resp_buf && (midEntry->mid_flags & MID_WAIT_CANCELLED) &&
+ if (midEntry->resp_buf && (midEntry->wait_cancelled) &&
(midEntry->mid_state == MID_RESPONSE_RECEIVED ||
midEntry->mid_state == MID_RESPONSE_READY) &&
server->ops->handle_cancelled_mid)
@@ -160,12 +121,12 @@ void __release_mid(struct kref *refcount)
void
delete_mid(struct mid_q_entry *mid)
{
- spin_lock(&mid->server->mid_lock);
- if (!(mid->mid_flags & MID_DELETED)) {
+ spin_lock(&mid->server->mid_queue_lock);
+ if (mid->deleted_from_q == false) {
list_del_init(&mid->qhead);
- mid->mid_flags |= MID_DELETED;
+ mid->deleted_from_q = true;
}
- spin_unlock(&mid->server->mid_lock);
+ spin_unlock(&mid->server->mid_queue_lock);
release_mid(mid);
}
@@ -269,9 +230,8 @@ smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
return buflen;
}
-static int
-__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
- struct smb_rqst *rqst)
+int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst)
{
int rc;
struct kvec *iov;
@@ -397,7 +357,7 @@ unmask:
* socket so the server throws away the partial SMB
*/
cifs_signal_cifsd_for_reconnect(server, false);
- trace_smb3_partial_send_reconnect(server->CurrentMid,
+ trace_smb3_partial_send_reconnect(server->current_mid,
server->conn_id, server->hostname);
}
smbd_done:
@@ -456,22 +416,6 @@ smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
return rc;
}
-int
-smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
- unsigned int smb_buf_length)
-{
- struct kvec iov[2];
- struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = 2 };
-
- iov[0].iov_base = smb_buffer;
- iov[0].iov_len = 4;
- iov[1].iov_base = (char *)smb_buffer + 4;
- iov[1].iov_len = smb_buf_length;
-
- return __smb_send_rqst(server, 1, &rqst);
-}
-
static int
wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
const int timeout, const int flags,
@@ -509,7 +453,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
- trace_smb3_nblk_credits(server->CurrentMid,
+ trace_smb3_nblk_credits(server->current_mid,
server->conn_id, server->hostname, scredits, -1, in_flight);
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
__func__, 1, scredits);
@@ -542,7 +486,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
- trace_smb3_credit_timeout(server->CurrentMid,
+ trace_smb3_credit_timeout(server->current_mid,
server->conn_id, server->hostname, scredits,
num_credits, in_flight);
cifs_server_dbg(VFS, "wait timed out after %d ms\n",
@@ -585,7 +529,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
spin_unlock(&server->req_lock);
trace_smb3_credit_timeout(
- server->CurrentMid,
+ server->current_mid,
server->conn_id, server->hostname,
scredits, num_credits, in_flight);
cifs_server_dbg(VFS, "wait timed out after %d ms\n",
@@ -615,7 +559,7 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
in_flight = server->in_flight;
spin_unlock(&server->req_lock);
- trace_smb3_waitff_credits(server->CurrentMid,
+ trace_smb3_waitff_credits(server->current_mid,
server->conn_id, server->hostname, scredits,
-(num_credits), in_flight);
cifs_dbg(FYI, "%s: remove %u credits total=%d\n",
@@ -626,9 +570,8 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int num_credits,
return 0;
}
-static int
-wait_for_free_request(struct TCP_Server_Info *server, const int flags,
- unsigned int *instance)
+int wait_for_free_request(struct TCP_Server_Info *server, const int flags,
+ unsigned int *instance)
{
return wait_for_free_credits(server, 1, -1, flags,
instance);
@@ -666,7 +609,7 @@ wait_for_compound_request(struct TCP_Server_Info *server, int num,
*/
if (server->in_flight == 0) {
spin_unlock(&server->req_lock);
- trace_smb3_insufficient_credits(server->CurrentMid,
+ trace_smb3_insufficient_credits(server->current_mid,
server->conn_id, server->hostname, scredits,
num, in_flight);
cifs_dbg(FYI, "%s: %d requests in flight, needed %d total=%d\n",
@@ -690,40 +633,7 @@ cifs_wait_mtu_credits(struct TCP_Server_Info *server, size_t size,
return 0;
}
-static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
- struct mid_q_entry **ppmidQ)
-{
- spin_lock(&ses->ses_lock);
- if (ses->ses_status == SES_NEW) {
- if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) &&
- (in_buf->Command != SMB_COM_NEGOTIATE)) {
- spin_unlock(&ses->ses_lock);
- return -EAGAIN;
- }
- /* else ok - we are setting up session */
- }
-
- if (ses->ses_status == SES_EXITING) {
- /* check if SMB session is bad because we are setting it up */
- if (in_buf->Command != SMB_COM_LOGOFF_ANDX) {
- spin_unlock(&ses->ses_lock);
- return -EAGAIN;
- }
- /* else ok - we are shutting down session */
- }
- spin_unlock(&ses->ses_lock);
-
- *ppmidQ = alloc_mid(in_buf, ses->server);
- if (*ppmidQ == NULL)
- return -ENOMEM;
- spin_lock(&ses->server->mid_lock);
- list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q);
- spin_unlock(&ses->server->mid_lock);
- return 0;
-}
-
-static int
-wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
+int wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
int error;
@@ -737,34 +647,6 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
return 0;
}
-struct mid_q_entry *
-cifs_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
-{
- int rc;
- struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
- struct mid_q_entry *mid;
-
- if (rqst->rq_iov[0].iov_len != 4 ||
- rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
- return ERR_PTR(-EIO);
-
- /* enable signing if server requires it */
- if (server->sign)
- hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
-
- mid = alloc_mid(hdr, server);
- if (mid == NULL)
- return ERR_PTR(-ENOMEM);
-
- rc = cifs_sign_rqst(rqst, server, &mid->sequence_number);
- if (rc) {
- release_mid(mid);
- return ERR_PTR(rc);
- }
-
- return mid;
-}
-
/*
* Send a SMB request and set the callback function in the mid to handle
* the result. Caller is responsible for dealing with timeouts.
@@ -819,9 +701,9 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
mid->mid_state = MID_REQUEST_SUBMITTED;
/* put it on the pending_mid_q */
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
list_add_tail(&mid->qhead, &server->pending_mid_q);
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
/*
* Need to store the time in mid before calling I/O. For call_async,
@@ -845,45 +727,17 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst,
return rc;
}
-/*
- *
- * Send an SMB Request. No response info (other than return code)
- * needs to be parsed.
- *
- * flags indicate the type of request buffer and how long to wait
- * and whether to log NT STATUS code (error) before mapping it to POSIX error
- *
- */
-int
-SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
- char *in_buf, int flags)
-{
- int rc;
- struct kvec iov[1];
- struct kvec rsp_iov;
- int resp_buf_type;
-
- iov[0].iov_base = in_buf;
- iov[0].iov_len = get_rfc1002_length(in_buf) + 4;
- flags |= CIFS_NO_RSP_BUF;
- rc = SendReceive2(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
- cifs_dbg(NOISY, "SendRcvNoRsp flags %d rc %d\n", flags, rc);
-
- return rc;
-}
-
-static int
-cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
+int cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
{
int rc = 0;
cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n",
__func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state);
- spin_lock(&server->mid_lock);
+ spin_lock(&server->mid_queue_lock);
switch (mid->mid_state) {
case MID_RESPONSE_READY:
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
return rc;
case MID_RETRY_NEEDED:
rc = -EAGAIN;
@@ -898,85 +752,23 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
rc = mid->mid_rc;
break;
default:
- if (!(mid->mid_flags & MID_DELETED)) {
+ if (mid->deleted_from_q == false) {
list_del_init(&mid->qhead);
- mid->mid_flags |= MID_DELETED;
+ mid->deleted_from_q = true;
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
cifs_server_dbg(VFS, "%s: invalid mid state mid=%llu state=%d\n",
__func__, mid->mid, mid->mid_state);
rc = -EIO;
goto sync_mid_done;
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
sync_mid_done:
release_mid(mid);
return rc;
}
-static inline int
-send_cancel(struct TCP_Server_Info *server, struct smb_rqst *rqst,
- struct mid_q_entry *mid)
-{
- return server->ops->send_cancel ?
- server->ops->send_cancel(server, rqst, mid) : 0;
-}
-
-int
-cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
- bool log_error)
-{
- unsigned int len = get_rfc1002_length(mid->resp_buf) + 4;
-
- dump_smb(mid->resp_buf, min_t(u32, 92, len));
-
- /* convert the length into a more usable form */
- if (server->sign) {
- struct kvec iov[2];
- int rc = 0;
- struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = 2 };
-
- iov[0].iov_base = mid->resp_buf;
- iov[0].iov_len = 4;
- iov[1].iov_base = (char *)mid->resp_buf + 4;
- iov[1].iov_len = len - 4;
- /* FIXME: add code to kill session */
- rc = cifs_verify_signature(&rqst, server,
- mid->sequence_number);
- if (rc)
- cifs_server_dbg(VFS, "SMB signature verification returned error = %d\n",
- rc);
- }
-
- /* BB special case reconnect tid and uid here? */
- return map_and_check_smb_error(mid, log_error);
-}
-
-struct mid_q_entry *
-cifs_setup_request(struct cifs_ses *ses, struct TCP_Server_Info *ignored,
- struct smb_rqst *rqst)
-{
- int rc;
- struct smb_hdr *hdr = (struct smb_hdr *)rqst->rq_iov[0].iov_base;
- struct mid_q_entry *mid;
-
- if (rqst->rq_iov[0].iov_len != 4 ||
- rqst->rq_iov[0].iov_base + 4 != rqst->rq_iov[1].iov_base)
- return ERR_PTR(-EIO);
-
- rc = allocate_mid(ses, hdr, &mid);
- if (rc)
- return ERR_PTR(rc);
- rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number);
- if (rc) {
- delete_mid(mid);
- return ERR_PTR(rc);
- }
- return mid;
-}
-
static void
cifs_compound_callback(struct mid_q_entry *mid)
{
@@ -1018,14 +810,16 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
uint index = 0;
unsigned int min_in_flight = UINT_MAX, max_in_flight = 0;
struct TCP_Server_Info *server = NULL;
- int i;
+ int i, start, cur;
if (!ses)
return NULL;
spin_lock(&ses->chan_lock);
+ start = atomic_inc_return(&ses->chan_seq);
for (i = 0; i < ses->chan_count; i++) {
- server = ses->chans[i].server;
+ cur = (start + i) % ses->chan_count;
+ server = ses->chans[cur].server;
if (!server || server->terminate)
continue;
@@ -1042,17 +836,15 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
*/
if (server->in_flight < min_in_flight) {
min_in_flight = server->in_flight;
- index = i;
+ index = cur;
}
if (server->in_flight > max_in_flight)
max_in_flight = server->in_flight;
}
/* if all channels are equally loaded, fall back to round-robin */
- if (min_in_flight == max_in_flight) {
- index = (uint)atomic_inc_return(&ses->chan_seq);
- index %= ses->chan_count;
- }
+ if (min_in_flight == max_in_flight)
+ index = (uint)start % ses->chan_count;
server = ses->chans[index].server;
spin_unlock(&ses->chan_lock);
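
cifs_pick_channel() now starts its scan at an offset taken from chan_seq, so both the least-loaded search and the equal-load fallback rotate across channels. A simplified, lock-free user-space sketch of the selection (illustrative only):

/*
 * Simplified sketch of the rotated channel selection: begin the scan
 * at an incrementing sequence number, pick the least-loaded channel,
 * and fall back to plain round-robin when all loads are equal.
 * Names and structure are illustrative only.
 */
#include <stdio.h>

#define NCHAN 3

static unsigned int chan_seq;
static unsigned int in_flight[NCHAN] = { 5, 5, 5 };

static unsigned int pick_channel(void)
{
	unsigned int start = ++chan_seq;
	unsigned int min = ~0u, max = 0, index = 0;

	for (unsigned int i = 0; i < NCHAN; i++) {
		unsigned int cur = (start + i) % NCHAN;

		if (in_flight[cur] < min) {
			min = in_flight[cur];
			index = cur;
		}
		if (in_flight[cur] > max)
			max = in_flight[cur];
	}

	if (min == max)			/* equally loaded: round-robin */
		index = start % NCHAN;

	return index;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("picked channel %u\n", pick_channel());
	return 0;
}
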
@@ -1213,15 +1005,15 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n",
midQ[i]->mid, le16_to_cpu(midQ[i]->command));
send_cancel(server, &rqst[i], midQ[i]);
- spin_lock(&server->mid_lock);
- midQ[i]->mid_flags |= MID_WAIT_CANCELLED;
+ spin_lock(&server->mid_queue_lock);
+ midQ[i]->wait_cancelled = true;
if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED ||
midQ[i]->mid_state == MID_RESPONSE_RECEIVED) {
midQ[i]->callback = cifs_cancelled_callback;
cancelled_mid[i] = true;
credits[i].value = 0;
}
- spin_unlock(&server->mid_lock);
+ spin_unlock(&server->mid_queue_lock);
}
}
@@ -1304,344 +1096,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
rqst, resp_buf_type, resp_iov);
}
-int
-SendReceive2(const unsigned int xid, struct cifs_ses *ses,
- struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
- const int flags, struct kvec *resp_iov)
-{
- struct smb_rqst rqst;
- struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
- int rc;
-
- if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
- new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
- GFP_KERNEL);
- if (!new_iov) {
- /* otherwise cifs_send_recv below sets resp_buf_type */
- *resp_buf_type = CIFS_NO_BUFFER;
- return -ENOMEM;
- }
- } else
- new_iov = s_iov;
-
- /* 1st iov is a RFC1001 length followed by the rest of the packet */
- memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
-
- new_iov[0].iov_base = new_iov[1].iov_base;
- new_iov[0].iov_len = 4;
- new_iov[1].iov_base += 4;
- new_iov[1].iov_len -= 4;
-
- memset(&rqst, 0, sizeof(struct smb_rqst));
- rqst.rq_iov = new_iov;
- rqst.rq_nvec = n_vec + 1;
-
- rc = cifs_send_recv(xid, ses, ses->server,
- &rqst, resp_buf_type, flags, resp_iov);
- if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
- kfree(new_iov);
- return rc;
-}
-
-int
-SendReceive(const unsigned int xid, struct cifs_ses *ses,
- struct smb_hdr *in_buf, struct smb_hdr *out_buf,
- int *pbytes_returned, const int flags)
-{
- int rc = 0;
- struct mid_q_entry *midQ;
- unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
- struct kvec iov = { .iov_base = in_buf, .iov_len = len };
- struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
- struct cifs_credits credits = { .value = 1, .instance = 0 };
- struct TCP_Server_Info *server;
-
- if (ses == NULL) {
- cifs_dbg(VFS, "Null smb session\n");
- return -EIO;
- }
- server = ses->server;
- if (server == NULL) {
- cifs_dbg(VFS, "Null tcp session\n");
- return -EIO;
- }
-
- spin_lock(&server->srv_lock);
- if (server->tcpStatus == CifsExiting) {
- spin_unlock(&server->srv_lock);
- return -ENOENT;
- }
- spin_unlock(&server->srv_lock);
-
- /* Ensure that we do not send more than 50 overlapping requests
- to the same server. We may make this configurable later or
- use ses->maxReq */
-
- if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cifs_server_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
- len);
- return -EIO;
- }
-
- rc = wait_for_free_request(server, flags, &credits.instance);
- if (rc)
- return rc;
-
- /* make sure that we sign in the same order that we send on this socket
- and avoid races inside tcp sendmsg code that could cause corruption
- of smb data */
-
- cifs_server_lock(server);
-
- rc = allocate_mid(ses, in_buf, &midQ);
- if (rc) {
- cifs_server_unlock(server);
- /* Update # of requests on wire to server */
- add_credits(server, &credits, 0);
- return rc;
- }
-
- rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
- if (rc) {
- cifs_server_unlock(server);
- goto out;
- }
-
- midQ->mid_state = MID_REQUEST_SUBMITTED;
-
- rc = smb_send(server, in_buf, len);
- cifs_save_when_sent(midQ);
-
- if (rc < 0)
- server->sequence_number -= 2;
-
- cifs_server_unlock(server);
-
- if (rc < 0)
- goto out;
-
- rc = wait_for_response(server, midQ);
- if (rc != 0) {
- send_cancel(server, &rqst, midQ);
- spin_lock(&server->mid_lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
- midQ->mid_state == MID_RESPONSE_RECEIVED) {
- /* no longer considered to be "in-flight" */
- midQ->callback = release_mid;
- spin_unlock(&server->mid_lock);
- add_credits(server, &credits, 0);
- return rc;
- }
- spin_unlock(&server->mid_lock);
- }
-
- rc = cifs_sync_mid_result(midQ, server);
- if (rc != 0) {
- add_credits(server, &credits, 0);
- return rc;
- }
-
- if (!midQ->resp_buf || !out_buf ||
- midQ->mid_state != MID_RESPONSE_READY) {
- rc = -EIO;
- cifs_server_dbg(VFS, "Bad MID state?\n");
- goto out;
- }
-
- *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
- memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
- rc = cifs_check_receive(midQ, server, 0);
-out:
- delete_mid(midQ);
- add_credits(server, &credits, 0);
-
- return rc;
-}
-
-/* We send a LOCKINGX_CANCEL_LOCK to cause the Windows
- blocking lock to return. */
-
-static int
-send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
- struct smb_hdr *in_buf,
- struct smb_hdr *out_buf)
-{
- int bytes_returned;
- struct cifs_ses *ses = tcon->ses;
- LOCK_REQ *pSMB = (LOCK_REQ *)in_buf;
-
- /* We just modify the current in_buf to change
- the type of lock from LOCKING_ANDX_SHARED_LOCK
- or LOCKING_ANDX_EXCLUSIVE_LOCK to
- LOCKING_ANDX_CANCEL_LOCK. */
-
- pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
- pSMB->Timeout = 0;
- pSMB->hdr.Mid = get_next_mid(ses->server);
-
- return SendReceive(xid, ses, in_buf, out_buf,
- &bytes_returned, 0);
-}
-
-int
-SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
- struct smb_hdr *in_buf, struct smb_hdr *out_buf,
- int *pbytes_returned)
-{
- int rc = 0;
- int rstart = 0;
- struct mid_q_entry *midQ;
- struct cifs_ses *ses;
- unsigned int len = be32_to_cpu(in_buf->smb_buf_length);
- struct kvec iov = { .iov_base = in_buf, .iov_len = len };
- struct smb_rqst rqst = { .rq_iov = &iov, .rq_nvec = 1 };
- unsigned int instance;
- struct TCP_Server_Info *server;
-
- if (tcon == NULL || tcon->ses == NULL) {
- cifs_dbg(VFS, "Null smb session\n");
- return -EIO;
- }
- ses = tcon->ses;
- server = ses->server;
-
- if (server == NULL) {
- cifs_dbg(VFS, "Null tcp session\n");
- return -EIO;
- }
-
- spin_lock(&server->srv_lock);
- if (server->tcpStatus == CifsExiting) {
- spin_unlock(&server->srv_lock);
- return -ENOENT;
- }
- spin_unlock(&server->srv_lock);
-
- /* Ensure that we do not send more than 50 overlapping requests
- to the same server. We may make this configurable later or
- use ses->maxReq */
-
- if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
- cifs_tcon_dbg(VFS, "Invalid length, greater than maximum frame, %d\n",
- len);
- return -EIO;
- }
-
- rc = wait_for_free_request(server, CIFS_BLOCKING_OP, &instance);
- if (rc)
- return rc;
-
- /* make sure that we sign in the same order that we send on this socket
- and avoid races inside tcp sendmsg code that could cause corruption
- of smb data */
-
- cifs_server_lock(server);
-
- rc = allocate_mid(ses, in_buf, &midQ);
- if (rc) {
- cifs_server_unlock(server);
- return rc;
- }
-
- rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number);
- if (rc) {
- delete_mid(midQ);
- cifs_server_unlock(server);
- return rc;
- }
-
- midQ->mid_state = MID_REQUEST_SUBMITTED;
- rc = smb_send(server, in_buf, len);
- cifs_save_when_sent(midQ);
-
- if (rc < 0)
- server->sequence_number -= 2;
-
- cifs_server_unlock(server);
-
- if (rc < 0) {
- delete_mid(midQ);
- return rc;
- }
-
- /* Wait for a reply - allow signals to interrupt. */
- rc = wait_event_interruptible(server->response_q,
- (!(midQ->mid_state == MID_REQUEST_SUBMITTED ||
- midQ->mid_state == MID_RESPONSE_RECEIVED)) ||
- ((server->tcpStatus != CifsGood) &&
- (server->tcpStatus != CifsNew)));
-
- /* Were we interrupted by a signal ? */
- spin_lock(&server->srv_lock);
- if ((rc == -ERESTARTSYS) &&
- (midQ->mid_state == MID_REQUEST_SUBMITTED ||
- midQ->mid_state == MID_RESPONSE_RECEIVED) &&
- ((server->tcpStatus == CifsGood) ||
- (server->tcpStatus == CifsNew))) {
- spin_unlock(&server->srv_lock);
-
- if (in_buf->Command == SMB_COM_TRANSACTION2) {
- /* POSIX lock. We send a NT_CANCEL SMB to cause the
- blocking lock to return. */
- rc = send_cancel(server, &rqst, midQ);
- if (rc) {
- delete_mid(midQ);
- return rc;
- }
- } else {
- /* Windows lock. We send a LOCKINGX_CANCEL_LOCK
- to cause the blocking lock to return. */
-
- rc = send_lock_cancel(xid, tcon, in_buf, out_buf);
-
- /* If we get -ENOLCK back the lock may have
- already been removed. Don't exit in this case. */
- if (rc && rc != -ENOLCK) {
- delete_mid(midQ);
- return rc;
- }
- }
-
- rc = wait_for_response(server, midQ);
- if (rc) {
- send_cancel(server, &rqst, midQ);
- spin_lock(&server->mid_lock);
- if (midQ->mid_state == MID_REQUEST_SUBMITTED ||
- midQ->mid_state == MID_RESPONSE_RECEIVED) {
- /* no longer considered to be "in-flight" */
- midQ->callback = release_mid;
- spin_unlock(&server->mid_lock);
- return rc;
- }
- spin_unlock(&server->mid_lock);
- }
-
- /* We got the response - restart system call. */
- rstart = 1;
- spin_lock(&server->srv_lock);
- }
- spin_unlock(&server->srv_lock);
-
- rc = cifs_sync_mid_result(midQ, server);
- if (rc != 0)
- return rc;
-
- /* rcvd frame is ok */
- if (out_buf == NULL || midQ->mid_state != MID_RESPONSE_READY) {
- rc = -EIO;
- cifs_tcon_dbg(VFS, "Bad MID state?\n");
- goto out;
- }
-
- *pbytes_returned = get_rfc1002_length(midQ->resp_buf);
- memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4);
- rc = cifs_check_receive(midQ, server, 0);
-out:
- delete_mid(midQ);
- if (rstart && rc == -EACCES)
- return -ERESTARTSYS;
- return rc;
-}
/*
* Discard any remaining data in the current SMB. To do this, we borrow the
diff --git a/fs/smb/common/smbdirect/smbdirect.h b/fs/smb/common/smbdirect/smbdirect.h
new file mode 100644
index 000000000000..b9a385344ff3
--- /dev/null
+++ b/fs/smb/common/smbdirect/smbdirect.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2017, Microsoft Corporation.
+ * Copyright (C) 2018, LG Electronics.
+ */
+
+#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
+#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
+
+/* SMB-DIRECT buffer descriptor V1 structure [MS-SMBD] 2.2.3.1 */
+struct smbdirect_buffer_descriptor_v1 {
+ __le64 offset;
+ __le32 token;
+ __le32 length;
+} __packed;
+
+/*
+ * Connection parameters mostly from [MS-SMBD] 3.1.1.1
+ *
+ * These are set up and negotiated at the beginning of a
+ * connection and remain constant unless explicitly changed.
+ *
+ * Some values are important for the upper layer.
+ */
+struct smbdirect_socket_parameters {
+ __u16 recv_credit_max;
+ __u16 send_credit_target;
+ __u32 max_send_size;
+ __u32 max_fragmented_send_size;
+ __u32 max_recv_size;
+ __u32 max_fragmented_recv_size;
+ __u32 max_read_write_size;
+ __u32 keepalive_interval_msec;
+ __u32 keepalive_timeout_msec;
+} __packed;
+
+#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ */
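
smbdirect_buffer_descriptor_v1 is the 16-byte packed descriptor from [MS-SMBD] 2.2.3.1. A quick user-space analogue that checks the expected layout (analogue types, not the kernel header):

/*
 * User-space analogue of smbdirect_buffer_descriptor_v1, just to show
 * the expected 16-byte packed wire layout ([MS-SMBD] 2.2.3.1).
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct buffer_descriptor_v1 {
	uint64_t offset;
	uint32_t token;
	uint32_t length;
} __attribute__((packed));

int main(void)
{
	printf("size=%zu offset@%zu token@%zu length@%zu\n",
	       sizeof(struct buffer_descriptor_v1),
	       offsetof(struct buffer_descriptor_v1, offset),
	       offsetof(struct buffer_descriptor_v1, token),
	       offsetof(struct buffer_descriptor_v1, length));
	return 0;	/* expected: size=16 offset@0 token@8 length@12 */
}
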
diff --git a/fs/smb/common/smbdirect/smbdirect_pdu.h b/fs/smb/common/smbdirect/smbdirect_pdu.h
new file mode 100644
index 000000000000..ae9fdb05ce23
--- /dev/null
+++ b/fs/smb/common/smbdirect/smbdirect_pdu.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2017 Stefan Metzmacher
+ */
+
+#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
+#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
+
+#define SMBDIRECT_V1 0x0100
+
+/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
+struct smbdirect_negotiate_req {
+ __le16 min_version;
+ __le16 max_version;
+ __le16 reserved;
+ __le16 credits_requested;
+ __le32 preferred_send_size;
+ __le32 max_receive_size;
+ __le32 max_fragmented_size;
+} __packed;
+
+/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
+struct smbdirect_negotiate_resp {
+ __le16 min_version;
+ __le16 max_version;
+ __le16 negotiated_version;
+ __le16 reserved;
+ __le16 credits_requested;
+ __le16 credits_granted;
+ __le32 status;
+ __le32 max_readwrite_size;
+ __le32 preferred_send_size;
+ __le32 max_receive_size;
+ __le32 max_fragmented_size;
+} __packed;
+
+#define SMBDIRECT_DATA_MIN_HDR_SIZE 0x14
+#define SMBDIRECT_DATA_OFFSET 0x18
+
+#define SMBDIRECT_FLAG_RESPONSE_REQUESTED 0x0001
+
+/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
+struct smbdirect_data_transfer {
+ __le16 credits_requested;
+ __le16 credits_granted;
+ __le16 flags;
+ __le16 reserved;
+ __le32 remaining_data_length;
+ __le32 data_offset;
+ __le32 data_length;
+ __le32 padding;
+ __u8 buffer[];
+} __packed;
+
+#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__ */
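
A hedged sketch of filling a v1 negotiate request with explicit little-endian conversion; the struct below mirrors smbdirect_negotiate_req with user-space types and is illustrative only:

/*
 * Sketch of filling a v1 negotiate request as defined above, using a
 * user-space analogue with explicit little-endian conversion.  The
 * struct mirrors smbdirect_negotiate_req but is not the kernel type.
 */
#include <stdint.h>
#include <string.h>
#include <endian.h>
#include <stdio.h>

#define SMBDIRECT_V1 0x0100

struct negotiate_req {
	uint16_t min_version;
	uint16_t max_version;
	uint16_t reserved;
	uint16_t credits_requested;
	uint32_t preferred_send_size;
	uint32_t max_receive_size;
	uint32_t max_fragmented_size;
} __attribute__((packed));

int main(void)
{
	struct negotiate_req req;

	memset(&req, 0, sizeof(req));
	req.min_version = htole16(SMBDIRECT_V1);
	req.max_version = htole16(SMBDIRECT_V1);
	req.credits_requested = htole16(255);
	req.preferred_send_size = htole32(1364);
	req.max_receive_size = htole32(8192);
	req.max_fragmented_size = htole32(1024 * 1024);

	printf("negotiate request is %zu bytes on the wire\n", sizeof(req));
	return 0;	/* expected: 20 bytes */
}
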
diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
new file mode 100644
index 000000000000..3c4a8d627aa3
--- /dev/null
+++ b/fs/smb/common/smbdirect/smbdirect_socket.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2025 Stefan Metzmacher
+ */
+
+#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
+#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
+
+enum smbdirect_socket_status {
+ SMBDIRECT_SOCKET_CREATED,
+ SMBDIRECT_SOCKET_CONNECTING,
+ SMBDIRECT_SOCKET_CONNECTED,
+ SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
+ SMBDIRECT_SOCKET_DISCONNECTING,
+ SMBDIRECT_SOCKET_DISCONNECTED,
+ SMBDIRECT_SOCKET_DESTROYED
+};
+
+struct smbdirect_socket {
+ enum smbdirect_socket_status status;
+
+ /* RDMA related */
+ struct {
+ struct rdma_cm_id *cm_id;
+ } rdma;
+
+ /* IB verbs related */
+ struct {
+ struct ib_pd *pd;
+ struct ib_cq *send_cq;
+ struct ib_cq *recv_cq;
+
+ /*
+ * shortcuts for rdma.cm_id->{qp,device};
+ */
+ struct ib_qp *qp;
+ struct ib_device *dev;
+ } ib;
+
+ struct smbdirect_socket_parameters parameters;
+
+ /*
+ * The state for posted send buffers
+ */
+ struct {
+ /*
+ * Memory pools for preallocating
+ * smbdirect_send_io buffers
+ */
+ struct {
+ struct kmem_cache *cache;
+ mempool_t *pool;
+ } mem;
+ } send_io;
+
+ /*
+ * The state for posted receive buffers
+ */
+ struct {
+ /*
+ * The type of PDU we are expecting
+ */
+ enum {
+ SMBDIRECT_EXPECT_NEGOTIATE_REQ = 1,
+ SMBDIRECT_EXPECT_NEGOTIATE_REP = 2,
+ SMBDIRECT_EXPECT_DATA_TRANSFER = 3,
+ } expected;
+
+ /*
+ * Memory pools for preallocating
+ * smbdirect_recv_io buffers
+ */
+ struct {
+ struct kmem_cache *cache;
+ mempool_t *pool;
+ } mem;
+
+ /*
+ * The list of free smbdirect_recv_io
+ * structures
+ */
+ struct {
+ struct list_head list;
+ spinlock_t lock;
+ } free;
+
+ /*
+ * The list of arrived non-empty smbdirect_recv_io
+ * structures
+ *
+ * This represents the reassembly queue.
+ */
+ struct {
+ struct list_head list;
+ spinlock_t lock;
+ wait_queue_head_t wait_queue;
+ /* total data length of reassembly queue */
+ int data_length;
+ int queue_length;
+ /* the offset to first buffer in reassembly queue */
+ int first_entry_offset;
+ /*
+ * Indicate if we have received a full packet on the
+			 * connection. This is used to identify the first SMBD
+			 * packet of an assembled payload (SMB packet) in the
+			 * reassembly queue, so we can return an RFC1002 length
+			 * to the upper layer to indicate the length of the SMB
+			 * packet received.
+ */
+ bool full_packet_received;
+ } reassembly;
+ } recv_io;
+};
+
+struct smbdirect_send_io {
+ struct smbdirect_socket *socket;
+ struct ib_cqe cqe;
+
+ /*
+ * The SGE entries for this work request
+ *
+ * The first points to the packet header
+ */
+#define SMBDIRECT_SEND_IO_MAX_SGE 6
+ size_t num_sge;
+ struct ib_sge sge[SMBDIRECT_SEND_IO_MAX_SGE];
+
+ /*
+ * Link to the list of sibling smbdirect_send_io
+ * messages.
+ */
+ struct list_head sibling_list;
+ struct ib_send_wr wr;
+
+ /* SMBD packet header follows this structure */
+ u8 packet[];
+};
+
+struct smbdirect_recv_io {
+ struct smbdirect_socket *socket;
+ struct ib_cqe cqe;
+
+ /*
+ * For now we only use a single SGE
+ * as we have just one large buffer
+ * per posted recv.
+ */
+#define SMBDIRECT_RECV_IO_MAX_SGE 1
+ struct ib_sge sge;
+
+ /* Link to free or reassembly list */
+ struct list_head list;
+
+ /* Indicate if this is the 1st packet of a payload */
+ bool first_segment;
+
+ /* SMBD packet header and payload follows this structure */
+ u8 packet[];
+};
+
+#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
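
The reassembly bookkeeping (data_length, queue_length, first_entry_offset) tracks how much payload is queued and how far into the head buffer the reader has progressed. A simplified single-threaded analogue of draining it (hypothetical names, no locking):

/*
 * Simplified, single-threaded analogue of draining the reassembly
 * queue: consume "len" bytes across queued receive buffers, updating
 * data_length and first_entry_offset the way the real code does.
 * Everything here is illustrative, not the kernel implementation.
 */
#include <stdio.h>

#define NBUF 3

static int buf_len[NBUF] = { 100, 100, 100 };
static int head, data_length = 300, first_entry_offset;

static int reassembly_read(int len)
{
	int copied = 0;

	while (copied < len && data_length > 0) {
		int avail = buf_len[head] - first_entry_offset;
		int take = avail < (len - copied) ? avail : (len - copied);

		copied += take;
		data_length -= take;
		first_entry_offset += take;

		if (first_entry_offset == buf_len[head]) {
			/* buffer fully consumed: move to the next one */
			head++;
			first_entry_offset = 0;
		}
	}
	return copied;
}

int main(void)
{
	printf("read %d, remaining %d, offset %d\n",
	       reassembly_read(150), data_length, first_entry_offset);
	return 0;	/* read 150, remaining 150, offset 50 */
}
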
diff --git a/fs/smb/server/Kconfig b/fs/smb/server/Kconfig
index cf70e96ad4de..4a23a5e7e8fe 100644
--- a/fs/smb/server/Kconfig
+++ b/fs/smb/server/Kconfig
@@ -11,6 +11,7 @@ config SMB_SERVER
select CRYPTO_HMAC
select CRYPTO_ECB
select CRYPTO_LIB_DES
+ select CRYPTO_LIB_SHA256
select CRYPTO_SHA256
select CRYPTO_CMAC
select CRYPTO_SHA512
diff --git a/fs/smb/server/auth.c b/fs/smb/server/auth.c
index b3d121052408..d99871c21451 100644
--- a/fs/smb/server/auth.c
+++ b/fs/smb/server/auth.c
@@ -979,40 +979,6 @@ out:
return rc;
}
-int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
- __u8 *pi_hash)
-{
- int rc;
- struct ksmbd_crypto_ctx *ctx = NULL;
-
- ctx = ksmbd_crypto_ctx_find_sha256();
- if (!ctx) {
- ksmbd_debug(AUTH, "could not alloc sha256\n");
- return -ENOMEM;
- }
-
- rc = crypto_shash_init(CRYPTO_SHA256(ctx));
- if (rc) {
- ksmbd_debug(AUTH, "could not init shashn");
- goto out;
- }
-
- rc = crypto_shash_update(CRYPTO_SHA256(ctx), sd_buf, len);
- if (rc) {
- ksmbd_debug(AUTH, "could not update with n\n");
- goto out;
- }
-
- rc = crypto_shash_final(CRYPTO_SHA256(ctx), pi_hash);
- if (rc) {
- ksmbd_debug(AUTH, "Could not generate hash err : %d\n", rc);
- goto out;
- }
-out:
- ksmbd_release_crypto_ctx(ctx);
- return rc;
-}
-
static int ksmbd_get_encryption_key(struct ksmbd_work *work, __u64 ses_id,
int enc, u8 *key)
{
diff --git a/fs/smb/server/auth.h b/fs/smb/server/auth.h
index 362b6159a6cf..6879a1bd1b91 100644
--- a/fs/smb/server/auth.h
+++ b/fs/smb/server/auth.h
@@ -66,6 +66,4 @@ int ksmbd_gen_smb311_encryptionkey(struct ksmbd_conn *conn,
struct ksmbd_session *sess);
int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
__u8 *pi_hash);
-int ksmbd_gen_sd_hash(struct ksmbd_conn *conn, char *sd_buf, int len,
- __u8 *pi_hash);
#endif
diff --git a/fs/smb/server/connection.c b/fs/smb/server/connection.c
index 83764c230e9d..3f04a2977ba8 100644
--- a/fs/smb/server/connection.c
+++ b/fs/smb/server/connection.c
@@ -40,7 +40,7 @@ void ksmbd_conn_free(struct ksmbd_conn *conn)
kvfree(conn->request_buf);
kfree(conn->preauth_info);
if (atomic_dec_and_test(&conn->refcnt)) {
- ksmbd_free_transport(conn->transport);
+ conn->transport->ops->free_transport(conn->transport);
kfree(conn);
}
}
diff --git a/fs/smb/server/connection.h b/fs/smb/server/connection.h
index 14620e147dda..31dd1caac1e8 100644
--- a/fs/smb/server/connection.h
+++ b/fs/smb/server/connection.h
@@ -46,6 +46,7 @@ struct ksmbd_conn {
struct mutex srv_mutex;
int status;
unsigned int cli_cap;
+ __be32 inet_addr;
char *request_buf;
struct ksmbd_transport *transport;
struct nls_table *local_nls;
@@ -108,6 +109,7 @@ struct ksmbd_conn {
__le16 signing_algorithm;
bool binding;
atomic_t refcnt;
+ bool is_aapl;
};
struct ksmbd_conn_ops {
@@ -132,6 +134,7 @@ struct ksmbd_transport_ops {
void *buf, unsigned int len,
struct smb2_buffer_desc_v1 *desc,
unsigned int desc_len);
+ void (*free_transport)(struct ksmbd_transport *kt);
};
struct ksmbd_transport {
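
Teardown now goes through the transport's own free_transport() op instead of a TCP-specific helper, so RDMA and TCP connections free themselves the same way. A generic sketch of that ops-table dispatch (illustrative types):

/*
 * Generic sketch of the ops-table dispatch the change introduces:
 * the connection no longer needs to know which transport it holds,
 * it just calls ops->free_transport().  Types are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

struct transport;

struct transport_ops {
	void (*free_transport)(struct transport *t);
};

struct transport {
	const struct transport_ops *ops;
};

static void tcp_free_transport(struct transport *t)
{
	printf("freeing tcp transport\n");
	free(t);
}

static const struct transport_ops tcp_ops = {
	.free_transport = tcp_free_transport,
};

int main(void)
{
	struct transport *t = malloc(sizeof(*t));

	if (!t)
		return 1;
	t->ops = &tcp_ops;

	/* connection teardown: transport-agnostic */
	t->ops->free_transport(t);
	return 0;
}
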
diff --git a/fs/smb/server/crypto_ctx.c b/fs/smb/server/crypto_ctx.c
index ce733dc9a4a3..80bd68c8635e 100644
--- a/fs/smb/server/crypto_ctx.c
+++ b/fs/smb/server/crypto_ctx.c
@@ -75,9 +75,6 @@ static struct shash_desc *alloc_shash_desc(int id)
case CRYPTO_SHASH_CMACAES:
tfm = crypto_alloc_shash("cmac(aes)", 0, 0);
break;
- case CRYPTO_SHASH_SHA256:
- tfm = crypto_alloc_shash("sha256", 0, 0);
- break;
case CRYPTO_SHASH_SHA512:
tfm = crypto_alloc_shash("sha512", 0, 0);
break;
@@ -198,11 +195,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void)
return ____crypto_shash_ctx_find(CRYPTO_SHASH_CMACAES);
}
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void)
-{
- return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA256);
-}
-
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void)
{
return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
diff --git a/fs/smb/server/crypto_ctx.h b/fs/smb/server/crypto_ctx.h
index 4a367c62f653..ac64801d52d3 100644
--- a/fs/smb/server/crypto_ctx.h
+++ b/fs/smb/server/crypto_ctx.h
@@ -13,7 +13,6 @@ enum {
CRYPTO_SHASH_HMACMD5 = 0,
CRYPTO_SHASH_HMACSHA256,
CRYPTO_SHASH_CMACAES,
- CRYPTO_SHASH_SHA256,
CRYPTO_SHASH_SHA512,
CRYPTO_SHASH_MAX,
};
@@ -39,14 +38,12 @@ struct ksmbd_crypto_ctx {
#define CRYPTO_HMACMD5(c) ((c)->desc[CRYPTO_SHASH_HMACMD5])
#define CRYPTO_HMACSHA256(c) ((c)->desc[CRYPTO_SHASH_HMACSHA256])
#define CRYPTO_CMACAES(c) ((c)->desc[CRYPTO_SHASH_CMACAES])
-#define CRYPTO_SHA256(c) ((c)->desc[CRYPTO_SHASH_SHA256])
#define CRYPTO_SHA512(c) ((c)->desc[CRYPTO_SHASH_SHA512])
#define CRYPTO_HMACMD5_TFM(c) ((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
#define CRYPTO_HMACSHA256_TFM(c)\
((c)->desc[CRYPTO_SHASH_HMACSHA256]->tfm)
#define CRYPTO_CMACAES_TFM(c) ((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
-#define CRYPTO_SHA256_TFM(c) ((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
#define CRYPTO_SHA512_TFM(c) ((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
#define CRYPTO_GCM(c) ((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
@@ -57,7 +54,6 @@ struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacmd5(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_hmacsha256(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
-struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
void ksmbd_crypto_destroy(void);
diff --git a/fs/smb/server/oplock.c b/fs/smb/server/oplock.c
index 81a29857b1e3..d7a8a580d013 100644
--- a/fs/smb/server/oplock.c
+++ b/fs/smb/server/oplock.c
@@ -146,12 +146,9 @@ static struct oplock_info *opinfo_get_list(struct ksmbd_inode *ci)
{
struct oplock_info *opinfo;
- if (list_empty(&ci->m_op_list))
- return NULL;
-
down_read(&ci->m_lock);
- opinfo = list_first_entry(&ci->m_op_list, struct oplock_info,
- op_entry);
+ opinfo = list_first_entry_or_null(&ci->m_op_list, struct oplock_info,
+ op_entry);
if (opinfo) {
if (opinfo->conn == NULL ||
!atomic_inc_not_zero(&opinfo->refcount))
@@ -1496,7 +1493,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
sizeof(struct create_lease_v2) - 4)
- return NULL;
+ goto err_out;
memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
@@ -1512,7 +1509,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
sizeof(struct create_lease))
- return NULL;
+ goto err_out;
memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
@@ -1521,6 +1518,9 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
lreq->version = 1;
}
return lreq;
+err_out:
+ kfree(lreq);
+ return NULL;
}
/**
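
The parse_lease_state() fix routes malformed create contexts through err_out so the partially built lreq is freed rather than leaked. A small sketch of that goto-cleanup pattern (illustrative code):

/*
 * Sketch of the single-exit error path the fix adopts: on a malformed
 * input, jump to a label that frees the partially built object instead
 * of returning NULL and leaking it.  Illustrative code only.
 */
#include <stdlib.h>
#include <string.h>

struct lease_req {
	char key[16];
	int version;
};

static struct lease_req *parse(const char *buf, size_t len)
{
	struct lease_req *req = calloc(1, sizeof(*req));

	if (!req)
		return NULL;

	if (len < sizeof(req->key))	/* malformed context */
		goto err_out;

	memcpy(req->key, buf, sizeof(req->key));
	req->version = 2;
	return req;

err_out:
	free(req);			/* no leak on the error path */
	return NULL;
}

int main(void)
{
	struct lease_req *r = parse("0123456789abcdef", 16);

	free(parse("short", 5));	/* returns NULL, nothing leaked */
	free(r);
	return 0;
}
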
diff --git a/fs/smb/server/server.c b/fs/smb/server/server.c
index ab533c602987..8c9c49c3a0a4 100644
--- a/fs/smb/server/server.c
+++ b/fs/smb/server/server.c
@@ -631,6 +631,5 @@ MODULE_SOFTDEP("pre: sha512");
MODULE_SOFTDEP("pre: aead2");
MODULE_SOFTDEP("pre: ccm");
MODULE_SOFTDEP("pre: gcm");
-MODULE_SOFTDEP("pre: crc32");
module_init(ksmbd_server_init)
module_exit(ksmbd_server_exit)
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 46aa08245742..0d92ce49aed7 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -633,6 +633,11 @@ smb2_get_name(const char *src, const int maxlen, struct nls_table *local_nls)
return name;
}
+ if (*name == '\0') {
+ kfree(name);
+ return ERR_PTR(-EINVAL);
+ }
+
if (*name == '\\') {
pr_err("not allow directory name included leading slash\n");
kfree(name);
@@ -1589,7 +1594,7 @@ static int krb5_authenticate(struct ksmbd_work *work,
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
char *in_blob, *out_blob;
- struct channel *chann = NULL;
+ struct channel *chann = NULL, *old;
u64 prev_sess_id;
int in_len, out_len;
int retval;
@@ -1602,24 +1607,38 @@ static int krb5_authenticate(struct ksmbd_work *work,
out_len = work->response_sz -
(le16_to_cpu(rsp->SecurityBufferOffset) + 4);
- /* Check previous session */
- prev_sess_id = le64_to_cpu(req->PreviousSessionId);
- if (prev_sess_id && prev_sess_id != sess->id)
- destroy_previous_session(conn, sess->user, prev_sess_id);
-
retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
out_blob, &out_len);
if (retval) {
ksmbd_debug(SMB, "krb5 authentication failed\n");
return -EINVAL;
}
+
+ /* Check previous session */
+ prev_sess_id = le64_to_cpu(req->PreviousSessionId);
+ if (prev_sess_id && prev_sess_id != sess->id)
+ destroy_previous_session(conn, sess->user, prev_sess_id);
+
rsp->SecurityBufferLength = cpu_to_le16(out_len);
- if ((conn->sign || server_conf.enforced_signing) ||
+ /*
+	 * If the session state is SMB2_SESSION_VALID, we can assume
+	 * that this is a reauthentication and the user/password
+	 * has already been verified, so return here.
+ */
+ if (sess->state == SMB2_SESSION_VALID) {
+ if (conn->binding)
+ goto binding_session;
+ return 0;
+ }
+
+ if ((rsp->SessionFlags != SMB2_SESSION_FLAG_IS_GUEST_LE &&
+ (conn->sign || server_conf.enforced_signing)) ||
(req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
sess->sign = true;
- if (smb3_encryption_negotiated(conn)) {
+ if (smb3_encryption_negotiated(conn) &&
+ !(req->Flags & SMB2_SESSION_REQ_FLAG_BINDING)) {
retval = conn->ops->generate_encryptionkey(conn, sess);
if (retval) {
ksmbd_debug(SMB,
@@ -1632,6 +1651,7 @@ static int krb5_authenticate(struct ksmbd_work *work,
sess->sign = false;
}
+binding_session:
if (conn->dialect >= SMB30_PROT_ID) {
chann = lookup_chann_list(sess, conn);
if (!chann) {
@@ -1640,7 +1660,12 @@ static int krb5_authenticate(struct ksmbd_work *work,
return -ENOMEM;
chann->conn = conn;
- xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP);
+ old = xa_store(&sess->ksmbd_chann_list, (long)conn,
+ chann, KSMBD_DEFAULT_GFP);
+ if (xa_is_err(old)) {
+ kfree(chann);
+ return xa_err(old);
+ }
}
}
@@ -1827,8 +1852,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
ksmbd_conn_set_good(conn);
sess->state = SMB2_SESSION_VALID;
}
- kfree(sess->Preauth_HashValue);
- sess->Preauth_HashValue = NULL;
} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
if (negblob->MessageType == NtLmNegotiate) {
rc = ntlm_negotiate(work, negblob, negblob_len, rsp);
@@ -1855,8 +1878,6 @@ int smb2_sess_setup(struct ksmbd_work *work)
kfree(preauth_sess);
}
}
- kfree(sess->Preauth_HashValue);
- sess->Preauth_HashValue = NULL;
} else {
pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
le32_to_cpu(negblob->MessageType));
@@ -2575,7 +2596,7 @@ static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon,
}
}
-static int smb2_creat(struct ksmbd_work *work, struct path *parent_path,
+static int smb2_creat(struct ksmbd_work *work,
struct path *path, char *name, int open_flags,
umode_t posix_mode, bool is_dir)
{
@@ -2604,7 +2625,7 @@ static int smb2_creat(struct ksmbd_work *work, struct path *parent_path,
return rc;
}
- rc = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0);
+ rc = ksmbd_vfs_kern_path(work, name, 0, path, 0);
if (rc) {
pr_err("cannot get linux path (%s), err = %d\n",
name, rc);
@@ -2854,7 +2875,7 @@ int smb2_open(struct ksmbd_work *work)
struct ksmbd_tree_connect *tcon = work->tcon;
struct smb2_create_req *req;
struct smb2_create_rsp *rsp;
- struct path path, parent_path;
+ struct path path;
struct ksmbd_share_config *share = tcon->share_conf;
struct ksmbd_file *fp = NULL;
struct file *filp = NULL;
@@ -2869,7 +2890,7 @@ int smb2_open(struct ksmbd_work *work)
int req_op_level = 0, open_flags = 0, may_flags = 0, file_info = 0;
int rc = 0;
int contxt_cnt = 0, query_disk_id = 0;
- int maximal_access_ctxt = 0, posix_ctxt = 0;
+ bool maximal_access_ctxt = false, posix_ctxt = false;
int s_type = 0;
int next_off = 0;
char *name = NULL;
@@ -2898,6 +2919,27 @@ int smb2_open(struct ksmbd_work *work)
return create_smb2_pipe(work);
}
+ if (req->CreateContextsOffset && tcon->posix_extensions) {
+ context = smb2_find_context_vals(req, SMB2_CREATE_TAG_POSIX, 16);
+ if (IS_ERR(context)) {
+ rc = PTR_ERR(context);
+ goto err_out2;
+ } else if (context) {
+ struct create_posix *posix = (struct create_posix *)context;
+
+ if (le16_to_cpu(context->DataOffset) +
+ le32_to_cpu(context->DataLength) <
+ sizeof(struct create_posix) - 4) {
+ rc = -EINVAL;
+ goto err_out2;
+ }
+ ksmbd_debug(SMB, "get posix context\n");
+
+ posix_mode = le32_to_cpu(posix->Mode);
+ posix_ctxt = true;
+ }
+ }
+
if (req->NameLength) {
name = smb2_get_name((char *)req + le16_to_cpu(req->NameOffset),
le16_to_cpu(req->NameLength),
@@ -2920,9 +2962,11 @@ int smb2_open(struct ksmbd_work *work)
goto err_out2;
}
- rc = ksmbd_validate_filename(name);
- if (rc < 0)
- goto err_out2;
+ if (posix_ctxt == false) {
+ rc = ksmbd_validate_filename(name);
+ if (rc < 0)
+ goto err_out2;
+ }
if (ksmbd_share_veto_filename(share, name)) {
rc = -ENOENT;
@@ -3080,28 +3124,6 @@ int smb2_open(struct ksmbd_work *work)
rc = -EBADF;
goto err_out2;
}
-
- if (tcon->posix_extensions) {
- context = smb2_find_context_vals(req,
- SMB2_CREATE_TAG_POSIX, 16);
- if (IS_ERR(context)) {
- rc = PTR_ERR(context);
- goto err_out2;
- } else if (context) {
- struct create_posix *posix =
- (struct create_posix *)context;
- if (le16_to_cpu(context->DataOffset) +
- le32_to_cpu(context->DataLength) <
- sizeof(struct create_posix) - 4) {
- rc = -EINVAL;
- goto err_out2;
- }
- ksmbd_debug(SMB, "get posix context\n");
-
- posix_mode = le32_to_cpu(posix->Mode);
- posix_ctxt = 1;
- }
- }
}
if (ksmbd_override_fsids(work)) {
@@ -3109,8 +3131,8 @@ int smb2_open(struct ksmbd_work *work)
goto err_out2;
}
- rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
- &parent_path, &path, 1);
+ rc = ksmbd_vfs_kern_path(work, name, LOOKUP_NO_SYMLINKS,
+ &path, 1);
if (!rc) {
file_present = true;
@@ -3231,7 +3253,7 @@ int smb2_open(struct ksmbd_work *work)
/*create file if not present */
if (!file_present) {
- rc = smb2_creat(work, &parent_path, &path, name, open_flags,
+ rc = smb2_creat(work, &path, name, open_flags,
posix_mode,
req->CreateOptions & FILE_DIRECTORY_FILE_LE);
if (rc) {
@@ -3436,7 +3458,7 @@ int smb2_open(struct ksmbd_work *work)
}
if (file_present || created)
- ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+ path_put(&path);
if (!S_ISDIR(file_inode(filp)->i_mode) && open_flags & O_TRUNC &&
!fp->attrib_only && !stream_name) {
@@ -3534,6 +3556,15 @@ int smb2_open(struct ksmbd_work *work)
ksmbd_debug(SMB, "get query on disk id context\n");
query_disk_id = 1;
}
+
+ if (conn->is_aapl == false) {
+ context = smb2_find_context_vals(req, SMB2_CREATE_AAPL, 4);
+ if (IS_ERR(context)) {
+ rc = PTR_ERR(context);
+ goto err_out1;
+ } else if (context)
+ conn->is_aapl = true;
+ }
}
rc = ksmbd_vfs_getattr(&path, &stat);
@@ -3708,7 +3739,7 @@ reconnected_fp:
err_out:
if (rc && (file_present || created))
- ksmbd_vfs_kern_path_unlock(&parent_path, &path);
+ path_put(&path);
err_out1:
ksmbd_revert_fsids(work);
@@ -3973,7 +4004,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
if (dinfo->EaSize)
dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
dinfo->Reserved = 0;
- dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+ if (conn->is_aapl)
+ dinfo->UniqueId = 0;
+ else
+ dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
if (d_info->hide_dot_file && d_info->name[0] == '.')
dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(dinfo->FileName, conv_name, conv_len);
@@ -3990,7 +4024,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (fibdinfo->EaSize)
fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
- fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+ if (conn->is_aapl)
+ fibdinfo->UniqueId = 0;
+ else
+ fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
fibdinfo->ShortNameLength = 0;
fibdinfo->Reserved = 0;
fibdinfo->Reserved2 = cpu_to_le16(0);
@@ -4086,20 +4123,6 @@ struct smb2_query_dir_private {
int info_level;
};
-static void lock_dir(struct ksmbd_file *dir_fp)
-{
- struct dentry *dir = dir_fp->filp->f_path.dentry;
-
- inode_lock_nested(d_inode(dir), I_MUTEX_PARENT);
-}
-
-static void unlock_dir(struct ksmbd_file *dir_fp)
-{
- struct dentry *dir = dir_fp->filp->f_path.dentry;
-
- inode_unlock(d_inode(dir));
-}
-
static int process_query_dir_entries(struct smb2_query_dir_private *priv)
{
struct mnt_idmap *idmap = file_mnt_idmap(priv->dir_fp->filp);
@@ -4114,11 +4137,10 @@ static int process_query_dir_entries(struct smb2_query_dir_private *priv)
if (dentry_name(priv->d_info, priv->info_level))
return -EINVAL;
- lock_dir(priv->dir_fp);
- dent = lookup_one(idmap, priv->d_info->name,
- priv->dir_fp->filp->f_path.dentry,
- priv->d_info->name_len);
- unlock_dir(priv->dir_fp);
+ dent = lookup_one_unlocked(idmap,
+ &QSTR_LEN(priv->d_info->name,
+ priv->d_info->name_len),
+ priv->dir_fp->filp->f_path.dentry);
if (IS_ERR(dent)) {
ksmbd_debug(SMB, "Cannot lookup `%s' [%ld]\n",
@@ -4849,8 +4871,13 @@ static int get_file_standard_info(struct smb2_query_info_rsp *rsp,
sinfo = (struct smb2_file_standard_info *)rsp->Buffer;
delete_pending = ksmbd_inode_pending_delete(fp);
- sinfo->AllocationSize = cpu_to_le64(stat.blocks << 9);
- sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ if (ksmbd_stream_fd(fp) == false) {
+ sinfo->AllocationSize = cpu_to_le64(stat.blocks << 9);
+ sinfo->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ } else {
+ sinfo->AllocationSize = cpu_to_le64(fp->stream.size);
+ sinfo->EndOfFile = cpu_to_le64(fp->stream.size);
+ }
sinfo->NumberOfLinks = cpu_to_le32(get_nlink(&stat) - delete_pending);
sinfo->DeletePending = delete_pending;
sinfo->Directory = S_ISDIR(stat.mode) ? 1 : 0;
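
For handles opened on xattr-backed streams, the query-info replies now report the stream's own size instead of the base file's stat results. A tiny sketch of the branch (hypothetical types, not ksmbd structures):

/*
 * Minimal sketch of the size-reporting branch: a handle opened on an
 * alternate data stream reports the stream's own size, not the base
 * file's stat results.  Hypothetical types, not ksmbd structures.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

struct handle {
	bool is_stream;
	uint64_t stream_size;	/* size of the xattr-backed stream */
	uint64_t stat_size;	/* size from stat on the base file */
};

static uint64_t end_of_file(const struct handle *h)
{
	return h->is_stream ? h->stream_size : h->stat_size;
}

int main(void)
{
	struct handle base = { .is_stream = false, .stat_size = 4096 };
	struct handle strm = { .is_stream = true, .stream_size = 17,
			       .stat_size = 4096 };

	printf("base=%llu stream=%llu\n",
	       (unsigned long long)end_of_file(&base),
	       (unsigned long long)end_of_file(&strm));
	return 0;
}
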
@@ -4913,9 +4940,14 @@ static int get_file_all_info(struct ksmbd_work *work,
file_info->ChangeTime = cpu_to_le64(time);
file_info->Attributes = fp->f_ci->m_fattr;
file_info->Pad1 = 0;
- file_info->AllocationSize =
- cpu_to_le64(stat.blocks << 9);
- file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ if (ksmbd_stream_fd(fp) == false) {
+ file_info->AllocationSize =
+ cpu_to_le64(stat.blocks << 9);
+ file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ } else {
+ file_info->AllocationSize = cpu_to_le64(fp->stream.size);
+ file_info->EndOfFile = cpu_to_le64(fp->stream.size);
+ }
file_info->NumberOfLinks =
cpu_to_le32(get_nlink(&stat) - delete_pending);
file_info->DeletePending = delete_pending;
@@ -4924,7 +4956,10 @@ static int get_file_all_info(struct ksmbd_work *work,
file_info->IndexNumber = cpu_to_le64(stat.ino);
file_info->EASize = 0;
file_info->AccessFlags = fp->daccess;
- file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+ if (ksmbd_stream_fd(fp) == false)
+ file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+ else
+ file_info->CurrentByteOffset = cpu_to_le64(fp->stream.pos);
file_info->Mode = fp->coption;
file_info->AlignmentRequirement = 0;
conv_len = smbConvertToUTF16((__le16 *)file_info->FileName, filename,
@@ -5112,8 +5147,13 @@ static int get_file_network_open_info(struct smb2_query_info_rsp *rsp,
time = ksmbd_UnixTimeToNT(stat.ctime);
file_info->ChangeTime = cpu_to_le64(time);
file_info->Attributes = fp->f_ci->m_fattr;
- file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
- file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ if (ksmbd_stream_fd(fp) == false) {
+ file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+ file_info->EndOfFile = S_ISDIR(stat.mode) ? 0 : cpu_to_le64(stat.size);
+ } else {
+ file_info->AllocationSize = cpu_to_le64(fp->stream.size);
+ file_info->EndOfFile = cpu_to_le64(fp->stream.size);
+ }
file_info->Reserved = cpu_to_le32(0);
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_ntwrk_info));
@@ -5136,7 +5176,11 @@ static void get_file_position_info(struct smb2_query_info_rsp *rsp,
struct smb2_file_pos_info *file_info;
file_info = (struct smb2_file_pos_info *)rsp->Buffer;
- file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+ if (ksmbd_stream_fd(fp) == false)
+ file_info->CurrentByteOffset = cpu_to_le64(fp->filp->f_pos);
+ else
+ file_info->CurrentByteOffset = cpu_to_le64(fp->stream.pos);
+
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_pos_info));
}
@@ -5225,8 +5269,13 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
file_info->ChangeTime = cpu_to_le64(time);
file_info->DosAttributes = fp->f_ci->m_fattr;
file_info->Inode = cpu_to_le64(stat.ino);
- file_info->EndOfFile = cpu_to_le64(stat.size);
- file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+ if (ksmbd_stream_fd(fp) == false) {
+ file_info->EndOfFile = cpu_to_le64(stat.size);
+ file_info->AllocationSize = cpu_to_le64(stat.blocks << 9);
+ } else {
+ file_info->EndOfFile = cpu_to_le64(fp->stream.size);
+ file_info->AllocationSize = cpu_to_le64(fp->stream.size);
+ }
file_info->HardLinks = cpu_to_le32(stat.nlink);
file_info->Mode = cpu_to_le32(stat.mode & 0777);
switch (stat.mode & S_IFMT) {
@@ -6002,8 +6051,7 @@ static int smb2_create_link(struct ksmbd_work *work,
struct nls_table *local_nls)
{
char *link_name = NULL, *target_name = NULL, *pathname = NULL;
- struct path path, parent_path;
- bool file_present = false;
+ struct path path;
int rc;
if (buf_len < (u64)sizeof(struct smb2_file_link_info) +
@@ -6032,15 +6080,12 @@ static int smb2_create_link(struct ksmbd_work *work,
ksmbd_debug(SMB, "target name is %s\n", target_name);
rc = ksmbd_vfs_kern_path_locked(work, link_name, LOOKUP_NO_SYMLINKS,
- &parent_path, &path, 0);
+ &path, 0);
if (rc) {
if (rc != -ENOENT)
goto out;
- } else
- file_present = true;
-
- if (file_info->ReplaceIfExists) {
- if (file_present) {
+ } else {
+ if (file_info->ReplaceIfExists) {
rc = ksmbd_vfs_remove_file(work, &path);
if (rc) {
rc = -EINVAL;
@@ -6048,21 +6093,17 @@ static int smb2_create_link(struct ksmbd_work *work,
link_name);
goto out;
}
- }
- } else {
- if (file_present) {
+ } else {
rc = -EEXIST;
ksmbd_debug(SMB, "link already exists\n");
goto out;
}
+ ksmbd_vfs_kern_path_unlock(&path);
}
-
rc = ksmbd_vfs_link(work, target_name, link_name);
if (rc)
rc = -EINVAL;
out:
- if (file_present)
- ksmbd_vfs_kern_path_unlock(&parent_path, &path);
if (!IS_ERR(link_name))
kfree(link_name);
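
The restructured smb2_create_link() above keeps the same protocol behaviour while dropping the separate file_present flag: if the link name resolves, ReplaceIfExists decides between removing the existing target and failing with EEXIST. A small decision helper, sketched in plain user-space C with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

enum link_action { LINK_CREATE, LINK_REPLACE, LINK_FAIL_EXISTS };

/* Decision made once the link-name lookup has either failed with
 * -ENOENT (target absent) or succeeded (target present). */
static enum link_action decide(bool target_exists, bool replace_if_exists)
{
	if (!target_exists)
		return LINK_CREATE;
	return replace_if_exists ? LINK_REPLACE : LINK_FAIL_EXISTS;
}

int main(void)
{
	printf("%d\n", decide(true, false));  /* LINK_FAIL_EXISTS -> EEXIST */
	printf("%d\n", decide(true, true));   /* LINK_REPLACE */
	return 0;
}
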
@@ -6168,6 +6209,9 @@ static int set_file_allocation_info(struct ksmbd_work *work,
if (!(fp->daccess & FILE_WRITE_DATA_LE))
return -EACCES;
+ if (ksmbd_stream_fd(fp) == true)
+ return 0;
+
rc = vfs_getattr(&fp->filp->f_path, &stat, STATX_BASIC_STATS,
AT_STATX_SYNC_AS_STAT);
if (rc)
@@ -6226,7 +6270,8 @@ static int set_end_of_file_info(struct ksmbd_work *work, struct ksmbd_file *fp,
* truncate of some filesystem like FAT32 fill zero data in
* truncated range.
*/
- if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC) {
+ if (inode->i_sb->s_magic != MSDOS_SUPER_MAGIC &&
+ ksmbd_stream_fd(fp) == false) {
ksmbd_debug(SMB, "truncated to newsize %lld\n", newsize);
rc = ksmbd_vfs_truncate(work, fp, newsize);
if (rc) {
@@ -6299,7 +6344,13 @@ static int set_file_position_info(struct ksmbd_file *fp,
return -EINVAL;
}
- fp->filp->f_pos = current_byte_offset;
+ if (ksmbd_stream_fd(fp) == false)
+ fp->filp->f_pos = current_byte_offset;
+ else {
+ if (current_byte_offset > XATTR_SIZE_MAX)
+ current_byte_offset = XATTR_SIZE_MAX;
+ fp->stream.pos = current_byte_offset;
+ }
return 0;
}
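
set_file_position_info() now mirrors the query side: a stream fd keeps its own cursor, clamped so later stream I/O stays within the xattr value limit (XATTR_SIZE_MAX is 65536 in the kernel's uapi limits.h). The clamp arithmetic on its own, as a standalone sketch:

#include <stdio.h>

#define XATTR_SIZE_MAX 65536

/* Regular fds take the offset as-is; stream fds cap it at the largest
 * possible xattr value size. */
static long long clamp_stream_pos(long long offset)
{
	return offset > XATTR_SIZE_MAX ? XATTR_SIZE_MAX : offset;
}

int main(void)
{
	printf("%lld\n", clamp_stream_pos(1 << 20));  /* prints 65536 */
	return 0;
}
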
@@ -7787,7 +7838,7 @@ static int fsctl_query_iface_info_ioctl(struct ksmbd_conn *conn,
if (!ksmbd_find_netdev_name_iface_list(netdev->name))
continue;
- flags = dev_get_flags(netdev);
+ flags = netif_get_flags(netdev);
if (!(flags & IFF_RUNNING))
continue;
ipv6_retry:
@@ -8513,11 +8564,6 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
goto err_out;
}
- opinfo->op_state = OPLOCK_STATE_NONE;
- wake_up_interruptible_all(&opinfo->oplock_q);
- opinfo_put(opinfo);
- ksmbd_fd_put(work, fp);
-
rsp->StructureSize = cpu_to_le16(24);
rsp->OplockLevel = rsp_oplevel;
rsp->Reserved = 0;
@@ -8525,16 +8571,15 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
rsp->VolatileFid = volatile_id;
rsp->PersistentFid = persistent_id;
ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break));
- if (!ret)
- return;
-
+ if (ret) {
err_out:
+ smb2_set_err_rsp(work);
+ }
+
opinfo->op_state = OPLOCK_STATE_NONE;
wake_up_interruptible_all(&opinfo->oplock_q);
-
opinfo_put(opinfo);
ksmbd_fd_put(work, fp);
- smb2_set_err_rsp(work);
}
static int check_lease_state(struct lease *lease, __le32 req_state)
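
Both the oplock and lease break-ack rewrites above and below follow the same shape: build and pin the response first, then fall through a single shared tail that resets the break state and drops the references, with the error label only adding the error response before that tail. A minimal control-flow sketch, not ksmbd code:

#include <stdbool.h>
#include <stdio.h>

/* Control flow only: one shared cleanup tail for success and error. */
static int ack_sketch(bool pin_ok)
{
	int ret = pin_ok ? 0 : -1;

	if (ret)
		puts("smb2_set_err_rsp()");          /* err_out path */

	puts("op_state = NONE; wake waiters; put refs");
	return ret;
}

int main(void)
{
	ack_sketch(true);
	ack_sketch(false);
	return 0;
}
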
@@ -8664,11 +8709,6 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
}
lease_state = lease->state;
- opinfo->op_state = OPLOCK_STATE_NONE;
- wake_up_interruptible_all(&opinfo->oplock_q);
- atomic_dec(&opinfo->breaking_cnt);
- wake_up_interruptible_all(&opinfo->oplock_brk);
- opinfo_put(opinfo);
rsp->StructureSize = cpu_to_le16(36);
rsp->Reserved = 0;
@@ -8677,16 +8717,16 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
rsp->LeaseState = lease_state;
rsp->LeaseDuration = 0;
ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack));
- if (!ret)
- return;
-
+ if (ret) {
err_out:
+ smb2_set_err_rsp(work);
+ }
+
+ opinfo->op_state = OPLOCK_STATE_NONE;
wake_up_interruptible_all(&opinfo->oplock_q);
atomic_dec(&opinfo->breaking_cnt);
wake_up_interruptible_all(&opinfo->oplock_brk);
-
opinfo_put(opinfo);
- smb2_set_err_rsp(work);
}
/**
diff --git a/fs/smb/server/smb2pdu.h b/fs/smb/server/smb2pdu.h
index 17a0b18a8406..16ae8a10490b 100644
--- a/fs/smb/server/smb2pdu.h
+++ b/fs/smb/server/smb2pdu.h
@@ -63,6 +63,9 @@ struct preauth_integrity_info {
#define SMB2_SESSION_TIMEOUT (10 * HZ)
+/* Apple Defined Contexts */
+#define SMB2_CREATE_AAPL "AAPL"
+
struct create_durable_req_v2 {
struct create_context_hdr ccontext;
__u8 Name[8];
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index 425c756bcfb8..b23203a1c286 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -515,7 +515,7 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn, const char *longname,
p = strrchr(longname, '.');
if (p == longname) { /*name starts with a dot*/
- strscpy(extension, "___", strlen("___"));
+ strscpy(extension, "___", sizeof(extension));
} else {
if (p) {
p++;
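
The smb_common.c change matters because strscpy() bounds the copy by the destination size and always NUL-terminates, so passing strlen("___") (3) left room for only two underscores. A user-space illustration with a simplified stand-in for strscpy() (the real kernel helper additionally reports truncation with -E2BIG):

#include <stdio.h>
#include <string.h>

/* Simplified stand-in: copy at most size-1 bytes, always terminate. */
static size_t strscpy_stub(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (!size)
		return 0;
	if (len >= size)
		len = size - 1;
	memcpy(dst, src, len);
	dst[len] = '\0';
	return len;
}

int main(void)
{
	char extension[5];

	strscpy_stub(extension, "___", strlen("___"));
	printf("old call:   \"%s\"\n", extension);     /* "__"  */
	strscpy_stub(extension, "___", sizeof(extension));
	printf("fixed call: \"%s\"\n", extension);     /* "___" */
	return 0;
}
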
diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
index 4998df04ab95..8d366db5f605 100644
--- a/fs/smb/server/transport_rdma.c
+++ b/fs/smb/server/transport_rdma.c
@@ -129,9 +129,6 @@ struct smb_direct_transport {
spinlock_t recvmsg_queue_lock;
struct list_head recvmsg_queue;
- spinlock_t empty_recvmsg_queue_lock;
- struct list_head empty_recvmsg_queue;
-
int send_credit_target;
atomic_t send_credits;
spinlock_t lock_new_recv_credits;
@@ -159,7 +156,8 @@ struct smb_direct_transport {
};
#define KSMBD_TRANS(t) ((struct ksmbd_transport *)&((t)->transport))
-
+#define SMBD_TRANS(t) ((struct smb_direct_transport *)container_of(t, \
+ struct smb_direct_transport, transport))
enum {
SMB_DIRECT_MSG_NEGOTIATE_REQ = 0,
SMB_DIRECT_MSG_DATA_TRANSFER
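
SMBD_TRANS() is the usual container_of() recovery of the wrapping smb_direct_transport from its embedded ksmbd_transport, which the new .free_transport callback added further down needs. A self-contained sketch of the idiom with stub types:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stub types; only the embedding relationship matters here. */
struct ksmbd_transport_stub { int dummy; };

struct smb_direct_transport_stub {
	int extra_state;
	struct ksmbd_transport_stub transport;
};

int main(void)
{
	struct smb_direct_transport_stub t = { .extra_state = 7 };
	struct ksmbd_transport_stub *kt = &t.transport;
	struct smb_direct_transport_stub *back =
		container_of(kt, struct smb_direct_transport_stub, transport);

	printf("%d\n", back->extra_state);  /* 7 */
	return 0;
}
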
@@ -267,40 +265,19 @@ smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
static void put_recvmsg(struct smb_direct_transport *t,
struct smb_direct_recvmsg *recvmsg)
{
- ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
- recvmsg->sge.length, DMA_FROM_DEVICE);
+ if (likely(recvmsg->sge.length != 0)) {
+ ib_dma_unmap_single(t->cm_id->device,
+ recvmsg->sge.addr,
+ recvmsg->sge.length,
+ DMA_FROM_DEVICE);
+ recvmsg->sge.length = 0;
+ }
spin_lock(&t->recvmsg_queue_lock);
list_add(&recvmsg->list, &t->recvmsg_queue);
spin_unlock(&t->recvmsg_queue_lock);
}
-static struct
-smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
-{
- struct smb_direct_recvmsg *recvmsg = NULL;
-
- spin_lock(&t->empty_recvmsg_queue_lock);
- if (!list_empty(&t->empty_recvmsg_queue)) {
- recvmsg = list_first_entry(&t->empty_recvmsg_queue,
- struct smb_direct_recvmsg, list);
- list_del(&recvmsg->list);
- }
- spin_unlock(&t->empty_recvmsg_queue_lock);
- return recvmsg;
-}
-
-static void put_empty_recvmsg(struct smb_direct_transport *t,
- struct smb_direct_recvmsg *recvmsg)
-{
- ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
- recvmsg->sge.length, DMA_FROM_DEVICE);
-
- spin_lock(&t->empty_recvmsg_queue_lock);
- list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
- spin_unlock(&t->empty_recvmsg_queue_lock);
-}
-
static void enqueue_reassembly(struct smb_direct_transport *t,
struct smb_direct_recvmsg *recvmsg,
int data_length)
@@ -385,9 +362,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
spin_lock_init(&t->recvmsg_queue_lock);
INIT_LIST_HEAD(&t->recvmsg_queue);
- spin_lock_init(&t->empty_recvmsg_queue_lock);
- INIT_LIST_HEAD(&t->empty_recvmsg_queue);
-
init_waitqueue_head(&t->wait_send_pending);
atomic_set(&t->send_pending, 0);
@@ -410,6 +384,11 @@ err:
return NULL;
}
+static void smb_direct_free_transport(struct ksmbd_transport *kt)
+{
+ kfree(SMBD_TRANS(kt));
+}
+
static void free_transport(struct smb_direct_transport *t)
{
struct smb_direct_recvmsg *recvmsg;
@@ -427,7 +406,8 @@ static void free_transport(struct smb_direct_transport *t)
if (t->qp) {
ib_drain_qp(t->qp);
ib_mr_pool_destroy(t->qp, &t->qp->rdma_mrs);
- ib_destroy_qp(t->qp);
+ t->qp = NULL;
+ rdma_destroy_qp(t->cm_id);
}
ksmbd_debug(RDMA, "drain the reassembly queue\n");
@@ -455,7 +435,6 @@ static void free_transport(struct smb_direct_transport *t)
smb_direct_destroy_pools(t);
ksmbd_conn_free(KSMBD_TRANS(t)->conn);
- kfree(t);
}
static struct smb_direct_sendmsg
@@ -542,13 +521,13 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
t = recvmsg->transport;
if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+ put_recvmsg(t, recvmsg);
if (wc->status != IB_WC_WR_FLUSH_ERR) {
pr_err("Recv error. status='%s (%d)' opcode=%d\n",
ib_wc_status_msg(wc->status), wc->status,
wc->opcode);
smb_direct_disconnect_rdma_connection(t);
}
- put_empty_recvmsg(t, recvmsg);
return;
}
@@ -562,7 +541,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
switch (recvmsg->type) {
case SMB_DIRECT_MSG_NEGOTIATE_REQ:
if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
- put_empty_recvmsg(t, recvmsg);
+ put_recvmsg(t, recvmsg);
+ smb_direct_disconnect_rdma_connection(t);
return;
}
t->negotiation_requested = true;
@@ -570,7 +550,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
t->status = SMB_DIRECT_CS_CONNECTED;
enqueue_reassembly(t, recvmsg, 0);
wake_up_interruptible(&t->wait_status);
- break;
+ return;
case SMB_DIRECT_MSG_DATA_TRANSFER: {
struct smb_direct_data_transfer *data_transfer =
(struct smb_direct_data_transfer *)recvmsg->packet;
@@ -579,7 +559,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
if (wc->byte_len <
offsetof(struct smb_direct_data_transfer, padding)) {
- put_empty_recvmsg(t, recvmsg);
+ put_recvmsg(t, recvmsg);
+ smb_direct_disconnect_rdma_connection(t);
return;
}
@@ -587,7 +568,8 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
if (data_length) {
if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
(u64)data_length) {
- put_empty_recvmsg(t, recvmsg);
+ put_recvmsg(t, recvmsg);
+ smb_direct_disconnect_rdma_connection(t);
return;
}
@@ -599,16 +581,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
else
t->full_packet_received = true;
- enqueue_reassembly(t, recvmsg, (int)data_length);
- wake_up_interruptible(&t->wait_reassembly_queue);
-
spin_lock(&t->receive_credit_lock);
receive_credits = --(t->recv_credits);
avail_recvmsg_count = t->count_avail_recvmsg;
spin_unlock(&t->receive_credit_lock);
} else {
- put_empty_recvmsg(t, recvmsg);
-
spin_lock(&t->receive_credit_lock);
receive_credits = --(t->recv_credits);
avail_recvmsg_count = ++(t->count_avail_recvmsg);
@@ -630,11 +607,23 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
mod_delayed_work(smb_direct_wq,
&t->post_recv_credits_work, 0);
- break;
+
+ if (data_length) {
+ enqueue_reassembly(t, recvmsg, (int)data_length);
+ wake_up_interruptible(&t->wait_reassembly_queue);
+ } else
+ put_recvmsg(t, recvmsg);
+
+ return;
}
- default:
- break;
}
+
+ /*
+ * This is an internal error!
+ */
+ WARN_ON_ONCE(recvmsg->type != SMB_DIRECT_MSG_DATA_TRANSFER);
+ put_recvmsg(t, recvmsg);
+ smb_direct_disconnect_rdma_connection(t);
}
static int smb_direct_post_recv(struct smb_direct_transport *t,
@@ -664,6 +653,7 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
ib_dma_unmap_single(t->cm_id->device,
recvmsg->sge.addr, recvmsg->sge.length,
DMA_FROM_DEVICE);
+ recvmsg->sge.length = 0;
smb_direct_disconnect_rdma_connection(t);
return ret;
}
@@ -805,7 +795,6 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
struct smb_direct_recvmsg *recvmsg;
int receive_credits, credits = 0;
int ret;
- int use_free = 1;
spin_lock(&t->receive_credit_lock);
receive_credits = t->recv_credits;
@@ -813,18 +802,9 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
if (receive_credits < t->recv_credit_target) {
while (true) {
- if (use_free)
- recvmsg = get_free_recvmsg(t);
- else
- recvmsg = get_empty_recvmsg(t);
- if (!recvmsg) {
- if (use_free) {
- use_free = 0;
- continue;
- } else {
- break;
- }
- }
+ recvmsg = get_free_recvmsg(t);
+ if (!recvmsg)
+ break;
recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
recvmsg->first_segment = false;
@@ -1800,8 +1780,6 @@ static void smb_direct_destroy_pools(struct smb_direct_transport *t)
while ((recvmsg = get_free_recvmsg(t)))
mempool_free(recvmsg, t->recvmsg_mempool);
- while ((recvmsg = get_empty_recvmsg(t)))
- mempool_free(recvmsg, t->recvmsg_mempool);
mempool_destroy(t->recvmsg_mempool);
t->recvmsg_mempool = NULL;
@@ -1857,6 +1835,7 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
if (!recvmsg)
goto err;
recvmsg->transport = t;
+ recvmsg->sge.length = 0;
list_add(&recvmsg->list, &t->recvmsg_queue);
}
t->count_avail_recvmsg = t->recv_credit_max;
@@ -1935,8 +1914,8 @@ static int smb_direct_create_qpair(struct smb_direct_transport *t,
return 0;
err:
if (t->qp) {
- ib_destroy_qp(t->qp);
t->qp = NULL;
+ rdma_destroy_qp(t->cm_id);
}
if (t->recv_cq) {
ib_destroy_cq(t->recv_cq);
@@ -2281,4 +2260,5 @@ static const struct ksmbd_transport_ops ksmbd_smb_direct_transport_ops = {
.read = smb_direct_read,
.rdma_read = smb_direct_rdma_read,
.rdma_write = smb_direct_rdma_write,
+ .free_transport = smb_direct_free_transport,
};
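
The transport_rdma.c changes collapse the separate free and empty recvmsg queues into one by treating sge.length == 0 as "no DMA mapping to undo", so put_recvmsg() can be called from every completion path and only unmaps when there is something mapped. A user-space sketch of that sentinel convention, with stub types and no real IB verbs:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for smb_direct_recvmsg: sge.length == 0 is also how freshly
 * allocated entries are initialised in smb_direct_create_pools(). */
struct recvmsg_stub {
	unsigned int sge_length;
	bool on_free_list;
};

static void dma_unmap_stub(struct recvmsg_stub *r)
{
	/* ib_dma_unmap_single(...) in the real code */
	r->sge_length = 0;
}

static void put_recvmsg_stub(struct recvmsg_stub *r)
{
	if (r->sge_length != 0)
		dma_unmap_stub(r);
	r->on_free_list = true;             /* back on recvmsg_queue */
}

int main(void)
{
	struct recvmsg_stub mapped = { .sge_length = 4096 };
	struct recvmsg_stub unmapped = { .sge_length = 0 };

	put_recvmsg_stub(&mapped);
	put_recvmsg_stub(&unmapped);        /* safe: nothing to unmap */
	printf("%u %u\n", mapped.sge_length, unmapped.sge_length);
	return 0;
}
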
diff --git a/fs/smb/server/transport_tcp.c b/fs/smb/server/transport_tcp.c
index abedf510899a..b1df02e321b0 100644
--- a/fs/smb/server/transport_tcp.c
+++ b/fs/smb/server/transport_tcp.c
@@ -58,12 +58,10 @@ static inline void ksmbd_tcp_reuseaddr(struct socket *sock)
static inline void ksmbd_tcp_rcv_timeout(struct socket *sock, s64 secs)
{
- lock_sock(sock->sk);
if (secs && secs < MAX_SCHEDULE_TIMEOUT / HZ - 1)
- sock->sk->sk_rcvtimeo = secs * HZ;
+ WRITE_ONCE(sock->sk->sk_rcvtimeo, secs * HZ);
else
- sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
- release_sock(sock->sk);
+ WRITE_ONCE(sock->sk->sk_rcvtimeo, MAX_SCHEDULE_TIMEOUT);
}
static inline void ksmbd_tcp_snd_timeout(struct socket *sock, s64 secs)
@@ -87,13 +85,14 @@ static struct tcp_transport *alloc_transport(struct socket *client_sk)
return NULL;
}
+ conn->inet_addr = inet_sk(client_sk->sk)->inet_daddr;
conn->transport = KSMBD_TRANS(t);
KSMBD_TRANS(t)->conn = conn;
KSMBD_TRANS(t)->ops = &ksmbd_tcp_transport_ops;
return t;
}
-void ksmbd_free_transport(struct ksmbd_transport *kt)
+static void ksmbd_tcp_free_transport(struct ksmbd_transport *kt)
{
struct tcp_transport *t = TCP_TRANS(kt);
@@ -230,6 +229,8 @@ static int ksmbd_kthread_fn(void *p)
{
struct socket *client_sk = NULL;
struct interface *iface = (struct interface *)p;
+ struct inet_sock *csk_inet;
+ struct ksmbd_conn *conn;
int ret;
while (!kthread_should_stop()) {
@@ -248,6 +249,20 @@ static int ksmbd_kthread_fn(void *p)
continue;
}
+ /*
+ * Limits repeated connections from clients with the same IP.
+ */
+ csk_inet = inet_sk(client_sk->sk);
+ down_read(&conn_list_lock);
+ list_for_each_entry(conn, &conn_list, conns_list)
+ if (csk_inet->inet_daddr == conn->inet_addr) {
+ ret = -EAGAIN;
+ break;
+ }
+ up_read(&conn_list_lock);
+ if (ret == -EAGAIN)
+ continue;
+
if (server_conf.max_connections &&
atomic_inc_return(&active_num_conn) >= server_conf.max_connections) {
pr_info_ratelimited("Limit the maximum number of connections(%u)\n",
@@ -656,4 +671,5 @@ static const struct ksmbd_transport_ops ksmbd_tcp_transport_ops = {
.read = ksmbd_tcp_read,
.writev = ksmbd_tcp_writev,
.disconnect = ksmbd_tcp_disconnect,
+ .free_transport = ksmbd_tcp_free_transport,
};
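
The accept-loop addition records each connection's peer IPv4 address (conn->inet_addr) and refuses a new socket whose address is already on conn_list, answering -EAGAIN instead of spawning another connection. A simplified user-space sketch of the check, using a plain singly linked list in place of the kernel list and rwsem:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for ksmbd_conn: only the peer address matters here. */
struct conn_stub {
	uint32_t inet_addr;              /* opaque peer IPv4 address */
	struct conn_stub *next;
};

static bool addr_already_connected(const struct conn_stub *conns,
				   uint32_t addr)
{
	for (; conns; conns = conns->next)
		if (conns->inet_addr == addr)
			return true;     /* accept loop answers -EAGAIN */
	return false;
}

int main(void)
{
	struct conn_stub b = { .inet_addr = 2, .next = NULL };
	struct conn_stub a = { .inet_addr = 1, .next = &b };

	printf("%d %d\n", addr_already_connected(&a, 2),
	       addr_already_connected(&a, 3));       /* 1 0 */
	return 0;
}
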
diff --git a/fs/smb/server/vfs.c b/fs/smb/server/vfs.c
index 391d07da586c..04539037108c 100644
--- a/fs/smb/server/vfs.c
+++ b/fs/smb/server/vfs.c
@@ -4,6 +4,7 @@
* Copyright (C) 2018 Samsung Electronics Co., Ltd.
*/
+#include <crypto/sha2.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/filelock.h>
@@ -65,13 +66,12 @@ int ksmbd_vfs_lock_parent(struct dentry *parent, struct dentry *child)
return 0;
}
-static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
- char *pathname, unsigned int flags,
- struct path *parent_path,
- struct path *path)
+static int ksmbd_vfs_path_lookup(struct ksmbd_share_config *share_conf,
+ char *pathname, unsigned int flags,
+ struct path *path, bool do_lock)
{
struct qstr last;
- struct filename *filename;
+ struct filename *filename __free(putname) = NULL;
struct path *root_share_path = &share_conf->vfs_path;
int err, type;
struct dentry *d;
@@ -88,51 +88,57 @@ static int ksmbd_vfs_path_lookup_locked(struct ksmbd_share_config *share_conf,
return PTR_ERR(filename);
err = vfs_path_parent_lookup(filename, flags,
- parent_path, &last, &type,
+ path, &last, &type,
root_share_path);
- if (err) {
- putname(filename);
+ if (err)
return err;
- }
if (unlikely(type != LAST_NORM)) {
- path_put(parent_path);
- putname(filename);
+ path_put(path);
return -ENOENT;
}
- err = mnt_want_write(parent_path->mnt);
- if (err) {
- path_put(parent_path);
- putname(filename);
+ if (do_lock) {
+ err = mnt_want_write(path->mnt);
+ if (err) {
+ path_put(path);
+ return -ENOENT;
+ }
+
+ inode_lock_nested(path->dentry->d_inode, I_MUTEX_PARENT);
+ d = lookup_one_qstr_excl(&last, path->dentry, 0);
+
+ if (!IS_ERR(d)) {
+ dput(path->dentry);
+ path->dentry = d;
+ return 0;
+ }
+ inode_unlock(path->dentry->d_inode);
+ mnt_drop_write(path->mnt);
+ path_put(path);
return -ENOENT;
}
- inode_lock_nested(parent_path->dentry->d_inode, I_MUTEX_PARENT);
- d = lookup_one_qstr_excl(&last, parent_path->dentry, 0);
- if (IS_ERR(d))
- goto err_out;
-
+ d = lookup_noperm_unlocked(&last, path->dentry);
+ if (!IS_ERR(d) && d_is_negative(d)) {
+ dput(d);
+ d = ERR_PTR(-ENOENT);
+ }
+ if (IS_ERR(d)) {
+ path_put(path);
+ return -ENOENT;
+ }
+ dput(path->dentry);
path->dentry = d;
- path->mnt = mntget(parent_path->mnt);
if (test_share_config_flag(share_conf, KSMBD_SHARE_FLAG_CROSSMNT)) {
err = follow_down(path, 0);
if (err < 0) {
path_put(path);
- goto err_out;
+ return -ENOENT;
}
}
-
- putname(filename);
return 0;
-
-err_out:
- inode_unlock(d_inode(parent_path->dentry));
- mnt_drop_write(parent_path->mnt);
- path_put(parent_path);
- putname(filename);
- return -ENOENT;
}
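
The lookup rework above also switches filename to the scope-based __free(putname) cleanup, which is why the per-branch putname() calls disappear. The same compiler feature is usable in plain C; a sketch with a hypothetical free_str() destructor, relying on the GCC/Clang cleanup attribute:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Destructor run automatically when the variable goes out of scope. */
static void free_str(char **p)
{
	free(*p);
}

#define __free_str __attribute__((cleanup(free_str)))

static int use_name(const char *name)
{
	char *copy __free_str = strdup(name);

	if (!copy)
		return -1;
	if (copy[0] == '\0')
		return -2;       /* freed automatically on this path too */
	printf("%s\n", copy);
	return 0;                /* and on this one */
}

int main(void)
{
	return use_name("hello");
}
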
void ksmbd_vfs_query_maximal_access(struct mnt_idmap *idmap,
@@ -292,6 +298,7 @@ static int ksmbd_vfs_stream_read(struct ksmbd_file *fp, char *buf, loff_t *pos,
if (v_len - *pos < count)
count = v_len - *pos;
+ fp->stream.pos = v_len;
memcpy(buf, &stream_buf[*pos], count);
@@ -409,10 +416,15 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
*pos, count);
+ if (*pos >= XATTR_SIZE_MAX) {
+ pr_err("stream write position %lld is out of bounds\n", *pos);
+ return -EINVAL;
+ }
+
size = *pos + count;
if (size > XATTR_SIZE_MAX) {
size = XATTR_SIZE_MAX;
- count = (*pos + count) - XATTR_SIZE_MAX;
+ count = XATTR_SIZE_MAX - *pos;
}
v_len = ksmbd_vfs_getcasexattr(idmap,
@@ -450,8 +462,8 @@ static int ksmbd_vfs_stream_write(struct ksmbd_file *fp, char *buf, loff_t *pos,
true);
if (err < 0)
goto out;
-
- fp->filp->f_pos = *pos;
+ else
+ fp->stream.pos = size;
err = 0;
out:
kvfree(stream_buf);
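
The ksmbd_vfs_stream_write() fix replaces an expression that computed the overflow past XATTR_SIZE_MAX with the number of bytes that still fit, and rejects writes starting at or beyond the limit. The arithmetic, in a standalone sketch:

#include <stddef.h>
#include <stdio.h>

#define XATTR_SIZE_MAX 65536

/* If pos + count would run past the xattr size limit, write only the
 * bytes that still fit; the old expression returned the overflow. */
static size_t clamp_count(long long pos, size_t count)
{
	if (pos >= XATTR_SIZE_MAX)
		return 0;                        /* rejected with -EINVAL */
	if (pos + (long long)count > XATTR_SIZE_MAX)
		count = XATTR_SIZE_MAX - pos;    /* fixed expression */
	return count;
}

int main(void)
{
	/* pos = 65000, count = 4096: 536 bytes still fit, not 3560. */
	printf("%zu\n", clamp_count(65000, 4096));
	return 0;
}
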
@@ -541,7 +553,8 @@ int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat)
{
int err;
- err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT);
+ err = vfs_getattr(path, stat, STATX_BASIC_STATS | STATX_BTIME,
+ AT_STATX_SYNC_AS_STAT);
if (err)
pr_err("getattr failed, err %d\n", err);
return err;
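
ksmbd_vfs_getattr() previously requested only STATX_BTIME, so the fields covered by STATX_BASIC_STATS were not guaranteed to be filled in; the fix asks for both. The equivalent request from user space looks like this (assumes the glibc 2.28+ statx() wrapper):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	struct statx stx;

	/* Ask for basic stats plus birth time, then check stx_mask to see
	 * what the filesystem could actually supply. */
	if (statx(AT_FDCWD, ".", 0, STATX_BASIC_STATS | STATX_BTIME, &stx))
		return 1;

	printf("size=%llu btime supported: %s\n",
	       (unsigned long long)stx.stx_size,
	       (stx.stx_mask & STATX_BTIME) ? "yes" : "no");
	return 0;
}
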
@@ -677,7 +690,7 @@ int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
struct ksmbd_file *parent_fp;
int new_type;
int err, lookup_flags = LOOKUP_NO_SYMLINKS;
- int target_lookup_flags = LOOKUP_RENAME_TARGET;
+ int target_lookup_flags = LOOKUP_RENAME_TARGET | LOOKUP_CREATE;
if (ksmbd_override_fsids(work))
return -ENOMEM;
@@ -758,10 +771,10 @@ retry:
}
rd.old_mnt_idmap = mnt_idmap(old_path->mnt),
- rd.old_dir = d_inode(old_parent),
+ rd.old_parent = old_parent,
rd.old_dentry = old_child,
rd.new_mnt_idmap = mnt_idmap(new_path.mnt),
- rd.new_dir = new_path.dentry->d_inode,
+ rd.new_parent = new_path.dentry,
rd.new_dentry = new_dentry,
rd.flags = flags,
rd.delegated_inode = NULL,
@@ -1191,103 +1204,114 @@ static int ksmbd_vfs_lookup_in_dir(const struct path *dir, char *name,
return ret;
}
-/**
- * ksmbd_vfs_kern_path_locked() - lookup a file and get path info
- * @work: work
- * @name: file path that is relative to share
- * @flags: lookup flags
- * @parent_path: if lookup succeed, return parent_path info
- * @path: if lookup succeed, return path info
- * @caseless: caseless filename lookup
- *
- * Return: 0 on success, otherwise error
- */
-int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
- unsigned int flags, struct path *parent_path,
- struct path *path, bool caseless)
+static
+int __ksmbd_vfs_kern_path(struct ksmbd_work *work, char *filepath,
+ unsigned int flags,
+ struct path *path, bool caseless, bool do_lock)
{
struct ksmbd_share_config *share_conf = work->tcon->share_conf;
+ struct path parent_path;
+ size_t path_len, remain_len;
int err;
- err = ksmbd_vfs_path_lookup_locked(share_conf, name, flags, parent_path,
- path);
- if (!err)
- return 0;
-
- if (caseless) {
- char *filepath;
- size_t path_len, remain_len;
-
- filepath = name;
- path_len = strlen(filepath);
- remain_len = path_len;
-
- *parent_path = share_conf->vfs_path;
- path_get(parent_path);
-
- while (d_can_lookup(parent_path->dentry)) {
- char *filename = filepath + path_len - remain_len;
- char *next = strchrnul(filename, '/');
- size_t filename_len = next - filename;
- bool is_last = !next[0];
+retry:
+ err = ksmbd_vfs_path_lookup(share_conf, filepath, flags, path, do_lock);
+ if (!err || !caseless)
+ return err;
- if (filename_len == 0)
- break;
+ path_len = strlen(filepath);
+ remain_len = path_len;
- err = ksmbd_vfs_lookup_in_dir(parent_path, filename,
- filename_len,
- work->conn->um);
- if (err)
- goto out2;
+ parent_path = share_conf->vfs_path;
+ path_get(&parent_path);
- next[0] = '\0';
+ while (d_can_lookup(parent_path.dentry)) {
+ char *filename = filepath + path_len - remain_len;
+ char *next = strchrnul(filename, '/');
+ size_t filename_len = next - filename;
+ bool is_last = !next[0];
- err = vfs_path_lookup(share_conf->vfs_path.dentry,
- share_conf->vfs_path.mnt,
- filepath,
- flags,
- path);
- if (!is_last)
- next[0] = '/';
- if (err)
- goto out2;
- else if (is_last)
- goto out1;
- path_put(parent_path);
- *parent_path = *path;
+ if (filename_len == 0)
+ break;
- remain_len -= filename_len + 1;
+ err = ksmbd_vfs_lookup_in_dir(&parent_path, filename,
+ filename_len,
+ work->conn->um);
+ path_put(&parent_path);
+ if (err)
+ goto out;
+ if (is_last) {
+ caseless = false;
+ goto retry;
}
+ next[0] = '\0';
+
+ err = vfs_path_lookup(share_conf->vfs_path.dentry,
+ share_conf->vfs_path.mnt,
+ filepath,
+ flags,
+ &parent_path);
+ next[0] = '/';
+ if (err)
+ goto out;
- err = -EINVAL;
-out2:
- path_put(parent_path);
+ remain_len -= filename_len + 1;
}
-out1:
- if (!err) {
- err = mnt_want_write(parent_path->mnt);
- if (err) {
- path_put(path);
- path_put(parent_path);
- return err;
- }
-
- err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
- if (err) {
- path_put(path);
- path_put(parent_path);
- }
- }
+ err = -EINVAL;
+ path_put(&parent_path);
+out:
return err;
}
-void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path)
+/**
+ * ksmbd_vfs_kern_path() - lookup a file and get path info
+ * @work: work
+ * @filepath: file path that is relative to share
+ * @flags: lookup flags
+ * @path: if lookup succeed, return path info
+ * @caseless: caseless filename lookup
+ *
+ * Perform the lookup, possibly crossing over any mount point.
+ * On return no locks will be held and write-access to filesystem
+ * won't have been checked.
+ * Return: 0 if file was found, otherwise error
+ */
+int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *filepath,
+ unsigned int flags,
+ struct path *path, bool caseless)
{
- inode_unlock(d_inode(parent_path->dentry));
- mnt_drop_write(parent_path->mnt);
+ return __ksmbd_vfs_kern_path(work, filepath, flags, path,
+ caseless, false);
+}
+
+/**
+ * ksmbd_vfs_kern_path_locked() - lookup a file and get path info
+ * @work: work
+ * @filepath: file path that is relative to share
+ * @flags: lookup flags
+ * @path: if lookup succeed, return path info
+ * @caseless: caseless filename lookup
+ *
+ * Perform the lookup, but don't cross over any mount point.
+ * On return the parent of path->dentry will be locked and write-access to
+ * filesystem will have been gained.
+ * Return: 0 if file was found, otherwise error
+ */
+int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *filepath,
+ unsigned int flags,
+ struct path *path, bool caseless)
+{
+ return __ksmbd_vfs_kern_path(work, filepath, flags, path,
+ caseless, true);
+}
+
+void ksmbd_vfs_kern_path_unlock(struct path *path)
+{
+ /* While lock is still held, ->d_parent is safe */
+ inode_unlock(d_inode(path->dentry->d_parent));
+ mnt_drop_write(path->mnt);
path_put(path);
- path_put(parent_path);
}
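
The caseless branch of __ksmbd_vfs_kern_path() walks the path one component at a time, using strchrnul() so a component can be temporarily NUL-terminated and the separator restored, and retries the whole lookup once the final component has been case-normalised. The component walk on its own, as a user-space sketch:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char filepath[] = "dir1/Dir2/file.txt";
	size_t path_len = strlen(filepath);
	size_t remain_len = path_len;

	while (remain_len) {
		char *name = filepath + path_len - remain_len;
		char *next = strchrnul(name, '/');   /* points at '/' or NUL */
		size_t name_len = next - name;

		if (name_len == 0)
			break;
		printf("component: %.*s (last=%d)\n",
		       (int)name_len, name, !next[0]);
		if (!next[0])
			break;
		remain_len -= name_len + 1;
	}
	return 0;
}
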
struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
@@ -1471,11 +1495,7 @@ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
acl.sd_buf = (char *)pntsd;
acl.sd_size = len;
- rc = ksmbd_gen_sd_hash(conn, acl.sd_buf, acl.sd_size, acl.hash);
- if (rc) {
- pr_err("failed to generate hash for ndr acl\n");
- return rc;
- }
+ sha256(acl.sd_buf, acl.sd_size, acl.hash);
smb_acl = ksmbd_vfs_make_xattr_posix_acl(idmap, inode,
ACL_TYPE_ACCESS);
@@ -1490,12 +1510,7 @@ int ksmbd_vfs_set_sd_xattr(struct ksmbd_conn *conn,
goto out;
}
- rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset,
- acl.posix_acl_hash);
- if (rc) {
- pr_err("failed to generate hash for ndr acl\n");
- goto out;
- }
+ sha256(acl_ndr.data, acl_ndr.offset, acl.posix_acl_hash);
rc = ndr_encode_v4_ntacl(&sd_ndr, &acl);
if (rc) {
@@ -1552,11 +1567,7 @@ int ksmbd_vfs_get_sd_xattr(struct ksmbd_conn *conn,
goto out_free;
}
- rc = ksmbd_gen_sd_hash(conn, acl_ndr.data, acl_ndr.offset, cmp_hash);
- if (rc) {
- pr_err("failed to generate hash for ndr acl\n");
- goto out_free;
- }
+ sha256(acl_ndr.data, acl_ndr.offset, cmp_hash);
if (memcmp(cmp_hash, acl.posix_acl_hash, XATTR_SD_HASH_SIZE)) {
pr_err("hash value diff\n");
diff --git a/fs/smb/server/vfs.h b/fs/smb/server/vfs.h
index 2893f59803a6..d47472f3e30b 100644
--- a/fs/smb/server/vfs.h
+++ b/fs/smb/server/vfs.h
@@ -117,10 +117,13 @@ int ksmbd_vfs_xattr_stream_name(char *stream_name, char **xattr_stream_name,
int ksmbd_vfs_remove_xattr(struct mnt_idmap *idmap,
const struct path *path, char *attr_name,
bool get_write);
+int ksmbd_vfs_kern_path(struct ksmbd_work *work, char *name,
+ unsigned int flags,
+ struct path *path, bool caseless);
int ksmbd_vfs_kern_path_locked(struct ksmbd_work *work, char *name,
- unsigned int flags, struct path *parent_path,
+ unsigned int flags,
struct path *path, bool caseless);
-void ksmbd_vfs_kern_path_unlock(struct path *parent_path, struct path *path);
+void ksmbd_vfs_kern_path_unlock(struct path *path);
struct dentry *ksmbd_vfs_kern_path_create(struct ksmbd_work *work,
const char *name,
unsigned int flags,
diff --git a/fs/smb/server/vfs_cache.c b/fs/smb/server/vfs_cache.c
index 1f8fa3468173..dfed6fce8904 100644
--- a/fs/smb/server/vfs_cache.c
+++ b/fs/smb/server/vfs_cache.c
@@ -661,21 +661,40 @@ __close_file_table_ids(struct ksmbd_file_table *ft,
bool (*skip)(struct ksmbd_tree_connect *tcon,
struct ksmbd_file *fp))
{
- unsigned int id;
- struct ksmbd_file *fp;
- int num = 0;
+ struct ksmbd_file *fp;
+ unsigned int id = 0;
+ int num = 0;
+
+ while (1) {
+ write_lock(&ft->lock);
+ fp = idr_get_next(ft->idr, &id);
+ if (!fp) {
+ write_unlock(&ft->lock);
+ break;
+ }
- idr_for_each_entry(ft->idr, fp, id) {
- if (skip(tcon, fp))
+ if (skip(tcon, fp) ||
+ !atomic_dec_and_test(&fp->refcount)) {
+ id++;
+ write_unlock(&ft->lock);
continue;
+ }
set_close_state_blocked_works(fp);
+ idr_remove(ft->idr, fp->volatile_id);
+ fp->volatile_id = KSMBD_NO_FID;
+ write_unlock(&ft->lock);
+
+ down_write(&fp->f_ci->m_lock);
+ list_del_init(&fp->node);
+ up_write(&fp->f_ci->m_lock);
- if (!atomic_dec_and_test(&fp->refcount))
- continue;
__ksmbd_close_fd(ft, fp);
+
num++;
+ id++;
}
+
return num;
}
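
__close_file_table_ids() now iterates with idr_get_next() and an explicit cursor, taking the table lock for each step, so an entry can be removed and its fd closed without holding the lock across __ksmbd_close_fd(). The cursor-per-step pattern, sketched with an array and a pthread rwlock standing in for the IDR and ft->lock:

#include <pthread.h>
#include <stdio.h>

#define N 8

static int table[N] = { 0, 3, 0, 5, 0, 0, 7, 0 };
static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

/* Find the next occupied slot at or after *id (idr_get_next() stand-in). */
static int next_used(unsigned int *id)
{
	for (; *id < N; (*id)++)
		if (table[*id])
			return table[*id];
	return 0;
}

int main(void)
{
	unsigned int id = 0;
	int val;

	while (1) {
		pthread_rwlock_wrlock(&lock);
		val = next_used(&id);
		if (!val) {
			pthread_rwlock_unlock(&lock);
			break;
		}
		table[id] = 0;                  /* remove the entry */
		id++;                           /* advance before unlocking */
		pthread_rwlock_unlock(&lock);
		printf("closed %d\n", val);     /* slow work done unlocked */
	}
	return 0;
}
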
diff --git a/fs/smb/server/vfs_cache.h b/fs/smb/server/vfs_cache.h
index 5bbb179736c2..0708155b5caf 100644
--- a/fs/smb/server/vfs_cache.h
+++ b/fs/smb/server/vfs_cache.h
@@ -44,6 +44,7 @@ struct ksmbd_lock {
struct stream {
char *name;
ssize_t size;
+ loff_t pos;
};
struct ksmbd_inode {