| author | Long Li <longli@microsoft.com> | 2019-04-16 00:49:17 +0300 |
|---|---|---|
| committer | Steve French <stfrench@microsoft.com> | 2019-05-08 07:24:55 +0300 |
| commit | 4739f2328661d070f93f9bcc8afb2a82706c826d (patch) | |
| tree | 17b12780ba55f25a866e414fda77d0410b606694 /fs | |
| parent | 46e6661963fb5d55952b550f0716bda22e10f1ae (diff) | |
cifs: smbd: take an array of requests when sending upper layer data
To support compounding, __smb_send_rqst() now sends an array of requests to
the transport layer.
Change smbd_send() to take an array of requests, and send them in as few
packets as possible.
Signed-off-by: Long Li <longli@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
CC: Stable <stable@vger.kernel.org>
Diffstat (limited to 'fs')
| -rw-r--r-- | fs/cifs/smbdirect.c | 55 |
|---|---|---|
| -rw-r--r-- | fs/cifs/smbdirect.h | 5 |
| -rw-r--r-- | fs/cifs/transport.c | 2 |

3 files changed, 32 insertions, 30 deletions
```diff
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index 05b05e78f31b..251ef1223206 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -2072,7 +2072,8 @@ out:
  * rqst: the data to write
  * return value: 0 if successfully write, otherwise error code
  */
-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+int smbd_send(struct TCP_Server_Info *server,
+	int num_rqst, struct smb_rqst *rqst_array)
 {
 	struct smbd_connection *info = server->smbd_conn;
 	struct kvec vec;
@@ -2084,6 +2085,8 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 		info->max_send_size - sizeof(struct smbd_data_transfer);
 	struct kvec *iov;
 	int rc;
+	struct smb_rqst *rqst;
+	int rqst_idx;
 
 	if (info->transport_status != SMBD_CONNECTED) {
 		rc = -EAGAIN;
@@ -2091,46 +2094,40 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 	}
 
 	/*
-	 * Skip the RFC1002 length defined in MS-SMB2 section 2.1
-	 * It is used only for TCP transport in the iov[0]
-	 * In future we may want to add a transport layer under protocol
-	 * layer so this will only be issued to TCP transport
-	 */
-
-	if (rqst->rq_iov[0].iov_len != 4) {
-		log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
-		return -EINVAL;
-	}
-
-	/*
 	 * Add in the page array if there is one. The caller needs to set
 	 * rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
 	 * ends at page boundary
 	 */
-	buflen = smb_rqst_len(server, rqst);
+	remaining_data_length = 0;
+	for (i = 0; i < num_rqst; i++)
+		remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
 
-	if (buflen + sizeof(struct smbd_data_transfer) >
+	if (remaining_data_length + sizeof(struct smbd_data_transfer) >
 		info->max_fragmented_send_size) {
 		log_write(ERR, "payload size %d > max size %d\n",
-			buflen, info->max_fragmented_send_size);
+			remaining_data_length, info->max_fragmented_send_size);
 		rc = -EINVAL;
 		goto done;
 	}
 
-	iov = &rqst->rq_iov[1];
+	rqst_idx = 0;
+
+next_rqst:
+	rqst = &rqst_array[rqst_idx];
+	iov = rqst->rq_iov;
 
-	cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
-	for (i = 0; i < rqst->rq_nvec-1; i++)
+	cifs_dbg(FYI, "Sending smb (RDMA): idx=%d smb_len=%lu\n",
+		rqst_idx, smb_rqst_len(server, rqst));
+	for (i = 0; i < rqst->rq_nvec; i++)
 		dump_smb(iov[i].iov_base, iov[i].iov_len);
 
-	remaining_data_length = buflen;
-
-	log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
-		"rq_tailsz=%d buflen=%d\n",
-		rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
-		rqst->rq_tailsz, buflen);
+	log_write(INFO, "rqst_idx=%d nvec=%d rqst->rq_npages=%d rq_pagesz=%d "
+		"rq_tailsz=%d buflen=%lu\n",
+		rqst_idx, rqst->rq_nvec, rqst->rq_npages, rqst->rq_pagesz,
+		rqst->rq_tailsz, smb_rqst_len(server, rqst));
 
-	start = i = iov[0].iov_len ? 0 : 1;
+	start = i = 0;
 	buflen = 0;
 	while (true) {
 		buflen += iov[i].iov_len;
@@ -2178,14 +2175,14 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 						goto done;
 				}
 				i++;
-				if (i == rqst->rq_nvec-1)
+				if (i == rqst->rq_nvec)
 					break;
 			}
 			start = i;
 			buflen = 0;
 		} else {
 			i++;
-			if (i == rqst->rq_nvec-1) {
+			if (i == rqst->rq_nvec) {
 				/* send out all remaining vecs */
 				remaining_data_length -= buflen;
 				log_write(INFO,
@@ -2229,6 +2226,10 @@ int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
 		}
 	}
 
+	rqst_idx++;
+	if (rqst_idx < num_rqst)
+		goto next_rqst;
+
 done:
 	/*
 	 * As an optimization, we don't wait for individual I/O to finish
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
index a3c7b3d0561e..f6241b8bce5f 100644
--- a/fs/cifs/smbdirect.h
+++ b/fs/cifs/smbdirect.h
@@ -284,7 +284,8 @@ void smbd_destroy(struct TCP_Server_Info *server);
 
 /* Interface for carrying upper layer I/O through send/recv */
 int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
+int smbd_send(struct TCP_Server_Info *server,
+	int num_rqst, struct smb_rqst *rqst);
 
 enum mr_state {
 	MR_READY,
@@ -324,7 +325,7 @@ static inline void *smbd_get_connection(
 static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
 static inline void smbd_destroy(struct TCP_Server_Info *server) {}
 static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
+static inline int smbd_send(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) {return -1; }
 #endif
 
 #endif
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 65d9f3e2eb88..5c59c498f56a 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -319,7 +319,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
 	__be32 rfc1002_marker;
 
 	if (cifs_rdma_enabled(server) && server->smbd_conn) {
-		rc = smbd_send(server, rqst);
+		rc = smbd_send(server, num_rqst, rqst);
 		goto smbd_done;
 	}
```
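To make the calling convention described in the commit message concrete, here is a minimal, self-contained userspace sketch of the pattern the patch introduces: the transport send routine takes an array of requests, sums their payload lengths up front for the fragmented-send-size check, and then walks each request's vectors in turn. This is an illustration only, not kernel code; `fake_kvec`, `fake_rqst`, `fake_rqst_len()` and `demo_send()` are invented stand-ins for `kvec`, `smb_rqst`, `smb_rqst_len()` and `smbd_send()`.

```c
/*
 * Illustration only (not kernel code): the array-of-requests send
 * pattern introduced by this patch. fake_kvec/fake_rqst/demo_send()
 * are invented stand-ins for kvec/smb_rqst/smbd_send().
 */
#include <stdio.h>
#include <stddef.h>

struct fake_kvec {
	const char *iov_base;
	size_t iov_len;
};

struct fake_rqst {
	struct fake_kvec *rq_iov;
	int rq_nvec;
};

/* Stand-in for smb_rqst_len(): total bytes carried by one request. */
static size_t fake_rqst_len(const struct fake_rqst *rqst)
{
	size_t len = 0;
	int i;

	for (i = 0; i < rqst->rq_nvec; i++)
		len += rqst->rq_iov[i].iov_len;
	return len;
}

/*
 * Stand-in for the new smbd_send(server, num_rqst, rqst_array) shape:
 * check the combined payload against the transport limit, then walk
 * every request and every vector in it.
 */
static int demo_send(int num_rqst, struct fake_rqst *rqst_array,
		     size_t max_fragmented_send_size)
{
	size_t remaining_data_length = 0;
	int rqst_idx, i;

	/* Total payload across all requests, computed up front. */
	for (rqst_idx = 0; rqst_idx < num_rqst; rqst_idx++)
		remaining_data_length += fake_rqst_len(&rqst_array[rqst_idx]);

	if (remaining_data_length > max_fragmented_send_size) {
		fprintf(stderr, "payload size %zu > max size %zu\n",
			remaining_data_length, max_fragmented_send_size);
		return -1;
	}

	/* iov[0] is real data now, not an RFC1002 length header. */
	for (rqst_idx = 0; rqst_idx < num_rqst; rqst_idx++) {
		struct fake_rqst *rqst = &rqst_array[rqst_idx];

		printf("rqst_idx=%d nvec=%d len=%zu\n",
		       rqst_idx, rqst->rq_nvec, fake_rqst_len(rqst));
		for (i = 0; i < rqst->rq_nvec; i++)
			printf("  iov[%d]: %.*s\n", i,
			       (int)rqst->rq_iov[i].iov_len,
			       rqst->rq_iov[i].iov_base);
	}
	return 0;
}

int main(void)
{
	struct fake_kvec first[] = { { "HDR1", 4 }, { "BODY1", 5 } };
	struct fake_kvec second[] = { { "HDR2", 4 } };
	struct fake_rqst compound[] = { { first, 2 }, { second, 1 } };

	/* Two compounded requests handed to one transport call. */
	return demo_send(2, compound, 1024) ? 1 : 0;
}
```

The real smbd_send() additionally splits vectors larger than the maximum RDMA send size across several packets and decrements remaining_data_length as each packet goes out; the sketch only mirrors the array walk and the up-front total-size check that the patch adds.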