author    | Chuck Lever <chuck.lever@oracle.com>       | 2018-05-04 22:35:20 +0300
committer | Anna Schumaker <Anna.Schumaker@Netapp.com> | 2018-05-07 16:20:04 +0300
commit    | 7c8d9e7c8863905951d4eaa7a8d277150f3a37f7 (patch)
tree      | 16017a18ac2150272b7b5dd4be6fe1cd5d230aeb /net/sunrpc/xprtrdma/backchannel.c
parent    | 0e0b854cfb3302b1907e9d3a927469b95710238f (diff)
download  | linux-7c8d9e7c8863905951d4eaa7a8d277150f3a37f7.tar.xz
xprtrdma: Move Receive posting to Receive handler
Receive completion and Reply handling are done by a BOUND
workqueue, meaning they run on only one CPU.
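[Background note, not part of the commit message: a workqueue allocated without WQ_UNBOUND uses per-CPU worker pools, so a work item queued from a given CPU runs on that same CPU. A minimal sketch of allocating and using such a bound workqueue; the names example_recv_wq, example_recv_work, and example_recv_worker are illustrative, not xprtrdma code.]

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_recv_wq;
static struct work_struct example_recv_work;

static void example_recv_worker(struct work_struct *work)
{
	/* Executes on the CPU that called queue_work(), because the
	 * workqueue below is bound (no WQ_UNBOUND flag).
	 */
}

static int __init example_init(void)
{
	/* Omitting WQ_UNBOUND yields per-CPU (bound) worker pools. */
	example_recv_wq = alloc_workqueue("example_recv", WQ_MEM_RECLAIM, 0);
	if (!example_recv_wq)
		return -ENOMEM;

	INIT_WORK(&example_recv_work, example_recv_worker);
	queue_work(example_recv_wq, &example_recv_work);
	return 0;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_recv_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");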
Posting receives is currently done in the send_request path, which
on large systems is typically done on a different CPU than the one
handling Receive completions. This results in movement of
Receive-related cachelines between the sending and receiving CPUs.
More importantly, it means that currently Receives are posted while
the transport's write lock is held, which is unnecessary and costly.
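[Background note: "posting a Receive" at the verbs layer means handing the provider an ib_recv_wr that points at a registered, DMA-mapped buffer. The helper below is a hypothetical sketch, not the xprtrdma code; the single-SGE setup and the non-const ib_post_recv() signature reflect the kernel of this era.]

#include <rdma/ib_verbs.h>

/* Hypothetical helper: post one Receive work request on a QP.
 * The caller supplies an SGE describing a DMA-mapped buffer.
 */
static int example_post_one_recv(struct ib_qp *qp, struct ib_sge *sge)
{
	struct ib_recv_wr wr = {
		.next	 = NULL,
		.wr_id	 = (uintptr_t)sge,	/* cookie identifying the buffer */
		.sg_list = sge,
		.num_sge = 1,
	};
	struct ib_recv_wr *bad_wr;

	/* This call does not need the RPC transport's write lock,
	 * which is why posting can move out of the send path.
	 */
	return ib_post_recv(qp, &wr, &bad_wr);
}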
Finally, allocation of Receive buffers is performed on-demand in
the Receive completion handler. This helps guarantee that they are
allocated on the same NUMA node as the CPU that handles Receive
completions.
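[Background note on why the allocation site matters: a GFP_KERNEL allocation made on the completion handler's CPU comes, by default, from that CPU's local NUMA node. A minimal sketch, using a made-up example_rep structure rather than the real rpcrdma_rep.]

#include <linux/slab.h>

/* Illustrative receive-buffer wrapper; not the xprtrdma rpcrdma_rep. */
struct example_rep {
	void	*er_base;	/* Receive buffer payload */
	size_t	er_size;
};

static struct example_rep *example_alloc_rep(size_t size, gfp_t flags)
{
	struct example_rep *rep;

	/* When called from the Receive completion handler, kzalloc and
	 * kmalloc prefer the calling CPU's NUMA node, so both allocations
	 * land local to the CPU that processes Receive completions.
	 */
	rep = kzalloc(sizeof(*rep), flags);
	if (!rep)
		return NULL;

	rep->er_base = kmalloc(size, flags);
	if (!rep->er_base) {
		kfree(rep);
		return NULL;
	}
	rep->er_size = size;
	return rep;
}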
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc/xprtrdma/backchannel.c')
-rw-r--r-- | net/sunrpc/xprtrdma/backchannel.c | 32
1 file changed, 6 insertions(+), 26 deletions(-)
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index 4034788c9856..c8f1c2b89dad 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -71,23 +71,6 @@ out_fail:
 	return -ENOMEM;
 }
 
-/* Allocate and add receive buffers to the rpcrdma_buffer's
- * existing list of rep's. These are released when the
- * transport is destroyed.
- */
-static int rpcrdma_bc_setup_reps(struct rpcrdma_xprt *r_xprt,
-				 unsigned int count)
-{
-	int rc = 0;
-
-	while (count--) {
-		rc = rpcrdma_create_rep(r_xprt);
-		if (rc)
-			break;
-	}
-	return rc;
-}
-
 /**
  * xprt_rdma_bc_setup - Pre-allocate resources for handling backchannel requests
  * @xprt: transport associated with these backchannel resources
@@ -116,14 +99,6 @@ int xprt_rdma_bc_setup(struct rpc_xprt *xprt, unsigned int reqs)
 	if (rc)
 		goto out_free;
 
-	rc = rpcrdma_bc_setup_reps(r_xprt, reqs);
-	if (rc)
-		goto out_free;
-
-	rc = rpcrdma_ep_post_extra_recv(r_xprt, reqs);
-	if (rc)
-		goto out_free;
-
 	r_xprt->rx_buf.rb_bc_srv_max_requests = reqs;
 	request_module("svcrdma");
 	trace_xprtrdma_cb_setup(r_xprt, reqs);
@@ -228,6 +203,7 @@ int xprt_rdma_bc_send_reply(struct rpc_rqst *rqst)
 	if (rc < 0)
 		goto failed_marshal;
 
+	rpcrdma_post_recvs(r_xprt, true);
 	if (rpcrdma_ep_post(&r_xprt->rx_ia, &r_xprt->rx_ep, req))
 		goto drop_connection;
 	return 0;
@@ -268,10 +244,14 @@ void xprt_rdma_bc_destroy(struct rpc_xprt *xprt, unsigned int reqs)
  */
 void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
 {
+	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	struct rpc_xprt *xprt = rqst->rq_xprt;
 
 	dprintk("RPC:       %s: freeing rqst %p (req %p)\n",
-		__func__, rqst, rpcr_to_rdmar(rqst));
+		__func__, rqst, req);
+
+	rpcrdma_recv_buffer_put(req->rl_reply);
+	req->rl_reply = NULL;
 
 	spin_lock_bh(&xprt->bc_pa_lock);
 	list_add_tail(&rqst->rq_bc_pa_list, &xprt->bc_pa_list);