author | Chuck Lever <chuck.lever@oracle.com> | 2021-01-05 18:15:09 +0300
---|---|---
committer | Chuck Lever <chuck.lever@oracle.com> | 2021-03-22 20:22:13 +0300
commit | 82011c80b3ec0e05940a2ee2c76c1df9fd2b1ce8 (patch) |
tree | 8918ebef358af24940e2b5a292a23046d62c1861 /net/sunrpc/svc_xprt.c |
parent | 7dcfbd86adc45f6d6b37278efd22530cf80ab474 (diff) |
download | linux-82011c80b3ec0e05940a2ee2c76c1df9fd2b1ce8.tar.xz |
SUNRPC: Move svc_xprt_received() call sites
Currently, XPT_BUSY is not cleared until xpo_recvfrom returns.
That effectively blocks the receipt and handling of the next RPC
message until the current one has been taken off the transport.
This strict ordering is a requirement for socket transports.
For our kernel RPC/RDMA transport implementation, however, dequeuing
an ingress message is nothing more than a list_del(). The transport
can safely be marked un-busy as soon as that is done.
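To make the ordering difference concrete, here is a rough userspace model in plain C11. None of these names are kernel APIs; struct xprt_model, socket_style_recv(), and queue_style_recv() are invented for illustration. The socket case has to keep the transport marked busy until the record has been pulled off the byte stream, while the queue-backed case can drop the busy mark as soon as the message is unlinked.

```c
/* Hypothetical userspace model of the ordering constraint; the names
 * are illustrative, not kernel APIs. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct msg { const char *payload; struct msg *next; };

struct xprt_model {
	atomic_bool busy;        /* models XPT_BUSY */
	struct msg *ingress;     /* models a queue of complete messages */
	const char *stream;      /* models an unparsed socket byte stream */
};

/* Socket-style: the record must be pulled off the byte stream before
 * another thread may touch the transport, so "busy" stays set until
 * the copy is done (i.e. XPT_BUSY cleared only after xpo_recvfrom). */
static void socket_style_recv(struct xprt_model *x, char *buf, size_t len)
{
	snprintf(buf, len, "%s", x->stream);
	atomic_store(&x->busy, false);           /* only now is it safe */
}

/* Queue-style (the RDMA case described above): dequeuing is just
 * unlinking a node, so "busy" can be dropped right after the
 * list_del() analogue. */
static struct msg *queue_style_recv(struct xprt_model *x)
{
	struct msg *m = x->ingress;

	if (m)
		x->ingress = m->next;            /* list_del() analogue */
	atomic_store(&x->busy, false);           /* safe right away */
	return m;                                /* parse after un-busying */
}

int main(void)
{
	struct msg m = { .payload = "RPC call" };
	struct xprt_model x = { .ingress = &m, .stream = "RPC call" };
	char buf[64];

	atomic_store(&x.busy, true);
	socket_style_recv(&x, buf, sizeof(buf));
	printf("socket-style: %s\n", buf);

	atomic_store(&x.busy, true);
	printf("queue-style: %s\n", queue_style_recv(&x)->payload);
	return 0;
}
```

In this model the RDMA case corresponds to queue_style_recv(): the busy mark is released as soon as the list is updated, which is the optimization the subsequent patch enables.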
To keep the changes simpler, this patch just moves the
svc_xprt_received() call site from svc_handle_xprt() into the
transports, so that the actual optimization can be done in a
subsequent patch.
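The shape of that split can be sketched with function pointers: the generic dispatcher no longer signals "received"; the transport's receive hook does, at whatever point its own rules allow. This is a hypothetical model, not the kernel's types; only svc_xprt_received() and xpo_recvfrom are real names referenced in the comments.

```c
/* Hypothetical model of the call-site move -- not the kernel's data
 * structures.  Only the idea is the same: the generic dispatcher no
 * longer signals "received"; the transport's recvfrom hook does. */
#include <stdbool.h>
#include <stdio.h>

struct xprt_model;

struct xprt_ops_model {
	int (*recvfrom)(struct xprt_model *x);	/* stands in for xpo_recvfrom */
};

struct xprt_model {
	const struct xprt_ops_model *ops;
	bool busy;				/* stands in for XPT_BUSY */
};

/* Stands in for svc_xprt_received(): mark the transport free again. */
static void xprt_received_model(struct xprt_model *x)
{
	x->busy = false;
}

/* The transport now owns this call; how early it can safely happen
 * depends on the transport (end of receive for sockets, right after
 * dequeue for a queue-backed transport). */
static int queue_recvfrom_model(struct xprt_model *x)
{
	int len = 42;			/* pretend a message was dequeued */

	xprt_received_model(x);
	return len;
}

/* Generic side after this patch: no "received" call here any more. */
static int handle_xprt_model(struct xprt_model *x)
{
	return x->ops->recvfrom(x);
}

int main(void)
{
	static const struct xprt_ops_model ops = {
		.recvfrom = queue_recvfrom_model,
	};
	struct xprt_model x = { .ops = &ops, .busy = true };
	int len = handle_xprt_model(&x);

	printf("len=%d, busy=%s\n", len, x.busy ? "yes" : "no");
	return 0;
}
```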
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r-- | net/sunrpc/svc_xprt.c | 7 |
1 file changed, 4 insertions, 3 deletions
```diff
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 9d1374e82e90..42565f0c7d5a 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -820,8 +820,10 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
 			newxpt->xpt_cred = get_cred(xprt->xpt_cred);
 			svc_add_new_temp_xprt(serv, newxpt);
 			trace_svc_xprt_accept(newxpt, serv->sv_name);
-		} else
+		} else {
 			module_put(xprt->xpt_class->xcl_owner);
+		}
+		svc_xprt_received(xprt);
 	} else if (svc_xprt_reserve_slot(rqstp, xprt)) {
 		/* XPT_DATA|XPT_DEFERRED case: */
 		dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n",
@@ -836,8 +838,6 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
 		rqstp->rq_reserved = serv->sv_max_mesg;
 		atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved);
 	}
-	/* clear XPT_BUSY: */
-	svc_xprt_received(xprt);
 out:
 	trace_svc_handle_xprt(xprt, len);
 	return len;
@@ -1248,6 +1248,7 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
 	rqstp->rq_xprt_hlen = dr->xprt_hlen;
 	rqstp->rq_daddr = dr->daddr;
 	rqstp->rq_respages = rqstp->rq_pages;
+	svc_xprt_received(rqstp->rq_xprt);
 	return (dr->argslen<<2) - dr->xprt_hlen;
 }
```
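For context, svc_xprt_received() is the helper that clears XPT_BUSY (see the comment removed in the second hunk) and re-queues the transport so an idle server thread can pick it up again; that is why the deferred-request replay path in svc_deferred_recv() now has to make the call itself. Below is a minimal single-file model of that release-then-requeue handoff; xprt_received_model() and xprt_claim_model() are invented names, not kernel functions.

```c
/* Illustrative model of "clear busy, then re-enqueue" -- not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct xprt_model {
	atomic_bool busy;	/* models XPT_BUSY */
	atomic_bool queued;	/* models being on the pool's ready list */
};

/* Model of svc_xprt_received(): release the busy mark first, then put
 * the transport back where idle worker threads can find it. */
static void xprt_received_model(struct xprt_model *x)
{
	atomic_store_explicit(&x->busy, false, memory_order_release);
	atomic_store(&x->queued, true);		/* models re-enqueueing */
}

/* A worker claims a queued transport by winning the busy flag. */
static bool xprt_claim_model(struct xprt_model *x)
{
	bool expected = false;

	if (!atomic_exchange(&x->queued, false))
		return false;
	return atomic_compare_exchange_strong(&x->busy, &expected, true);
}

int main(void)
{
	struct xprt_model x;

	atomic_init(&x.busy, true);		/* a worker owns the xprt */
	atomic_init(&x.queued, false);
	xprt_received_model(&x);		/* done receiving: release + requeue */
	printf("claimed again: %s\n", xprt_claim_model(&x) ? "yes" : "no");
	return 0;
}
```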