author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-03-16 20:22:50 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-03-16 20:22:50 +0300
commit | 4108e101972ce4e25d87fd4806b182505ef22ee8 (patch)
tree | 64a5a2cdbd34968e9a5deae044c38eaa35774739 /net/sunrpc
parent | 1a4431a5db2bf800c647ee0ed87f2727b8d6c29c (diff)
parent | bade4be69a6ea6f38c5894468ede10ee60b6f7a0 (diff)
download | linux-4108e101972ce4e25d87fd4806b182505ef22ee8.tar.xz
Merge tag 'nfsd-5.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux
Pull nfsd fixes from Chuck Lever:
"Miscellaneous NFSD fixes for v5.12-rc"
* tag 'nfsd-5.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux:
svcrdma: Revert "svcrdma: Reduce Receive doorbell rate"
NFSD: fix error handling in NFSv4.0 callbacks
NFSD: fix dest to src mount in inter-server COPY
Revert "nfsd4: a client's own opens needn't prevent delegations"
Revert "nfsd4: remove check_conflicting_opens warning"
rpc: fix NULL dereference on kmalloc failure
sunrpc: fix refcount leak for rpc auth modules
NFSD: Repair misuse of sv_lock in 5.10.16-rt30.
nfsd: don't abort copies early
fs: nfsd: fix kconfig dependency warning for NFSD_V4
svcrdma: disable timeouts on rdma backchannel
nfsd: Don't keep looking up unhashed files in the nfsd file cache
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- | net/sunrpc/auth_gss/svcauth_gss.c | 11
-rw-r--r-- | net/sunrpc/svc.c | 6
-rw-r--r-- | net/sunrpc/svc_xprt.c | 4
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 6
-rw-r--r-- | net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 82
5 files changed, 55 insertions, 54 deletions
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index bd4678db9d76..6dff64374bfe 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1825,11 +1825,14 @@ static int
 svcauth_gss_release(struct svc_rqst *rqstp)
 {
 	struct gss_svc_data *gsd = (struct gss_svc_data *)rqstp->rq_auth_data;
-	struct rpc_gss_wire_cred *gc = &gsd->clcred;
+	struct rpc_gss_wire_cred *gc;
 	struct xdr_buf *resbuf = &rqstp->rq_res;
 	int stat = -EINVAL;
 	struct sunrpc_net *sn = net_generic(SVC_NET(rqstp), sunrpc_net_id);
 
+	if (!gsd)
+		goto out;
+	gc = &gsd->clcred;
 	if (gc->gc_proc != RPC_GSS_PROC_DATA)
 		goto out;
 	/* Release can be called twice, but we only wrap once. */
@@ -1870,10 +1873,10 @@ out_err:
 	if (rqstp->rq_cred.cr_group_info)
 		put_group_info(rqstp->rq_cred.cr_group_info);
 	rqstp->rq_cred.cr_group_info = NULL;
-	if (gsd->rsci)
+	if (gsd && gsd->rsci) {
 		cache_put(&gsd->rsci->h, sn->rsc_cache);
-	gsd->rsci = NULL;
-
+		gsd->rsci = NULL;
+	}
 	return stat;
 }
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 61fb8a18552c..d76dc9d95d16 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -1413,7 +1413,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
 
  sendit:
 	if (svc_authorise(rqstp))
-		goto close;
+		goto close_xprt;
 	return 1;		/* Caller can now send it */
 
  release_dropit:
@@ -1425,6 +1425,8 @@ release_dropit:
 	return 0;
 
  close:
+	svc_authorise(rqstp);
+close_xprt:
 	if (rqstp->rq_xprt && test_bit(XPT_TEMP, &rqstp->rq_xprt->xpt_flags))
 		svc_close_xprt(rqstp->rq_xprt);
 	dprintk("svc: svc_process close\n");
@@ -1433,7 +1435,7 @@ release_dropit:
  err_short_len:
 	svc_printk(rqstp, "short len %zd, dropping request\n",
 			argv->iov_len);
-	goto close;
+	goto close_xprt;
 
  err_bad_rpc:
 	serv->sv_stats->rpcbadfmt++;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index dcc50ae54550..3cdd71a8df1e 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -1060,7 +1060,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
 	struct svc_xprt *xprt;
 	int ret = 0;
 
-	spin_lock(&serv->sv_lock);
+	spin_lock_bh(&serv->sv_lock);
 	list_for_each_entry(xprt, xprt_list, xpt_list) {
 		if (xprt->xpt_net != net)
 			continue;
@@ -1068,7 +1068,7 @@ static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, st
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
 		svc_xprt_enqueue(xprt);
 	}
-	spin_unlock(&serv->sv_lock);
+	spin_unlock_bh(&serv->sv_lock);
 	return ret;
 }
 
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 4a1edbb4028e..9150df35fb6f 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -252,9 +252,9 @@ xprt_setup_rdma_bc(struct xprt_create *args)
 	xprt->timeout = &xprt_rdma_bc_timeout;
 	xprt_set_bound(xprt);
 	xprt_set_connected(xprt);
-	xprt->bind_timeout = RPCRDMA_BIND_TO;
-	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
-	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;
+	xprt->bind_timeout = 0;
+	xprt->reestablish_timeout = 0;
+	xprt->idle_timeout = 0;
 
 	xprt->prot = XPRT_TRANSPORT_BC_RDMA;
 	xprt->ops = &xprt_rdma_bc_procs;
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 6d28f23ceb35..7d34290e2ff8 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -266,46 +266,33 @@ void svc_rdma_release_rqst(struct svc_rqst *rqstp)
 		svc_rdma_recv_ctxt_put(rdma, ctxt);
 }
 
-static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
-				   unsigned int wanted, bool temp)
+static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
+				struct svc_rdma_recv_ctxt *ctxt)
 {
-	const struct ib_recv_wr *bad_wr = NULL;
-	struct svc_rdma_recv_ctxt *ctxt;
-	struct ib_recv_wr *recv_chain;
 	int ret;
 
-	recv_chain = NULL;
-	while (wanted--) {
-		ctxt = svc_rdma_recv_ctxt_get(rdma);
-		if (!ctxt)
-			break;
-
-		trace_svcrdma_post_recv(ctxt);
-		ctxt->rc_temp = temp;
-		ctxt->rc_recv_wr.next = recv_chain;
-		recv_chain = &ctxt->rc_recv_wr;
-		rdma->sc_pending_recvs++;
-	}
-	if (!recv_chain)
-		return false;
-
-	ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
+	trace_svcrdma_post_recv(ctxt);
+	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
 	if (ret)
 		goto err_post;
-	return true;
+	return 0;
 
 err_post:
-	while (bad_wr) {
-		ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
-				    rc_recv_wr);
-		bad_wr = bad_wr->next;
-		svc_rdma_recv_ctxt_put(rdma, ctxt);
-	}
-	trace_svcrdma_rq_post_err(rdma, ret);
-	/* Since we're destroying the xprt, no need to reset
-	 * sc_pending_recvs. */
-	return false;
+	svc_rdma_recv_ctxt_put(rdma, ctxt);
+	return ret;
+}
+
+static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
+{
+	struct svc_rdma_recv_ctxt *ctxt;
+
+	if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
+		return 0;
+	ctxt = svc_rdma_recv_ctxt_get(rdma);
+	if (!ctxt)
+		return -ENOMEM;
+	return __svc_rdma_post_recv(rdma, ctxt);
 }
 
 /**
@@ -316,7 +303,20 @@ err_post:
  */
 bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
 {
-	return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
+	struct svc_rdma_recv_ctxt *ctxt;
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < rdma->sc_max_requests; i++) {
+		ctxt = svc_rdma_recv_ctxt_get(rdma);
+		if (!ctxt)
+			return false;
+		ctxt->rc_temp = true;
+		ret = __svc_rdma_post_recv(rdma, ctxt);
+		if (ret)
+			return false;
+	}
+	return true;
 }
 
 /**
@@ -324,6 +324,8 @@ bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
  * @cq: Completion Queue context
  * @wc: Work Completion object
  *
+ * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
+ * the Receive completion handler could be running.
  */
 static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 {
@@ -331,8 +333,6 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	struct ib_cqe *cqe = wc->wr_cqe;
 	struct svc_rdma_recv_ctxt *ctxt;
 
-	rdma->sc_pending_recvs--;
-
 	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
 	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
 
@@ -340,6 +340,9 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	if (wc->status != IB_WC_SUCCESS)
 		goto flushed;
 
+	if (svc_rdma_post_recv(rdma))
+		goto post_err;
+
 	/* All wc fields are now known to be valid */
 	ctxt->rc_byte_len = wc->byte_len;
 
@@ -350,18 +353,11 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	spin_unlock(&rdma->sc_rq_dto_lock);
 	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
 		svc_xprt_enqueue(&rdma->sc_xprt);
-
-	if (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags) &&
-	    rdma->sc_pending_recvs < rdma->sc_max_requests)
-		if (!svc_rdma_refresh_recvs(rdma, RPCRDMA_MAX_RECV_BATCH,
-					    false))
-			goto post_err;
-
 	return;
 
 flushed:
-	svc_rdma_recv_ctxt_put(rdma, ctxt);
 post_err:
+	svc_rdma_recv_ctxt_put(rdma, ctxt);
 	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
 	svc_xprt_enqueue(&rdma->sc_xprt);
 }