author | J. Bruce Fields <bfields@redhat.com> | 2012-08-14 01:03:00 +0400 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2012-08-21 22:08:40 +0400 |
commit | 719f8bcc883e7992615f4d5625922e24995e2d98 (patch) | |
tree | 537ba2ab2a43abad53caa83825b8efc897fd1df6 /net/sunrpc/svc_xprt.c | |
parent | 21179d81f1de37c93435dce10d2a4378c370ecca (diff) | |
download | linux-719f8bcc883e7992615f4d5625922e24995e2d98.tar.xz |
svcrpc: fix xpt_list traversal locking on shutdown
Server threads are not running at this point, but svc_age_temp_xprts
still may be, so we need this locking.
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
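To make the race in the message concrete: svc_age_temp_xprts() may still walk the transport lists while shutdown marks entries on them, so both traversals have to hold serv->sv_lock. The sketch below is a minimal userspace analogue of that pattern, not kernel code; the names (xprt_entry, close_list, ager, list_lock) are invented for illustration and a pthread mutex stands in for sv_lock.

```c
/*
 * Illustrative userspace sketch (not kernel code): a "shutdown" path marks
 * entries on a shared list while an "ager" thread walks the same list, so
 * both sides take the same lock.  All names here are made up for the example.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct xprt_entry {
	struct xprt_entry *next;
	int net_id;
	int closed;
};

static struct xprt_entry *xprt_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* rough analogue of svc_close_list(): mark matching entries, under the lock */
static void close_list(int net_id)
{
	pthread_mutex_lock(&list_lock);
	for (struct xprt_entry *e = xprt_list; e; e = e->next)
		if (e->net_id == net_id)
			e->closed = 1;
	pthread_mutex_unlock(&list_lock);
}

/* rough analogue of svc_age_temp_xprts(): may still traverse the list */
static void *ager(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&list_lock);
	for (struct xprt_entry *e = xprt_list; e; e = e->next)
		printf("aging entry net=%d closed=%d\n", e->net_id, e->closed);
	pthread_mutex_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct xprt_entry *e = calloc(1, sizeof(*e));
		e->net_id = i % 2;
		e->next = xprt_list;
		xprt_list = e;
	}

	pthread_t t;
	pthread_create(&t, NULL, ager, NULL);
	close_list(0);		/* both traversals serialize on list_lock */
	pthread_join(t, NULL);
	return 0;
}
```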
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r-- | net/sunrpc/svc_xprt.c | 24 |
1 file changed, 15 insertions(+), 9 deletions(-)
```diff
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index bac973a31367..e1810b947dea 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -917,16 +917,18 @@ void svc_close_xprt(struct svc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
 
-static void svc_close_list(struct list_head *xprt_list, struct net *net)
+static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
 {
 	struct svc_xprt *xprt;
 
+	spin_lock(&serv->sv_lock);
 	list_for_each_entry(xprt, xprt_list, xpt_list) {
 		if (xprt->xpt_net != net)
 			continue;
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
 		set_bit(XPT_BUSY, &xprt->xpt_flags);
 	}
+	spin_unlock(&serv->sv_lock);
 }
 
 static void svc_clear_pools(struct svc_serv *serv, struct net *net)
@@ -949,24 +951,28 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
 	}
 }
 
-static void svc_clear_list(struct list_head *xprt_list, struct net *net)
+static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
 {
 	struct svc_xprt *xprt;
 	struct svc_xprt *tmp;
+	LIST_HEAD(victims);
 
+	spin_lock(&serv->sv_lock);
 	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
 		if (xprt->xpt_net != net)
 			continue;
-		svc_delete_xprt(xprt);
+		list_move(&xprt->xpt_list, &victims);
 	}
-	list_for_each_entry(xprt, xprt_list, xpt_list)
-		BUG_ON(xprt->xpt_net == net);
+	spin_unlock(&serv->sv_lock);
+
+	list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
+		svc_delete_xprt(xprt);
 }
 
 void svc_close_net(struct svc_serv *serv, struct net *net)
 {
-	svc_close_list(&serv->sv_tempsocks, net);
-	svc_close_list(&serv->sv_permsocks, net);
+	svc_close_list(serv, &serv->sv_tempsocks, net);
+	svc_close_list(serv, &serv->sv_permsocks, net);
 
 	svc_clear_pools(serv, net);
 	/*
@@ -974,8 +980,8 @@ void svc_close_net(struct svc_serv *serv, struct net *net)
 	 * svc_xprt_enqueue will not add new entries without taking the
 	 * sp_lock and checking XPT_BUSY.
 	 */
-	svc_clear_list(&serv->sv_tempsocks, net);
-	svc_clear_list(&serv->sv_permsocks, net);
+	svc_clear_list(serv, &serv->sv_tempsocks, net);
+	svc_clear_list(serv, &serv->sv_permsocks, net);
 }
 
 /*
```
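The subtler half of the patch is svc_clear_list(): it now detaches matching transports onto a private victims list while holding sv_lock and only calls svc_delete_xprt() after dropping it, since that teardown path needs sv_lock itself to unlink the transport and so cannot safely run with the lock already held. Below is a simplified, hedged userspace sketch of that two-phase "collect under lock, delete outside it" pattern; the names (entry, shared_lock, delete_entry, clear_list) are illustrative, not the kernel's.

```c
/*
 * Simplified userspace sketch of the pattern the reworked svc_clear_list()
 * uses: move matching entries to a private "victims" list under the shared
 * lock, then tear them down after releasing it.  Illustrative names only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	int net_id;
};

static struct entry *shared_list;
static pthread_mutex_t shared_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for svc_delete_xprt(): needs the shared lock itself, so it must
 * not be called while clear_list() still holds it */
static void delete_entry(struct entry *e)
{
	pthread_mutex_lock(&shared_lock);
	/* per-entry teardown that requires the lock would happen here */
	pthread_mutex_unlock(&shared_lock);
	free(e);
}

static void clear_list(int net_id)
{
	struct entry *victims = NULL;

	/* phase 1: unlink matching entries onto a private list, under the lock */
	pthread_mutex_lock(&shared_lock);
	struct entry **pp = &shared_list;
	while (*pp) {
		struct entry *e = *pp;
		if (e->net_id == net_id) {
			*pp = e->next;		/* unlink from shared list */
			e->next = victims;	/* push onto victims */
			victims = e;
		} else {
			pp = &e->next;
		}
	}
	pthread_mutex_unlock(&shared_lock);

	/* phase 2: tear down the victims with no lock held */
	while (victims) {
		struct entry *e = victims;
		victims = e->next;
		delete_entry(e);
	}
}

int main(void)
{
	for (int i = 0; i < 4; i++) {
		struct entry *e = malloc(sizeof(*e));
		e->net_id = i % 2;
		e->next = shared_list;
		shared_list = e;
	}
	clear_list(1);
	for (struct entry *e = shared_list; e; e = e->next)
		printf("remaining net=%d\n", e->net_id);
	return 0;
}
```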