Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/Kconfig    |  2
-rw-r--r--  net/sunrpc/sched.c    | 33
-rw-r--r--  net/sunrpc/xprt.c     |  2
-rw-r--r--  net/sunrpc/xprtsock.c | 23
4 files changed, 40 insertions, 20 deletions
diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig
index dcef600d0bf5..5592883e1e4a 100644
--- a/net/sunrpc/Kconfig
+++ b/net/sunrpc/Kconfig
@@ -6,7 +6,7 @@ config SUNRPC_GSS
 
 config SUNRPC_XPRT_RDMA
 	tristate
-	depends on SUNRPC && INFINIBAND && EXPERIMENTAL
+	depends on SUNRPC && INFINIBAND && INFINIBAND_ADDR_TRANS && EXPERIMENTAL
 	default SUNRPC && INFINIBAND
 	help
 	  This option allows the NFS client and server to support
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 385f427bedad..ff50a0546865 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -293,11 +293,6 @@ static void rpc_make_runnable(struct rpc_task *task)
 	rpc_clear_queued(task);
 	if (rpc_test_and_set_running(task))
 		return;
-	/* We might have raced */
-	if (RPC_IS_QUEUED(task)) {
-		rpc_clear_running(task);
-		return;
-	}
 	if (RPC_IS_ASYNC(task)) {
 		int status;
 
@@ -607,7 +602,9 @@ void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
  */
 static void __rpc_execute(struct rpc_task *task)
 {
-	int		status = 0;
+	struct rpc_wait_queue *queue;
+	int task_is_async = RPC_IS_ASYNC(task);
+	int status = 0;
 
 	dprintk("RPC: %5u __rpc_execute flags=0x%x\n",
 			task->tk_pid, task->tk_flags);
@@ -647,15 +644,25 @@ static void __rpc_execute(struct rpc_task *task)
 		 */
 		if (!RPC_IS_QUEUED(task))
 			continue;
-		rpc_clear_running(task);
-		if (RPC_IS_ASYNC(task)) {
-			/* Careful! we may have raced... */
-			if (RPC_IS_QUEUED(task))
-				return;
-			if (rpc_test_and_set_running(task))
-				return;
+		/*
+		 * The queue->lock protects against races with
+		 * rpc_make_runnable().
+		 *
+		 * Note that once we clear RPC_TASK_RUNNING on an asynchronous
+		 * rpc_task, rpc_make_runnable() can assign it to a
+		 * different workqueue. We therefore cannot assume that the
+		 * rpc_task pointer may still be dereferenced.
+		 */
+		queue = task->tk_waitqueue;
+		spin_lock_bh(&queue->lock);
+		if (!RPC_IS_QUEUED(task)) {
+			spin_unlock_bh(&queue->lock);
 			continue;
 		}
+		rpc_clear_running(task);
+		spin_unlock_bh(&queue->lock);
+		if (task_is_async)
+			return;
 
 		/* sync task: sleep here */
 		dprintk("RPC: %5u sync task going to sleep\n", task->tk_pid);
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 29e401bb612e..62098d101a1f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -663,7 +663,7 @@ void xprt_connect(struct rpc_task *task)
 			xprt, (xprt_connected(xprt) ? "is" : "is not"));
 
 	if (!xprt_bound(xprt)) {
-		task->tk_status = -EIO;
+		task->tk_status = -EAGAIN;
 		return;
 	}
 	if (!xprt_lock_write(xprt, task))
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 5cbb404c4cdf..29c71e645b27 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -467,7 +467,7 @@ static int xs_sendpages(struct socket *sock, struct sockaddr *addr, int addrlen,
 	int err, sent = 0;
 
 	if (unlikely(!sock))
-		return -ENOTCONN;
+		return -ENOTSOCK;
 
 	clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
 	if (base != 0) {
@@ -577,6 +577,8 @@ static int xs_udp_send_request(struct rpc_task *task)
 				req->rq_svec->iov_base,
 				req->rq_svec->iov_len);
 
+	if (!xprt_bound(xprt))
+		return -ENOTCONN;
 	status = xs_sendpages(transport->sock,
 			      xs_addr(xprt),
 			      xprt->addrlen, xdr,
@@ -594,6 +596,10 @@
 	}
 
 	switch (status) {
+	case -ENOTSOCK:
+		status = -ENOTCONN;
+		/* Should we call xs_close() here? */
+		break;
 	case -EAGAIN:
 		xs_nospace(task);
 		break;
@@ -693,6 +699,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
 	}
 
 	switch (status) {
+	case -ENOTSOCK:
+		status = -ENOTCONN;
+		/* Should we call xs_close() here? */
+		break;
 	case -EAGAIN:
 		xs_nospace(task);
 		break;
@@ -1523,7 +1533,7 @@ static void xs_udp_connect_worker4(struct work_struct *work)
 	struct socket *sock = transport->sock;
 	int err, status = -EIO;
 
-	if (xprt->shutdown || !xprt_bound(xprt))
+	if (xprt->shutdown)
 		goto out;
 
 	/* Start by resetting any existing state */
@@ -1564,7 +1574,7 @@ static void xs_udp_connect_worker6(struct work_struct *work)
 	struct socket *sock = transport->sock;
 	int err, status = -EIO;
 
-	if (xprt->shutdown || !xprt_bound(xprt))
+	if (xprt->shutdown)
 		goto out;
 
 	/* Start by resetting any existing state */
@@ -1648,6 +1658,9 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 		write_unlock_bh(&sk->sk_callback_lock);
 	}
 
+	if (!xprt_bound(xprt))
+		return -ENOTCONN;
+
 	/* Tell the socket layer to start connecting... */
 	xprt->stat.connect_count++;
 	xprt->stat.connect_start = jiffies;
@@ -1668,7 +1681,7 @@ static void xs_tcp_connect_worker4(struct work_struct *work)
 	struct socket *sock = transport->sock;
 	int err, status = -EIO;
 
-	if (xprt->shutdown || !xprt_bound(xprt))
+	if (xprt->shutdown)
 		goto out;
 
 	if (!sock) {
@@ -1728,7 +1741,7 @@ static void xs_tcp_connect_worker6(struct work_struct *work)
 	struct socket *sock = transport->sock;
 	int err, status = -EIO;
 
-	if (xprt->shutdown || !xprt_bound(xprt))
+	if (xprt->shutdown)
 		goto out;
 
 	if (!sock) {
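
For readers skimming the sched.c hunks above, the following stand-alone sketch illustrates the locking pattern the patch introduces in __rpc_execute(): the executor re-checks the QUEUED state and clears its RUNNING state only while holding the wait queue's lock, so it cannot race with rpc_make_runnable(). This is a hedged, user-space approximation using pthread mutexes rather than the kernel's spin_lock_bh(); the names demo_task, demo_queue, demo_keep_running and demo_make_runnable are invented for illustration and are not part of the SUNRPC code.

/*
 * User-space sketch of the locking pattern the sched.c hunks introduce
 * in __rpc_execute().  Illustration only: demo_* names are invented and
 * pthread mutexes stand in for spin_lock_bh(&queue->lock).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
	pthread_mutex_t lock;		/* stands in for rpc_wait_queue->lock */
};

struct demo_task {
	bool queued;			/* stands in for RPC_TASK_QUEUED  */
	bool running;			/* stands in for RPC_TASK_RUNNING */
	struct demo_queue *waitqueue;	/* stands in for task->tk_waitqueue */
};

/*
 * Executor side: decide whether to keep looping.  QUEUED is re-checked and
 * RUNNING is cleared under the queue lock, so a concurrent waker cannot
 * slip in between the check and the clear.
 */
static bool demo_keep_running(struct demo_task *task)
{
	struct demo_queue *queue = task->waitqueue;
	bool keep_running;

	pthread_mutex_lock(&queue->lock);
	if (!task->queued) {
		keep_running = true;	/* raced with a waker: loop again */
	} else {
		task->running = false;	/* give up RUNNING while still locked */
		keep_running = false;	/* caller returns (async) or sleeps (sync) */
	}
	pthread_mutex_unlock(&queue->lock);
	return keep_running;
}

/* Waker side: dequeue the task and mark it RUNNING under the same lock. */
static void demo_make_runnable(struct demo_task *task)
{
	struct demo_queue *queue = task->waitqueue;

	pthread_mutex_lock(&queue->lock);
	task->queued = false;
	if (!task->running) {
		task->running = true;
		/* the real code would now hand the task to a workqueue */
	}
	pthread_mutex_unlock(&queue->lock);
}

int main(void)
{
	struct demo_queue queue;
	struct demo_task task = { .queued = true, .running = true,
				  .waitqueue = &queue };

	pthread_mutex_init(&queue.lock, NULL);
	printf("keep running after queueing: %d\n", demo_keep_running(&task));
	demo_make_runnable(&task);
	printf("running again after wake-up: %d\n", task.running);
	pthread_mutex_destroy(&queue.lock);
	return 0;
}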