Diffstat (limited to 'fs/afs/rxrpc.c')
-rw-r--r-- | fs/afs/rxrpc.c | 37
1 file changed, 15 insertions, 22 deletions
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c
index 9f2a3bb56ec6..886416ea1d96 100644
--- a/fs/afs/rxrpc.c
+++ b/fs/afs/rxrpc.c
@@ -149,7 +149,8 @@ static struct afs_call *afs_alloc_call(struct afs_net *net,
 	call->net = net;
 	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
 	refcount_set(&call->ref, 1);
-	INIT_WORK(&call->async_work, afs_process_async_call);
+	INIT_WORK(&call->async_work, type->async_rx ?: afs_process_async_call);
+	INIT_WORK(&call->work, call->type->work);
 	INIT_WORK(&call->free_work, afs_deferred_free_worker);
 	init_waitqueue_head(&call->waitq);
 	spin_lock_init(&call->state_lock);
@@ -235,27 +236,12 @@ void afs_deferred_put_call(struct afs_call *call)
 		schedule_work(&call->free_work);
 }
 
-static struct afs_call *afs_get_call(struct afs_call *call,
-				     enum afs_call_trace why)
-{
-	int r;
-
-	__refcount_inc(&call->ref, &r);
-
-	trace_afs_call(call->debug_id, why, r + 1,
-		       atomic_read(&call->net->nr_outstanding_calls),
-		       __builtin_return_address(0));
-	return call;
-}
-
 /*
  * Queue the call for actual work.
  */
 static void afs_queue_call_work(struct afs_call *call)
 {
 	if (call->type->work) {
-		INIT_WORK(&call->work, call->type->work);
-
 		afs_get_call(call, afs_call_trace_work);
 		if (!queue_work(afs_wq, &call->work))
 			afs_put_call(call);
@@ -430,11 +416,16 @@ void afs_make_call(struct afs_call *call, gfp_t gfp)
 	return;
 
 error_do_abort:
-	if (ret != -ECONNABORTED) {
+	if (ret != -ECONNABORTED)
 		rxrpc_kernel_abort_call(call->net->socket, rxcall,
 					RX_USER_ABORT, ret,
 					afs_abort_send_data_error);
-	} else {
+	if (call->async) {
+		afs_see_call(call, afs_call_trace_async_abort);
+		return;
+	}
+
+	if (ret == -ECONNABORTED) {
 		len = 0;
 		iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0);
 		rxrpc_kernel_recv_data(call->net->socket, rxcall,
@@ -445,8 +436,10 @@ error_do_abort:
 	call->error = ret;
 	trace_afs_call_done(call);
 error_kill_call:
-	if (call->type->done)
-		call->type->done(call);
+	if (call->async)
+		afs_see_call(call, afs_call_trace_async_kill);
+	if (call->type->immediate_cancel)
+		call->type->immediate_cancel(call);
 
 	/* We need to dispose of the extra ref we grabbed for an async call.
 	 * The call, however, might be queued on afs_async_calls and we need to
@@ -501,7 +494,7 @@ static void afs_log_error(struct afs_call *call, s32 remote_abort)
 /*
  * deliver messages to a call
  */
-static void afs_deliver_to_call(struct afs_call *call)
+void afs_deliver_to_call(struct afs_call *call)
 {
 	enum afs_call_state state;
 	size_t len;
@@ -602,7 +595,6 @@ local_abort:
 	abort_code = 0;
 call_complete:
 	afs_set_call_complete(call, ret, remote_abort);
-	state = AFS_CALL_COMPLETE;
 	goto done;
 }
 
@@ -803,6 +795,7 @@ static int afs_deliver_cm_op_id(struct afs_call *call)
 		return -ENOTSUPP;
 
 	trace_afs_cb_call(call);
+	call->work.func = call->type->work;
 
 	/* pass responsibility for the remainer of this message off to the
 	 * cache manager op */