author    Miklos Szeredi <mszeredi@suse.cz>    2015-07-01 17:25:59 +0300
committer Miklos Szeredi <mszeredi@suse.cz>    2015-07-01 17:25:59 +0300
commit    41f982747e8175a4eb5e8d1939bdbb10f435b7f6 (patch)
tree      3816ed2b40f2f1d3ace14d7290789d458c7c4ed4 /fs
parent    b716d425385ed392adc8e619020c1d77ae5ec1cb (diff)
download  linux-41f982747e8175a4eb5e8d1939bdbb10f435b7f6.tar.xz
fuse: rework abort
Splice fc->pending and fc->processing lists into a common kill list while
holding fc->lock.

By the time we release fc->lock, pending and processing lists are empty and
the io list contains only locked requests.

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Reviewed-by: Ashish Samant <ashish.samant@oracle.com>
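The "kill list" the message describes is the standard kernel idiom of splicing
shared lists onto a local list head while the protecting lock is held, then
draining the local copy afterwards. The following is a minimal sketch of that
idiom only, not code from the patch: struct req and kill_queued() are
hypothetical stand-ins for struct fuse_req and the abort path, and kernel
context is assumed for the list and spinlock primitives.

/*
 * Illustrative sketch, not part of the patch.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct req {
	struct list_head list;
	/* ... request state ... */
};

static void kill_queued(spinlock_t *lock, struct list_head *pending,
			struct list_head *processing)
{
	LIST_HEAD(to_end);		/* local kill list */
	struct req *req, *next;

	spin_lock(lock);
	/*
	 * Move everything onto the local list while the lock is held.
	 * Afterwards pending and processing are empty, so no other path
	 * can find these requests through the shared lists.
	 */
	list_splice_init(pending, &to_end);
	list_splice_init(processing, &to_end);
	spin_unlock(lock);

	/* The kill list is now private to this thread. */
	list_for_each_entry_safe(req, next, &to_end, list) {
		list_del_init(&req->list);
		/* finish the request here: wake waiters, drop references */
	}
}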
Diffstat (limited to 'fs')
-rw-r--r--  fs/fuse/dev.c  21
1 file changed, 10 insertions, 11 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index fc3268b65bf8..6cb0b0bc9029 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2101,9 +2101,6 @@ static void end_polls(struct fuse_conn *fc)
* asynchronous request and the tricky deadlock (see
* Documentation/filesystems/fuse.txt).
*
- * Request progression from one list to the next is prevented by fc->connected
- * being false.
- *
* Aborting requests under I/O goes as follows: 1: Separate out unlocked
* requests, they should be finished off immediately. Locked requests will be
* finished after unlock; see unlock_request(). 2: Finish off the unlocked
@@ -2116,7 +2113,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
spin_lock(&fc->lock);
if (fc->connected) {
struct fuse_req *req, *next;
- LIST_HEAD(to_end);
+ LIST_HEAD(to_end1);
+ LIST_HEAD(to_end2);
fc->connected = 0;
fc->blocked = 0;
@@ -2126,19 +2124,20 @@ void fuse_abort_conn(struct fuse_conn *fc)
spin_lock(&req->waitq.lock);
set_bit(FR_ABORTED, &req->flags);
if (!test_bit(FR_LOCKED, &req->flags))
- list_move(&req->list, &to_end);
+ list_move(&req->list, &to_end1);
spin_unlock(&req->waitq.lock);
}
- while (!list_empty(&to_end)) {
- req = list_first_entry(&to_end, struct fuse_req, list);
+ fc->max_background = UINT_MAX;
+ flush_bg_queue(fc);
+ list_splice_init(&fc->pending, &to_end2);
+ list_splice_init(&fc->processing, &to_end2);
+ while (!list_empty(&to_end1)) {
+ req = list_first_entry(&to_end1, struct fuse_req, list);
__fuse_get_request(req);
request_end(fc, req);
spin_lock(&fc->lock);
}
- fc->max_background = UINT_MAX;
- flush_bg_queue(fc);
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
+ end_requests(fc, &to_end2);
while (forget_pending(fc))
kfree(dequeue_forget(fc, 1, NULL));
end_polls(fc);
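As a companion to the comment and the first loop in the hunk above, here is an
isolated sketch of step 1 of the abort sequence: mark each in-flight request
aborted and pull out only the ones that are not locked, leaving locked
requests to be finished from unlock_request(). This is an illustration, not
code from the patch; the helper name is hypothetical, while the field and flag
names are the ones visible in the diff.

static void separate_unlocked(struct list_head *io, struct list_head *to_end1)
{
	struct fuse_req *req, *next;

	list_for_each_entry_safe(req, next, io, list) {
		/* Per-request lock protects the FR_* flags. */
		spin_lock(&req->waitq.lock);
		set_bit(FR_ABORTED, &req->flags);
		if (!test_bit(FR_LOCKED, &req->flags))
			/* Not under I/O: safe to end right away. */
			list_move(&req->list, to_end1);
		spin_unlock(&req->waitq.lock);
	}
}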