author | Pavel Begunkov <asml.silence@gmail.com> | 2020-03-04 16:14:10 +0300
committer | Jens Axboe <axboe@kernel.dk> | 2020-03-04 21:39:04 +0300
commit | 58e3931987377d3f4ec7bbc13e4ea0aab52dc6b0 (patch)
tree | 624d986e6422514759b574c5ae3b62a8dc25f8f1 /fs/io-wq.c
parent | dc026a73c7221b4d9d146ed0bde69ff578ebe8dc (diff)
download | linux-58e3931987377d3f4ec7bbc13e4ea0aab52dc6b0.tar.xz
io-wq: optimise locking in io_worker_handle_work()
There are two optimisations:
- Currently, io_worker_handle_work() calls io_assign_current_work() twice per
request, and each call adds a lock/unlock(worker->lock) pair. The first call
resets worker->cur_work to NULL, and the second sets the real work shortly
after. If there is a dependent work, set it immediately instead, which removes
the extra NULL'ing (see the sketch after this list).
- There is no point in taking wqe->lock for linked works, as they are not
hashed now. Optimise it out.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs/io-wq.c')
-rw-r--r-- | fs/io-wq.c | 15
1 file changed, 7 insertions, 8 deletions
diff --git a/fs/io-wq.c b/fs/io-wq.c
index e438dc4d7cb3..473af080470a 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -476,7 +476,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 	struct io_wq *wq = wqe->wq;
 
 	do {
-		struct io_wq_work *work, *old_work;
+		struct io_wq_work *work;
 		unsigned hash = -1U;
 
 		/*
@@ -495,12 +495,13 @@ static void io_worker_handle_work(struct io_worker *worker)
 		spin_unlock_irq(&wqe->lock);
 		if (!work)
 			break;
+		io_assign_current_work(worker, work);
 
 		/* handle a whole dependent link */
 		do {
-			io_assign_current_work(worker, work);
-			io_impersonate_work(worker, work);
+			struct io_wq_work *old_work;
 
+			io_impersonate_work(worker, work);
 			/*
 			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
 			 * work, the worker function will do the right thing.
@@ -513,10 +514,8 @@ static void io_worker_handle_work(struct io_worker *worker)
 
 			old_work = work;
 			work->func(&work);
-
-			spin_lock_irq(&worker->lock);
-			worker->cur_work = NULL;
-			spin_unlock_irq(&worker->lock);
+			work = (old_work == work) ? NULL : work;
+			io_assign_current_work(worker, work);
 
 			if (wq->put_work)
 				wq->put_work(old_work);
@@ -529,7 +528,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 				/* dependent work is not hashed */
 				hash = -1U;
 			}
-		} while (work && work != old_work);
+		} while (work);
 
 		spin_lock_irq(&wqe->lock);
 	} while (1);
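To make the `work->func(&work)` convention in the hunks above concrete, here is a small, self-contained userspace demo; it is a sketch under assumptions, not kernel code. `struct wq_work`, its `link` field and the two handlers are hypothetical stand-ins: what matters is that a handler receives a pointer to the work pointer and may replace it with a dependent work, and that the patched loop folds "pointer unchanged" into NULL so the termination check can simply be `while (work)`.

```c
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for io_wq_work: a handler that gets a pointer to
 * the work pointer and may swap in a dependent (linked) work. */
struct wq_work {
	void (*func)(struct wq_work **workptr);
	struct wq_work *link;	/* assumed field modelling the dependent work */
};

static void leaf_handler(struct wq_work **workptr)
{
	(void)workptr;		/* no dependent work: leave the pointer unchanged */
	puts("leaf work done");
}

static void linked_handler(struct wq_work **workptr)
{
	*workptr = (*workptr)->link;	/* hand the dependent work back to the worker */
	puts("linked work done, dependent work handed back");
}

int main(void)
{
	struct wq_work leaf = { .func = leaf_handler, .link = NULL };
	struct wq_work head = { .func = linked_handler, .link = &leaf };
	struct wq_work *work = &head;

	/* Mirror of the patched inner loop: an unchanged pointer means the
	 * link is finished, so turn that case into NULL and test only 'work'. */
	do {
		struct wq_work *old_work = work;

		work->func(&work);
		work = (old_work == work) ? NULL : work;
	} while (work);

	return 0;
}
```

Folding the "no dependent work" case into NULL is also what allows old_work to move from the outer loop into the inner one in the diff above.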