author      Pavel Begunkov <asml.silence@gmail.com>    2021-08-27 13:55:01 +0300
committer   Jens Axboe <axboe@kernel.dk>               2021-08-27 16:29:41 +0300
commit      9a10867ae54e02a0f204d2eebea5a446fb7a86f9
tree        6e89fed58538efb603f1edd294cbb25d850e6ea9 /fs
parent      a8295b982c46d4a7c259a4cdd58a2681929068a9
download    linux-9a10867ae54e02a0f204d2eebea5a446fb7a86f9.tar.xz
io_uring: add task-refs-get helper
Task referencing has become more complicated: apart from taking normal
task references, it involves taking tctx->inflight and caching all of
that. Isolate all of this in helpers.
Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/d9114d037f1c195897aa13f38a496078eca2afdb.1630023531.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'fs')
-rw-r--r--  fs/io_uring.c | 30 +++++++++++++++++++-----------
1 file changed, 19 insertions(+), 11 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 1341b714ed12..5059049da242 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1670,6 +1670,24 @@ static inline void io_put_task(struct task_struct *task, int nr)
 	}
 }
 
+static void io_task_refs_refill(struct io_uring_task *tctx)
+{
+	unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
+
+	percpu_counter_add(&tctx->inflight, refill);
+	refcount_add(refill, &current->usage);
+	tctx->cached_refs += refill;
+}
+
+static inline void io_get_task_refs(int nr)
+{
+	struct io_uring_task *tctx = current->io_uring;
+
+	tctx->cached_refs -= nr;
+	if (unlikely(tctx->cached_refs < 0))
+		io_task_refs_refill(tctx);
+}
+
 static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 				     long res, unsigned int cflags)
 {
@@ -6890,25 +6908,15 @@ static const struct io_uring_sqe *io_get_sqe(struct io_ring_ctx *ctx)
 static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr)
 	__must_hold(&ctx->uring_lock)
 {
-	struct io_uring_task *tctx;
 	int submitted = 0;
 
 	/* make sure SQ entry isn't read before tail */
 	nr = min3(nr, ctx->sq_entries, io_sqring_entries(ctx));
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
+	io_get_task_refs(nr);
 
-	tctx = current->io_uring;
-	tctx->cached_refs -= nr;
-	if (unlikely(tctx->cached_refs < 0)) {
-		unsigned int refill = -tctx->cached_refs + IO_TCTX_REFS_CACHE_NR;
-
-		percpu_counter_add(&tctx->inflight, refill);
-		refcount_add(refill, &current->usage);
-		tctx->cached_refs += refill;
-	}
 	io_submit_state_start(&ctx->submit_state, nr);
-
 	while (submitted < nr) {
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
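The refill arithmetic keeps a simple invariant: whenever the per-task cache
goes negative, io_task_refs_refill() tops it back up to exactly
IO_TCTX_REFS_CACHE_NR while charging the whole batch to tctx->inflight and
current->usage in one go. Below is a minimal user-space sketch of the same
batching pattern; REFS_CACHE_NR, struct task_refs and the atomic "usage"
counter are illustrative stand-ins for the kernel's types, and the batch
size is arbitrary for the demo.

/*
 * Minimal user-space sketch of the batched reference pattern above.
 * REFS_CACHE_NR, struct task_refs and the atomic "usage" counter are
 * illustrative stand-ins for IO_TCTX_REFS_CACHE_NR, the io_uring task
 * context and current->usage; none of this is kernel API.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define REFS_CACHE_NR	1024	/* arbitrary batch size for the demo */

struct task_refs {
	atomic_uint usage;	/* expensive shared counter (cf. current->usage) */
	int cached_refs;	/* cheap task-local cache (cf. tctx->cached_refs) */
};

/* Slow path: top the local cache back up to exactly REFS_CACHE_NR. */
static void task_refs_refill(struct task_refs *t)
{
	unsigned int refill = -t->cached_refs + REFS_CACHE_NR;

	atomic_fetch_add(&t->usage, refill);	/* one bulk charge */
	t->cached_refs += refill;		/* cache ends at REFS_CACHE_NR */
}

/* Fast path: hand out nr references from the local cache. */
static void get_task_refs(struct task_refs *t, int nr)
{
	t->cached_refs -= nr;
	if (t->cached_refs < 0)		/* the kernel marks this unlikely() */
		task_refs_refill(t);
}

int main(void)
{
	struct task_refs t = { .usage = 0, .cached_refs = 0 };

	get_task_refs(&t, 8);	/* cache underflows to -8, refill restores it */
	assert(t.cached_refs == REFS_CACHE_NR);
	assert(atomic_load(&t.usage) == REFS_CACHE_NR + 8);
	printf("usage=%u cached_refs=%d\n",
	       atomic_load(&t.usage), t.cached_refs);
	return 0;
}

The design choice mirrored here is that the common submission path costs a
single subtraction and branch, while the expensive percpu_counter_add() and
refcount_add() slow path runs at most once per ~REFS_CACHE_NR references.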