author     Linus Torvalds <torvalds@linux-foundation.org>  2021-04-29 00:56:09 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-04-29 00:56:09 +0300
commit     625434dafdd97372d15de21972be4b682709e854 (patch)
tree       4cdc02f8732525e17024877ff9fbd9c67d9faf7f /kernel
parent     c05a182bf45681c5529a58c71ce5647535b3ae7a (diff)
parent     7b289c38335ec7bebe45ed31137d596c808e23ac (diff)
download   linux-625434dafdd97372d15de21972be4b682709e854.tar.xz
Merge tag 'for-5.13/io_uring-2021-04-27' of git://git.kernel.dk/linux-block
Pull io_uring updates from Jens Axboe:
- Support for multi-shot mode for POLL requests (a hedged liburing sketch
  follows this list)
- More efficient reference counting. This is shamelessly stolen from
  the mm side. Even though referencing is mostly single/dual user, the
  128 count was retained to keep the code the same. Maybe this
  should/could be made generic at some point (a sketch of the borrowed
  overflow check also follows this list).
- Removal of the need to have a manager thread for each ring. The
  manager thread's only job was checking for and creating new io-threads
  as needed; we now handle this from the queueing path instead.
- Allow SQPOLL without CAP_SYS_ADMIN or CAP_SYS_NICE. Since 5.12, this
thread is "just" a regular application thread, so no need to restrict
use of it anymore.
- Cleanup of how internal async poll data lifetime is managed.
- Fix for a syzbot-reported crash on SQPOLL cancellation.
- Make buffer registration more like file registration, adding the
  flexibility to update buffers without unregistering and re-registering
  the full set (see the registered-buffer sketch after the commit list
  below).
- Fix for io-wq affinity setting.
- Be a bit more defensive in task->pf_io_worker setup.
- Various SQPOLL fixes.
- Cleanup of SQPOLL creds handling.
- Improvements to in-flight request tracking.
- File registration cleanups.
- Tons of cleanups and little fixes
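
To make the multi-shot POLL item above concrete, here is a minimal
user-space sketch. It assumes a liburing recent enough to provide
io_uring_prep_poll_multishot() (the raw ABI flag is
IORING_POLL_ADD_MULTI); the file descriptor, the queue depth of 8, and
the POLLIN mask are arbitrary example choices, not anything mandated by
the series:

#include <poll.h>
#include <stdio.h>
#include <liburing.h>

int poll_multishot(int fd)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0)
		return ret;

	/* One SQE arms the poll; it stays armed across completions. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_poll_multishot(sqe, fd, POLLIN);
	io_uring_submit(&ring);

	for (;;) {
		ret = io_uring_wait_cqe(&ring, &cqe);
		if (ret < 0)
			break;
		printf("poll events: 0x%x\n", cqe->res);
		/* IORING_CQE_F_MORE clear: the poll is no longer armed. */
		if (!(cqe->flags & IORING_CQE_F_MORE)) {
			io_uring_cqe_seen(&ring, cqe);
			break;
		}
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return ret;
}

The single SQE keeps generating CQEs until the kernel stops setting
IORING_CQE_F_MORE, at which point the poll would have to be re-armed.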
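As for the reference-counting item, the borrowed mm-side check is, on my
reading, the page_ref_zero_or_close_to_overflow() pattern, where a 127u
slack flags a counter that is zero or about to wrap. A standalone sketch
of that check; struct req and the helper names here are invented for
illustration:

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct req {
	atomic_uint refs;
};

/*
 * True when the counter is 0 or within 127 of wrapping: either case
 * indicates a use-after-free or a leaked reference.  The unsigned
 * addition wraps for huge counts, so both ends of the range compare
 * as <= 127u.
 */
static bool ref_zero_or_close_to_overflow(struct req *r)
{
	unsigned int count = atomic_load(&r->refs);

	return count + 127u <= 127u;
}

static void req_ref_get(struct req *r)
{
	assert(!ref_zero_or_close_to_overflow(r));
	atomic_fetch_add(&r->refs, 1);
}

/* Returns true when the last reference was dropped. */
static bool req_ref_put_and_test(struct req *r)
{
	assert(!ref_zero_or_close_to_overflow(r));
	return atomic_fetch_sub(&r->refs, 1) == 1;
}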
* tag 'for-5.13/io_uring-2021-04-27' of git://git.kernel.dk/linux-block: (156 commits)
io_uring: maintain drain logic for multishot poll requests
io_uring: Check current->io_uring in io_uring_cancel_sqpoll
io_uring: fix NULL reg-buffer
io_uring: simplify SQPOLL cancellations
io_uring: fix work_exit sqpoll cancellations
io_uring: Fix uninitialized variable up.resv
io_uring: fix invalid error check after malloc
io_uring: io_sq_thread() no longer needs to reset current->pf_io_worker
kernel: always initialize task->pf_io_worker to NULL
io_uring: update sq_thread_idle after ctx deleted
io_uring: add full-fledged dynamic buffers support
io_uring: implement fixed buffers registration similar to fixed files
io_uring: prepare fixed rw for dynanic buffers
io_uring: keep table of pointers to ubufs
io_uring: add generic rsrc update with tags
io_uring: add IORING_REGISTER_RSRC
io_uring: enumerate dynamic resources
io_uring: add generic path for rsrc update
io_uring: preparation for rsrc tagging
io_uring: decouple CQE filling from requests
...
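
The fixed-buffer commits above extend the long-standing buffer
registration interface. A minimal baseline sketch using liburing's
classic io_uring_register_buffers() and io_uring_prep_read_fixed()
helpers follows; the 4096-byte size and single-iovec setup are arbitrary
example choices, and the new tag-aware IORING_REGISTER_RSRC path layers
per-buffer updates on top of this rather than replacing it:

#include <errno.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <liburing.h>

int read_with_fixed_buf(struct io_uring *ring, int fd)
{
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct iovec iov;
	int ret;

	iov.iov_len = 4096;
	iov.iov_base = malloc(iov.iov_len);
	if (!iov.iov_base)
		return -ENOMEM;

	/* Pin the buffer once; subsequent I/O refers to it by index. */
	ret = io_uring_register_buffers(ring, &iov, 1);
	if (ret)
		goto out;

	sqe = io_uring_get_sqe(ring);
	/* The last argument indexes the registered buffer table. */
	io_uring_prep_read_fixed(sqe, fd, iov.iov_base, iov.iov_len, 0, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (!ret) {
		ret = cqe->res;
		io_uring_cqe_seen(ring, cqe);
	}
	io_uring_unregister_buffers(ring);
out:
	free(iov.iov_base);
	return ret;
}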
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c      |  3
-rw-r--r--  kernel/task_work.c | 35
2 files changed, 30 insertions(+), 8 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index f98de491f013..0f1992d3f80b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -927,6 +927,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
 	tsk->splice_pipe = NULL;
 	tsk->task_frag.page = NULL;
 	tsk->wake_q.next = NULL;
+	tsk->pf_io_worker = NULL;
 
 	account_kernel_stack(tsk, 1);
 
@@ -1941,7 +1942,7 @@ static __latent_entropy struct task_struct *copy_process(
 	recalc_sigpending();
 	spin_unlock_irq(&current->sighand->siglock);
 	retval = -ERESTARTNOINTR;
-	if (signal_pending(current))
+	if (task_sigpending(current))
 		goto fork_out;
 
 	retval = -ENOMEM;
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 9cde961875c0..e9316198c64b 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -59,18 +59,17 @@ int task_work_add(struct task_struct *task, struct callback_head *work,
 }
 
 /**
- * task_work_cancel - cancel a pending work added by task_work_add()
+ * task_work_cancel_match - cancel a pending work added by task_work_add()
  * @task: the task which should execute the work
- * @func: identifies the work to remove
- *
- * Find the last queued pending work with ->func == @func and remove
- * it from queue.
+ * @match: match function to call
  *
  * RETURNS:
  * The found work or NULL if not found.
  */
 struct callback_head *
-task_work_cancel(struct task_struct *task, task_work_func_t func)
+task_work_cancel_match(struct task_struct *task,
+		       bool (*match)(struct callback_head *, void *data),
+		       void *data)
 {
 	struct callback_head **pprev = &task->task_works;
 	struct callback_head *work;
@@ -86,7 +85,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	while ((work = READ_ONCE(*pprev))) {
-		if (work->func != func)
+		if (!match(work, data))
 			pprev = &work->next;
 		else if (cmpxchg(pprev, work, work->next) == work)
 			break;
@@ -96,6 +95,28 @@ task_work_cancel(struct task_struct *task, task_work_func_t func)
 	return work;
 }
 
+static bool task_work_func_match(struct callback_head *cb, void *data)
+{
+	return cb->func == data;
+}
+
+/**
+ * task_work_cancel - cancel a pending work added by task_work_add()
+ * @task: the task which should execute the work
+ * @func: identifies the work to remove
+ *
+ * Find the last queued pending work with ->func == @func and remove
+ * it from queue.
+ *
+ * RETURNS:
+ * The found work or NULL if not found.
+ */
+struct callback_head *
+task_work_cancel(struct task_struct *task, task_work_func_t func)
+{
+	return task_work_cancel_match(task, task_work_func_match, func);
+}
+
 /**
  * task_work_run - execute the works added by task_work_add()
  *
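
For in-kernel users, the point of the new task_work_cancel_match() is
that pending work can now be cancelled by an arbitrary predicate rather
than only by ->func. A hedged sketch of a hypothetical caller follows;
struct my_work, my_work_match(), and cancel_my_work() are invented for
illustration, and only the API shape comes from the patch above:

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct my_work {
	struct callback_head cb;
	void *owner;		/* cookie identifying who queued this */
};

/* Match on the owner cookie instead of on cb->func. */
static bool my_work_match(struct callback_head *cb, void *data)
{
	struct my_work *mw = container_of(cb, struct my_work, cb);

	return mw->owner == data;
}

/* Cancel the last pending my_work queued for @owner, if any. */
static void cancel_my_work(struct task_struct *task, void *owner)
{
	struct callback_head *cb;

	cb = task_work_cancel_match(task, my_work_match, owner);
	if (cb)
		kfree(container_of(cb, struct my_work, cb));
}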