Diffstat (limited to 'io_uring/uring_cmd.c')
-rw-r--r--  io_uring/uring_cmd.c  68
1 file changed, 24 insertions(+), 44 deletions(-)
diff --git a/io_uring/uring_cmd.c b/io_uring/uring_cmd.c
index af842e9b4eb9..e6701b7aa147 100644
--- a/io_uring/uring_cmd.c
+++ b/io_uring/uring_cmd.c
@@ -16,26 +16,15 @@
#include "rsrc.h"
#include "uring_cmd.h"
-static struct uring_cache *io_uring_async_get(struct io_kiocb *req)
-{
- struct io_ring_ctx *ctx = req->ctx;
- struct uring_cache *cache;
-
- cache = io_alloc_cache_get(&ctx->uring_cache);
- if (cache) {
- req->flags |= REQ_F_ASYNC_DATA;
- req->async_data = cache;
- return cache;
- }
- if (!io_alloc_async_data(req))
- return req->async_data;
- return NULL;
-}
-
static void io_req_uring_cleanup(struct io_kiocb *req, unsigned int issue_flags)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct uring_cache *cache = req->async_data;
+ struct io_uring_cmd_data *cache = req->async_data;
+
+ if (cache->op_data) {
+ kfree(cache->op_data);
+ cache->op_data = NULL;
+ }
if (issue_flags & IO_URING_F_UNLOCKED)
return;
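
The uring_cache type is replaced by io_uring_cmd_data, which pairs the cached SQE storage with an opaque per-command pointer that this cleanup path now frees. A minimal sketch of the layout the hunk assumes, inferred from how the patch uses cache->sqes and cache->op_data (the authoritative definition lives in the io_uring headers):

struct io_uring_cmd_data {
	struct io_uring_sqe	sqes[2];	/* stable SQE copy; two entries cover 128-byte SQEs */
	void			*op_data;	/* per-command state, kfree()d on cleanup */
};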
@@ -65,9 +54,6 @@ bool io_uring_try_cancel_uring_cmd(struct io_ring_ctx *ctx,
continue;
if (cmd->flags & IORING_URING_CMD_CANCELABLE) {
- /* ->sqe isn't available if no async data */
- if (!req_has_async_data(req))
- cmd->sqe = NULL;
file->f_op->uring_cmd(cmd, IO_URING_F_CANCEL |
IO_URING_F_COMPLETE_DEFER);
ret = true;
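
The removed NULL-ing guarded against cmd->sqe pointing into the submission ring: before this change, a request that never went async had no private SQE copy, so the pointer could be stale by cancel time. With prep now copying the SQE unconditionally (see the prep hunk below), async data is always present and the pointer always stable. As a sketch, assuming the helper keeps its historical one-line form, the removed check keyed off:

static inline bool req_has_async_data(struct io_kiocb *req)
{
	/* REQ_F_ASYNC_DATA is now set for every uring_cmd at prep time */
	return req->flags & REQ_F_ASYNC_DATA;
}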
@@ -121,7 +107,7 @@ static void io_uring_cmd_work(struct io_kiocb *req, struct io_tw_state *ts)
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
unsigned int flags = IO_URING_F_COMPLETE_DEFER;
- if (current->flags & (PF_EXITING | PF_KTHREAD))
+ if (io_should_terminate_tw())
flags |= IO_URING_F_TASK_DEAD;
/* task_work executor checks the deferred list completion */
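
The open-coded task-flags test moves behind a helper; assuming io_should_terminate_tw() simply wraps the condition it replaces, it reads roughly like:

static inline bool io_should_terminate_tw(void)
{
	/*
	 * Stop running task_work if the submitting task is exiting
	 * (PF_EXITING), or if this is the kthread-based fallback path
	 * (PF_KTHREAD) rather than the original task.
	 */
	return current->flags & (PF_KTHREAD | PF_EXITING);
}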
@@ -183,20 +169,22 @@ static int io_uring_cmd_prep_setup(struct io_kiocb *req,
const struct io_uring_sqe *sqe)
{
struct io_uring_cmd *ioucmd = io_kiocb_to_cmd(req, struct io_uring_cmd);
- struct uring_cache *cache;
+ struct io_uring_cmd_data *cache;
- cache = io_uring_async_get(req);
- if (unlikely(!cache))
+ cache = io_uring_alloc_async_data(&req->ctx->uring_cache, req);
+ if (!cache)
return -ENOMEM;
-
- if (!(req->flags & REQ_F_FORCE_ASYNC)) {
- /* defer memcpy until we need it */
- ioucmd->sqe = sqe;
- return 0;
- }
-
- memcpy(req->async_data, sqe, uring_sqe_size(req->ctx));
- ioucmd->sqe = req->async_data;
+ cache->op_data = NULL;
+
+ /*
+ * Unconditionally cache the SQE for now - this is only needed for
+ * requests that go async, but prep handlers must ensure that any
+ * SQE data is stable beyond prep. Since uring_cmd doesn't read in
+ * per-op data at prep time, play it safe and copy the SQE here.
+ * This can later get relaxed.
+ */
+ memcpy(cache->sqes, sqe, uring_sqe_size(req->ctx));
+ ioucmd->sqe = cache->sqes;
return 0;
}
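
The bespoke io_uring_async_get() gives way to a generic allocator shared with other opcodes. A sketch of what it plausibly does, modeled on the helper it replaces (the cache-miss sizing is an assumption; the real helper may size the allocation per opcode):

static inline void *io_uring_alloc_async_data(struct io_alloc_cache *cache,
					      struct io_kiocb *req)
{
	/* Try the per-ring cache first, then fall back to the heap. */
	req->async_data = io_alloc_cache_get(cache);
	if (!req->async_data)
		req->async_data = kmalloc(sizeof(struct io_uring_cmd_data),
					  GFP_KERNEL);
	if (req->async_data)
		req->flags |= REQ_F_ASYNC_DATA;
	return req->async_data;
}

Note that the REQ_F_ASYNC_DATA bookkeeping from the deleted io_uring_async_get() moves inside the shared helper, which is why the caller shrinks to a single allocation-plus-NULL-check.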
@@ -259,16 +247,8 @@ int io_uring_cmd(struct io_kiocb *req, unsigned int issue_flags)
}
ret = file->f_op->uring_cmd(ioucmd, issue_flags);
- if (ret == -EAGAIN) {
- struct uring_cache *cache = req->async_data;
-
- if (ioucmd->sqe != (void *) cache)
- memcpy(cache, ioucmd->sqe, uring_sqe_size(req->ctx));
- return -EAGAIN;
- } else if (ret == -EIOCBQUEUED) {
- return -EIOCBQUEUED;
- }
-
+ if (ret == -EAGAIN || ret == -EIOCBQUEUED)
+ return ret;
if (ret < 0)
req_set_fail(req);
io_req_uring_cleanup(req, issue_flags);
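
Because prep already produced a stable SQE copy, the -EAGAIN path no longer has to re-snapshot the SQE before asking for a retry. From a driver's point of view the contract looks like the sketch below, where my_cmd, my_dev_ready() and my_dev_queue() are hypothetical stand-ins (io_uring_sqe_cmd() is the real accessor for the SQE's command payload):

static int my_uring_cmd(struct io_uring_cmd *ioucmd, unsigned int issue_flags)
{
	const struct my_cmd *mc = io_uring_sqe_cmd(ioucmd->sqe);

	if (!my_dev_ready(mc))
		return -EAGAIN;		/* retried later; ioucmd->sqe stays valid */

	if (my_dev_queue(mc, ioucmd))
		return -EIOCBQUEUED;	/* completed later via io_uring_cmd_done() */

	return 0;			/* completed inline */
}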
@@ -353,7 +333,7 @@ int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags)
if (!prot || !prot->ioctl)
return -EOPNOTSUPP;
- switch (cmd->sqe->cmd_op) {
+ switch (cmd->cmd_op) {
case SOCKET_URING_OP_SIOCINQ:
ret = prot->ioctl(sk, SIOCINQ, &arg);
if (ret)
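
Dereferencing cmd->sqe->cmd_op is replaced by the cmd_op value that prep caches in struct io_uring_cmd, so the handler no longer chases the SQE pointer at issue time. Userspace is unaffected; a minimal liburing caller for the SOCKET_URING_OP_SIOCINQ path shown above looks roughly like this (error handling trimmed):

#include <liburing.h>

/* Return the number of bytes queued for reading on sockfd, or -errno. */
static int pending_bytes(struct io_uring *ring, int sockfd)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
	struct io_uring_cqe *cqe;
	int ret;

	io_uring_prep_cmd_sock(sqe, SOCKET_URING_OP_SIOCINQ, sockfd, 0, 0, NULL, 0);
	io_uring_submit(ring);

	ret = io_uring_wait_cqe(ring, &cqe);
	if (ret < 0)
		return ret;
	ret = cqe->res;			/* SIOCINQ result */
	io_uring_cqe_seen(ring, cqe);
	return ret;
}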