-rw-r--r--	io_uring/io_uring.c	20
1 file changed, 16 insertions, 4 deletions
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index 1016cf13a02a..445afda927f4 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -486,6 +486,7 @@ struct io_poll_iocb {
 	struct file			*file;
 	struct wait_queue_head		*head;
 	__poll_t			events;
+	int				retries;
 	struct wait_queue_entry		wait;
 };
 
@@ -5749,6 +5750,14 @@ enum {
 	IO_APOLL_READY
 };
 
+/*
+ * We can't reliably detect loops where a poll trigger is repeatedly
+ * followed by a failed issue attempt. Rather than failing such requests
+ * immediately, allow a certain number of retries before giving up. Since
+ * this condition should _rarely_ trigger even once, a larger value is fine.
+ */
+#define APOLL_MAX_RETRY	128
+
 static int io_arm_poll_handler(struct io_kiocb *req)
 {
 	const struct io_op_def *def = &io_op_defs[req->opcode];
@@ -5760,8 +5769,6 @@ static int io_arm_poll_handler(struct io_kiocb *req)
 
 	if (!req->file || !file_can_poll(req->file))
		return IO_APOLL_ABORTED;
-	if ((req->flags & (REQ_F_POLLED|REQ_F_PARTIAL_IO)) == REQ_F_POLLED)
-		return IO_APOLL_ABORTED;
 	if (!def->pollin && !def->pollout)
		return IO_APOLL_ABORTED;
 
@@ -5779,8 +5786,13 @@ static int io_arm_poll_handler(struct io_kiocb *req)
 	if (req->flags & REQ_F_POLLED) {
 		apoll = req->apoll;
 		kfree(apoll->double_poll);
+		if (unlikely(!--apoll->poll.retries)) {
+			apoll->double_poll = NULL;
+			return IO_APOLL_ABORTED;
+		}
 	} else {
 		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
+		if (unlikely(!apoll))
+			return IO_APOLL_ABORTED;
+		apoll->poll.retries = APOLL_MAX_RETRY;
 	}
-	if (unlikely(!apoll))
-		return IO_APOLL_ABORTED;
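To illustrate the retry cap outside the kernel tree, here is a minimal standalone C sketch of the pattern this patch implements: spend one unit of a fixed budget on every rearm after a spurious poll trigger, and abort to the slow path once the budget is exhausted. The struct poll_state, arm_poll_handler() and ARM_* names below are invented for the example and are not the kernel's API; only APOLL_MAX_RETRY and the decrement-on-rearm logic come from the patch itself.

#include <stdio.h>

#define APOLL_MAX_RETRY	128

/* Illustrative stand-ins, not kernel types. */
struct poll_state {
	int retries;	/* remaining rearm budget */
	int armed;	/* nonzero once a handler has been armed */
};

enum arm_result {
	ARM_OK,
	ARM_ABORTED,
};

/*
 * Arm (or rearm) the poll handler. Mirrors the patched logic in
 * io_arm_poll_handler(): a rearm spends one retry, and once the budget
 * is exhausted we abort to the slow path instead of looping forever.
 */
static enum arm_result arm_poll_handler(struct poll_state *ps)
{
	if (ps->armed) {
		/* Rearm path (REQ_F_POLLED set in the kernel). */
		if (!--ps->retries)
			return ARM_ABORTED;
	} else {
		/* First arm: hand out the full budget. */
		ps->retries = APOLL_MAX_RETRY;
		ps->armed = 1;
	}
	return ARM_OK;
}

int main(void)
{
	struct poll_state ps = { 0, 0 };
	int arms = 0;

	/* Simulate a poll that triggers spuriously forever. */
	while (arm_poll_handler(&ps) == ARM_OK)
		arms++;

	printf("aborted after %d successful arms\n", arms);
	return 0;
}

Run as-is, the loop reports 128 successful arms: the first call hands out the full budget, 127 rearms succeed, and the 128th rearm drives the counter to zero and aborts, matching the unlikely(!--apoll->poll.retries) test in the patch.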