author    Christoph Hellwig <hch@lst.de>  2021-10-12 14:12:19 +0300
committer Jens Axboe <axboe@kernel.dk>  2021-10-18 15:17:36 +0300
commit    ef99b2d37666b7a600baab9e1c4944436652b0a2 (patch)
tree      81457d0c9620f8c7311bf76d532670fa5d53074f /fs
parent    28a1ae6b9daba6ac65700eeb38479bd6fadec089 (diff)
download  linux-ef99b2d37666b7a600baab9e1c4944436652b0a2.tar.xz
block: replace the spin argument to blk_poll with a flags argument
Switch the boolean spin argument to blk_poll to a set of flags instead. This will allow polling behavior to be controlled in a more fine-grained way.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Mark Wunderlich <mark.wunderlich@intel.com>
Link: https://lore.kernel.org/r/20211012111226.760968-10-hch@lst.de
[axboe: adapt to changed io_uring iopoll]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
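For context, the interface change looks roughly like this (a sketch; the old and new prototypes follow from the diff below, BLK_POLL_ONESHOT is the flag this series adds, and the bit value shown is an assumption -- check include/linux/blkdev.h for the authoritative definition):

    /* Before: a boolean meaning "keep spinning until a completion is found". */
    int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin);

    /* After: a flags word, so further polling behaviors can be added later. */
    #define BLK_POLL_ONESHOT	(1 << 0)	/* assumed value: poll only once */

    int blk_poll(struct request_queue *q, blk_qc_t cookie, unsigned int flags);

Note that the sense is inverted: the old spin == true corresponds to flags == 0, and spin == false corresponds to BLK_POLL_ONESHOT being set.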
Diffstat (limited to 'fs')
 -rw-r--r--  fs/io_uring.c         9
 -rw-r--r--  fs/iomap/direct-io.c  6
2 files changed, 8 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d2e86788c872..541fec2bd49a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2457,14 +2457,15 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
                         long min)
 {
         struct io_kiocb *req, *tmp;
+        unsigned int poll_flags = 0;
         LIST_HEAD(done);
-        bool spin;
 
         /*
          * Only spin for completions if we don't have multiple devices hanging
          * off our complete list, and we're under the requested amount.
          */
-        spin = !ctx->poll_multi_queue && *nr_events < min;
+        if (ctx->poll_multi_queue || *nr_events >= min)
+                poll_flags |= BLK_POLL_ONESHOT;
 
         list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) {
                 struct kiocb *kiocb = &req->rw.kiocb;
@@ -2482,11 +2483,11 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
                 if (!list_empty(&done))
                         break;
 
-                ret = kiocb->ki_filp->f_op->iopoll(kiocb, spin);
+                ret = kiocb->ki_filp->f_op->iopoll(kiocb, poll_flags);
                 if (unlikely(ret < 0))
                         return ret;
                 else if (ret)
-                        spin = false;
+                        poll_flags |= BLK_POLL_ONESHOT;
 
                 /* iopoll may have completed current req */
                 if (READ_ONCE(req->iopoll_completed))
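The io_uring hunks map the old boolean onto the new flag via De Morgan's law: the loop used to start with spin = !ctx->poll_multi_queue && *nr_events < min and drop to spin = false once a completion was found; it now starts with poll_flags = 0 and sets BLK_POLL_ONESHOT under the negated condition. A standalone sketch of the equivalence (hypothetical helper names, plain C, not kernel code):

    #include <stdbool.h>

    #define BLK_POLL_ONESHOT (1 << 0)	/* assumed value from this series */

    /* Old style: true while the caller may keep polling aggressively. */
    static bool may_spin_old(bool multi_queue, int nr_events, int min)
    {
            return !multi_queue && nr_events < min;
    }

    /* New style: the same condition, expressed as the absence of ONESHOT. */
    static unsigned int poll_flags_new(bool multi_queue, int nr_events, int min)
    {
            unsigned int flags = 0;

            if (multi_queue || nr_events >= min)	/* negation of the old test */
                    flags |= BLK_POLL_ONESHOT;
            return flags;
    }

For every input, may_spin_old() returning true is exactly the case where poll_flags_new() returns 0.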
diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
index 560ae967f70e..236aba256cd1 100644
--- a/fs/iomap/direct-io.c
+++ b/fs/iomap/direct-io.c
@@ -49,13 +49,13 @@ struct iomap_dio {
         };
 };
 
-int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
+int iomap_dio_iopoll(struct kiocb *kiocb, unsigned int flags)
 {
         struct request_queue *q = READ_ONCE(kiocb->private);
 
         if (!q)
                 return 0;
-        return blk_poll(q, READ_ONCE(kiocb->ki_cookie), spin);
+        return blk_poll(q, READ_ONCE(kiocb->ki_cookie), flags);
 }
 EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
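iomap_dio_iopoll is the helper that filesystems plug into their file_operations, which is how io_uring's f_op->iopoll call above reaches blk_poll; after this patch it simply forwards the flags word. A sketch of that wiring (illustrative only; XFS is one real user of this helper, but the struct below is hypothetical):

    /* Illustrative: how a filesystem exposes the iomap poll helper. */
    static const struct file_operations example_fops = {
            /* .read_iter / .write_iter etc. elided */
            .iopoll = iomap_dio_iopoll,	/* prototype now takes unsigned int flags */
    };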
@@ -642,7 +642,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
                         if (!(iocb->ki_flags & IOCB_HIPRI) ||
                             !dio->submit.last_queue ||
                             !blk_poll(dio->submit.last_queue,
-                                      dio->submit.cookie, true))
+                                      dio->submit.cookie, 0))
                                 blk_io_schedule();
                 }
                 __set_current_state(TASK_RUNNING);
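This last hunk is the synchronous-wait side of the conversion: passing 0 (no BLK_POLL_ONESHOT) replaces the old spin == true, i.e. the caller is willing to keep polling until its own completion shows up. Condensed, the wait pattern around the changed call looks like this (a simplified sketch of the surrounding loop in __iomap_dio_rw; error paths and bookkeeping omitted):

    for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (!READ_ONCE(dio->submit.waiter))
                    break;	/* bio completion cleared the waiter */

            /* Poll hard (flags == 0); sleep only if polling is unavailable. */
            if (!(iocb->ki_flags & IOCB_HIPRI) ||
                !dio->submit.last_queue ||
                !blk_poll(dio->submit.last_queue, dio->submit.cookie, 0))
                    blk_io_schedule();
    }
    __set_current_state(TASK_RUNNING);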