author	Jens Axboe <axboe@kernel.dk>	2020-01-08 21:01:46 +0300
committer	Jens Axboe <axboe@kernel.dk>	2020-01-21 03:04:04 +0300
commit	69b3e546139a21b3046b6bf0cb79d5e8c9a3fa75 (patch)
tree	4b68c9c19e9036685ef44aded640ae350b012f7c
parent	c150368b496837cb207712e78f903ccfd7633b93 (diff)
download	linux-69b3e546139a21b3046b6bf0cb79d5e8c9a3fa75.tar.xz
io_uring: change io_ring_ctx bool fields into bit fields
In preparation for adding another one, which would make us spill into another long (and hence bump the size of the ctx), change them to bit fields.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
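For context on the size argument in the message above, here is a minimal userspace sketch (hypothetical ctx_bools/ctx_bits structs, not the real io_ring_ctx, and unsigned bit fields to keep the sketch sign-safe): each bool member takes a byte and forces padding once a fifth flag is added, while 1-bit fields pack into a single unsigned int, so one more flag does not grow the struct.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the relevant slice of io_ring_ctx. */
struct ctx_bools {
	unsigned int flags;
	bool compat;
	bool account_mem;
	bool cq_overflow_flushed;
	bool drain_next;
	bool one_more;			/* the "another one" the commit prepares for */
};

struct ctx_bits {
	unsigned int flags;
	unsigned int compat: 1;
	unsigned int account_mem: 1;
	unsigned int cq_overflow_flushed: 1;
	unsigned int drain_next: 1;
	unsigned int one_more: 1;	/* shares the same storage unit */
};

int main(void)
{
	/* Typically prints 12 vs 8 on x86-64: the bit-field version does not
	 * spill into another word when the fifth flag is added. */
	printf("bools: %zu bytes, bit fields: %zu bytes\n",
	       sizeof(struct ctx_bools), sizeof(struct ctx_bits));
	return 0;
}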
-rw-r--r--	fs/io_uring.c	14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2c036972930f..42bf83b3fbd5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -202,10 +202,10 @@ struct io_ring_ctx {
struct {
unsigned int flags;
- bool compat;
- bool account_mem;
- bool cq_overflow_flushed;
- bool drain_next;
+ int compat: 1;
+ int account_mem: 1;
+ int cq_overflow_flushed: 1;
+ int drain_next: 1;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -994,7 +994,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
/* if force is set, the ring is going away. always drop after that */
if (force)
- ctx->cq_overflow_flushed = true;
+ ctx->cq_overflow_flushed = 1;
cqe = NULL;
while (!list_empty(&ctx->cq_overflow_list)) {
@@ -4489,9 +4489,9 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (unlikely(req->ctx->drain_next)) {
req->flags |= REQ_F_IO_DRAIN;
- req->ctx->drain_next = false;
+ req->ctx->drain_next = 0;
}
- req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK);
+ req->ctx->drain_next = (req->flags & REQ_F_DRAIN_LINK) != 0;
ret = io_req_defer(req, sqe);
if (ret) {
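One hedged note on the last hunk, illustrated with the standalone sketch below (the flag value and struct are made up; it only assumes REQ_F_DRAIN_LINK is a flag bit above bit 0, which is what the added `!= 0` implies): assigning the masked flag word directly to a 1-bit field would keep only bit 0 and silently store 0, so the comparison normalizes the result to 0 or 1 first.

#include <stdio.h>

#define REQ_F_DRAIN_LINK	(1U << 6)	/* hypothetical bit position, for illustration only */

struct ctx {
	unsigned int drain_next: 1;	/* unsigned here; the patch uses a signed 1-bit field */
};

int main(void)
{
	struct ctx c = { 0 };
	unsigned int flags = REQ_F_DRAIN_LINK;

	c.drain_next = flags & REQ_F_DRAIN_LINK;	/* truncated to bit 0: stores 0 */
	printf("raw assign: %u\n", c.drain_next);

	c.drain_next = (flags & REQ_F_DRAIN_LINK) != 0;	/* normalized: stores 1 */
	printf("with != 0:  %u\n", c.drain_next);
	return 0;
}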