author		Jens Axboe <axboe@kernel.dk>	2021-10-14 23:39:59 +0300
committer	Jens Axboe <axboe@kernel.dk>	2021-12-04 00:51:29 +0300
commit		0a467d0fdd9594fbb449ebc93852533332c528fd (patch)
tree		fd368b2ce4f835da5d78e1bbd381b38aa98fd078 /block
parent		ceaa762527f41a431b552bc000de4b626d2d8cb7 (diff)
download	linux-0a467d0fdd9594fbb449ebc93852533332c528fd.tar.xz
block: switch to atomic_t for request references
refcount_t is not as expensive as it used to be, but it's still more
expensive than the io_uring method of using atomic_t and just checking
for potential over/underflow.

This borrows that same implementation, which in turn is based on the
mm implementation from Linus.

Reviewed-by: Keith Busch <kbusch@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
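As a rough sketch of the pattern the message describes (not code from this
patch), the same idea can be written against C11 atomics in userspace. The
obj type, the helper names, and the stderr warning below are invented for
illustration; only the over/underflow predicate mirrors the one the commit
adds to blk.h:

/*
 * Hypothetical userspace analogue of the patch's refcounting scheme:
 * a plain atomic counter plus a cheap sanity check, instead of
 * refcount_t's saturating arithmetic.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int ref;
};

/* True when the count is 0 or has underflowed by up to 127. */
#define ref_zero_or_close_to_overflow(o) \
	((unsigned int) atomic_load(&(o)->ref) + 127u <= 127u)

/* Take a reference only if at least one is already held. */
static bool obj_get(struct obj *o)
{
	int v = atomic_load(&o->ref);

	while (v != 0)
		if (atomic_compare_exchange_weak(&o->ref, &v, v + 1))
			return true;
	return false;
}

/* Drop a reference; returns true when this was the last one. */
static bool obj_put(struct obj *o)
{
	if (ref_zero_or_close_to_overflow(o))
		fprintf(stderr, "ref over/underflow detected\n");
	return atomic_fetch_sub(&o->ref, 1) == 1;
}

int main(void)
{
	struct obj o;

	atomic_init(&o.ref, 1);		/* owner holds the initial ref */
	if (obj_get(&o))		/* count: 1 -> 2 */
		obj_put(&o);		/* count: 2 -> 1 */
	if (obj_put(&o))		/* count: 1 -> 0, last reference */
		printf("last reference dropped, object can be freed\n");
	return 0;
}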
Diffstat (limited to 'block')
-rw-r--r--	block/blk-flush.c	 4
-rw-r--r--	block/blk-mq-tag.c	 2
-rw-r--r--	block/blk-mq.c		12
-rw-r--r--	block/blk.h		31
4 files changed, 40 insertions(+), 9 deletions(-)
diff --git a/block/blk-flush.c b/block/blk-flush.c
index f78bb39e589e..e4df894189ce 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -229,7 +229,7 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
	/* release the tag's ownership to the req cloned from */
	spin_lock_irqsave(&fq->mq_flush_lock, flags);
-	if (!refcount_dec_and_test(&flush_rq->ref)) {
+	if (!req_ref_put_and_test(flush_rq)) {
		fq->rq_status = error;
		spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
		return;
@@ -349,7 +349,7 @@ static void blk_kick_flush(struct request_queue *q, struct blk_flush_queue *fq,
	 * and READ flush_rq->end_io
	 */
	smp_wmb();
-	refcount_set(&flush_rq->ref, 1);
+	req_ref_set(flush_rq, 1);
	blk_flush_queue_rq(flush_rq, false);
}
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 995336abee33..380e2dd31bfc 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -228,7 +228,7 @@ static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
-	if (!rq || rq->tag != bitnr || !refcount_inc_not_zero(&rq->ref))
+	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index fc4520e992b1..8c7cab75229e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -394,7 +394,7 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
	INIT_LIST_HEAD(&rq->queuelist);
	/* tag was already set */
	WRITE_ONCE(rq->deadline, 0);
-	refcount_set(&rq->ref, 1);
+	req_ref_set(rq, 1);
	if (rq->rq_flags & RQF_ELV) {
		struct elevator_queue *e = data->q->elevator;
@@ -642,7 +642,7 @@ void blk_mq_free_request(struct request *rq)
	rq_qos_done(q, rq);
	WRITE_ONCE(rq->state, MQ_RQ_IDLE);
-	if (refcount_dec_and_test(&rq->ref))
+	if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
EXPORT_SYMBOL_GPL(blk_mq_free_request);
@@ -938,7 +938,7 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
		rq_qos_done(rq->q, rq);
		WRITE_ONCE(rq->state, MQ_RQ_IDLE);
-		if (!refcount_dec_and_test(&rq->ref))
+		if (!req_ref_put_and_test(rq))
			continue;
		blk_crypto_free_request(rq);
@@ -1401,7 +1401,7 @@ void blk_mq_put_rq_ref(struct request *rq)
{
	if (is_flush_rq(rq))
		rq->end_io(rq, 0);
-	else if (refcount_dec_and_test(&rq->ref))
+	else if (req_ref_put_and_test(rq))
		__blk_mq_free_request(rq);
}
@@ -3049,7 +3049,7 @@ static void blk_mq_clear_rq_mapping(struct blk_mq_tags *drv_tags,
			unsigned long rq_addr = (unsigned long)rq;
			if (rq_addr >= start && rq_addr < end) {
-				WARN_ON_ONCE(refcount_read(&rq->ref) != 0);
+				WARN_ON_ONCE(req_ref_read(rq) != 0);
				cmpxchg(&drv_tags->rqs[i], rq, NULL);
			}
		}
@@ -3383,7 +3383,7 @@ static void blk_mq_clear_flush_rq_mapping(struct blk_mq_tags *tags,
	if (!tags)
		return;
-	WARN_ON_ONCE(refcount_read(&flush_rq->ref) != 0);
+	WARN_ON_ONCE(req_ref_read(flush_rq) != 0);
	for (i = 0; i < queue_depth; i++)
		cmpxchg(&tags->rqs[i], flush_rq, NULL);
diff --git a/block/blk.h b/block/blk.h
index a55d82c3d1c2..24d8b333bb03 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -461,4 +461,35 @@ static inline bool should_fail_request(struct block_device *part,
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */
+/*
+ * Optimized request reference counting. Ideally we'd make timeouts be more
+ * clever, as that's the only reason we need references at all... But until
+ * this happens, this is faster than using refcount_t. Also see:
+ *
+ * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
+ */
+#define req_ref_zero_or_close_to_overflow(req)	\
+	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
+
+static inline bool req_ref_inc_not_zero(struct request *req)
+{
+	return atomic_inc_not_zero(&req->ref);
+}
+
+static inline bool req_ref_put_and_test(struct request *req)
+{
+	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
+	return atomic_dec_and_test(&req->ref);
+}
+
+static inline void req_ref_set(struct request *req, int value)
+{
+	atomic_set(&req->ref, value);
+}
+
+static inline int req_ref_read(struct request *req)
+{
+	return atomic_read(&req->ref);
+}
+
#endif /* BLK_INTERNAL_H */
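
A closing note on the predicate the patch introduces, since its name can
mislead: read as unsigned arithmetic, (unsigned int) v + 127u <= 127u holds
exactly when the signed counter v lies in [-127, 0]. The WARN_ON_ONCE in
req_ref_put_and_test() therefore fires when the count is already zero or has
underflowed past it, and a counter that wraps all the way around is trapped
once it re-enters that window. A standalone check of the arithmetic (the
function name is invented for this example):

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Same arithmetic as req_ref_zero_or_close_to_overflow(), on a raw int. */
static int zero_or_close_to_overflow(int v)
{
	return (unsigned int) v + 127u <= 127u;
}

int main(void)
{
	assert(zero_or_close_to_overflow(0));		/* count already hit zero */
	assert(zero_or_close_to_overflow(-1));		/* a put raced past zero */
	assert(zero_or_close_to_overflow(-127));	/* edge of the window */
	assert(!zero_or_close_to_overflow(-128));	/* outside the window */
	assert(!zero_or_close_to_overflow(1));		/* a live reference */
	assert(!zero_or_close_to_overflow(INT_MAX));	/* not flagged until it wraps */
	printf("warning window is [-127, 0]\n");
	return 0;
}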