author | Ming Lei <ming.lei@redhat.com> | 2021-05-11 18:22:34 +0300
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2021-05-24 15:47:22 +0300
commit | 2e315dc07df009c3e29d6926871f62a30cfae394 (patch) |
tree | c3fe336910932031c6114b806a2a68582fadea94 | /block/blk-mq-tag.c
parent | 84da7acc3ba53af26f15c4b0ada446127b7a7836 (diff) |
download | linux-2e315dc07df009c3e29d6926871f62a30cfae394.tar.xz |
blk-mq: grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter
Grab rq->refcount before calling ->fn in blk_mq_tagset_busy_iter(); this prevents the request from being re-used while ->fn is running. The approach is the same as the one used when handling timeouts.

This fixes request use-after-free (UAF) caused by completion races or queue release:

- If a request is referenced before rq->q is frozen, the queue won't finish freezing before the reference is released during iteration.
- If a request is referenced after rq->q has been frozen, refcount_inc_not_zero() returns false and we do not iterate over that request.

One request UAF is still not covered, however: refcount_inc_not_zero() may read an already-freed request. That case is handled in the next patch.
Tested-by: John Garry <john.garry@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Bart Van Assche <bvanassche@acm.org>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20210511152236.763464-3-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
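The pattern the commit message describes can be illustrated outside the kernel. Below is a minimal userspace C sketch of the idea, not kernel code: struct req, req_get_unless_zero(), req_put() and iterate() are hypothetical stand-ins for struct request, refcount_inc_not_zero(&rq->ref), blk_mq_put_rq_ref() and the tag iterator. The point is that the iterator only invokes the callback on objects it has pinned, so a concurrent free cannot leave the callback with a dangling pointer.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct request: only the fields the sketch needs. */
struct req {
	atomic_uint ref;	/* stand-in for rq->ref */
	int tag;
};

/* Rough analogue of refcount_inc_not_zero(&rq->ref). */
static bool req_get_unless_zero(struct req *rq)
{
	unsigned int old = atomic_load(&rq->ref);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&rq->ref, &old, old + 1))
			return true;
	}
	return false;
}

/* Rough analogue of blk_mq_put_rq_ref(): free on the final put. */
static void req_put(struct req *rq)
{
	if (atomic_fetch_sub(&rq->ref, 1) == 1)
		free(rq);
}

/*
 * Rough analogue of the iterator: pin each slot before the callback runs,
 * skip slots that are NULL or already on their way to being freed, and
 * drop the reference once the callback returns.
 */
static void iterate(struct req **slots, int nr,
		    bool (*fn)(struct req *rq, void *priv), void *priv)
{
	for (int i = 0; i < nr; i++) {
		struct req *rq = slots[i];
		bool keep_going;

		if (!rq || !req_get_unless_zero(rq))
			continue;
		keep_going = fn(rq, priv);
		req_put(rq);
		if (!keep_going)
			break;
	}
}

static bool print_tag(struct req *rq, void *priv)
{
	(void)priv;
	printf("busy tag %d\n", rq->tag);
	return true;	/* keep iterating */
}

int main(void)
{
	struct req *r = malloc(sizeof(*r));
	struct req *slots[2] = { r, NULL };

	atomic_init(&r->ref, 1);
	r->tag = 7;

	iterate(slots, 2, print_tag, NULL);
	req_put(r);	/* drop the owner's reference, freeing the object */
	return 0;
}
```

A slot whose refcount has already dropped to zero simply fails the inc-if-not-zero test and is skipped, which mirrors the second bullet in the commit message.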
Diffstat (limited to 'block/blk-mq-tag.c')
-rw-r--r-- | block/blk-mq-tag.c | 44
1 file changed, 33 insertions, 11 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 2a37731e8244..544edf2c56a5 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -199,6 +199,16 @@ struct bt_iter_data {
 	bool reserved;
 };
 
+static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
+		unsigned int bitnr)
+{
+	struct request *rq = tags->rqs[bitnr];
+
+	if (!rq || !refcount_inc_not_zero(&rq->ref))
+		return NULL;
+	return rq;
+}
+
 static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 {
 	struct bt_iter_data *iter_data = data;
@@ -206,18 +216,22 @@ static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = hctx->tags;
 	bool reserved = iter_data->reserved;
 	struct request *rq;
+	bool ret = true;
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
-	rq = tags->rqs[bitnr];
-
 	/*
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assigning ->rqs[].
 	 */
-	if (rq && rq->q == hctx->queue && rq->mq_hctx == hctx)
-		return iter_data->fn(hctx, rq, iter_data->data, reserved);
-	return true;
+	rq = blk_mq_find_and_get_req(tags, bitnr);
+	if (!rq)
+		return true;
+
+	if (rq->q == hctx->queue && rq->mq_hctx == hctx)
+		ret = iter_data->fn(hctx, rq, iter_data->data, reserved);
+	blk_mq_put_rq_ref(rq);
+	return ret;
 }
 
 /**
@@ -264,6 +278,8 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	struct blk_mq_tags *tags = iter_data->tags;
 	bool reserved = iter_data->flags & BT_TAG_ITER_RESERVED;
 	struct request *rq;
+	bool ret = true;
+	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);
 
 	if (!reserved)
 		bitnr += tags->nr_reserved_tags;
@@ -272,16 +288,19 @@ static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
 	 * We can hit rq == NULL here, because the tagging functions
 	 * test and set the bit before assigning ->rqs[].
 	 */
-	if (iter_data->flags & BT_TAG_ITER_STATIC_RQS)
+	if (iter_static_rqs)
 		rq = tags->static_rqs[bitnr];
 	else
-		rq = tags->rqs[bitnr];
+		rq = blk_mq_find_and_get_req(tags, bitnr);
 	if (!rq)
 		return true;
-	if ((iter_data->flags & BT_TAG_ITER_STARTED) &&
-	    !blk_mq_request_started(rq))
-		return true;
-	return iter_data->fn(rq, iter_data->data, reserved);
+
+	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
+	    blk_mq_request_started(rq))
+		ret = iter_data->fn(rq, iter_data->data, reserved);
+	if (!iter_static_rqs)
+		blk_mq_put_rq_ref(rq);
+	return ret;
 }
 
 /**
@@ -348,6 +367,9 @@ void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
  *		indicates whether or not @rq is a reserved request. Return
  *		true to continue iterating tags, false to stop.
  * @priv:	Will be passed as second argument to @fn.
+ *
+ * We grab one request reference before calling @fn and release it after
+ * @fn returns.
  */
 void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
 		busy_tag_iter_fn *fn, void *priv)
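For context, here is a hedged sketch of a driver-side callback as it might be passed to blk_mq_tagset_busy_iter(); it is illustrative only and not part of this patch. my_abort_one(), my_abort_all(), struct my_cmd and its submit_time field are hypothetical names, while blk_mq_tagset_busy_iter(), blk_mq_request_started() and blk_mq_rq_to_pdu() are the existing APIs the sketch assumes.

```c
#include <linux/blk-mq.h>
#include <linux/printk.h>

/* Hypothetical per-command driver data stored in the request PDU. */
struct my_cmd {
	u64 submit_time;
};

/*
 * busy_tag_iter_fn callback. After this change the iterator holds a
 * reference on @rq for the duration of the call, so dereferencing the
 * request here cannot race with it being freed or reused.
 */
static bool my_abort_one(struct request *rq, void *priv, bool reserved)
{
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);

	if (blk_mq_request_started(rq))
		pr_info("aborting tag %d, submitted at %llu\n",
			rq->tag, cmd->submit_time);

	return true;	/* keep iterating */
}

static void my_abort_all(struct blk_mq_tag_set *set)
{
	blk_mq_tagset_busy_iter(set, my_abort_one, NULL);
}
```

Before this patch the iterator read tags->rqs[bitnr] without pinning it, so a callback like this could race with request completion or queue release. Now the reference taken in bt_iter()/bt_tags_iter() keeps the request alive across the call; the static_rqs case iterates the pre-allocated requests owned by the tag set and therefore takes no reference.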