diff options
| author | Ming Lei <ming.lei@redhat.com> | 2026-01-16 17:18:43 +0300 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2026-01-23 06:05:40 +0300 |
| commit | 29d0a927f9efd5f87ac8a2d291d2782384d1bee2 (patch) | |
| tree | 284ab6883b86b34b52513d7eb21f1fd4b901b207 | |
| parent | 3ac4796b888a2574cb982c89076ed717f122289d (diff) | |
| download | linux-29d0a927f9efd5f87ac8a2d291d2782384d1bee2.tar.xz | |
ublk: abort requests filled in event kfifo
In case of BATCH_IO, any requests whose tags have been filled into the
event kfifo don't get a chance to be dispatched any more when the ublk
char device is released, so we have to abort them too.
Add ublk_abort_batch_queue() for aborting this kind of request.
Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
| -rw-r--r-- | drivers/block/ublk_drv.c | 24 |
1 files changed, 23 insertions, 1 deletions
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 90a6d6a12303..564cf44c238f 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2470,7 +2470,8 @@ static int ublk_ch_mmap(struct file *filp, struct vm_area_struct *vma) static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io, struct request *req) { - WARN_ON_ONCE(io->flags & UBLK_IO_FLAG_ACTIVE); + WARN_ON_ONCE(!ublk_dev_support_batch_io(ub) && + io->flags & UBLK_IO_FLAG_ACTIVE); if (ublk_nosrv_should_reissue_outstanding(ub)) blk_mq_requeue_request(req, false); @@ -2481,6 +2482,24 @@ static void __ublk_fail_req(struct ublk_device *ub, struct ublk_io *io, } /* + * Request tag may just be filled to event kfifo, not get chance to + * dispatch, abort these requests too + */ +static void ublk_abort_batch_queue(struct ublk_device *ub, + struct ublk_queue *ubq) +{ + unsigned short tag; + + while (kfifo_out(&ubq->evts_fifo, &tag, 1)) { + struct request *req = blk_mq_tag_to_rq( + ub->tag_set.tags[ubq->q_id], tag); + + if (!WARN_ON_ONCE(!req || !blk_mq_request_started(req))) + __ublk_fail_req(ub, &ubq->ios[tag], req); + } +} + +/* * Called from ublk char device release handler, when any uring_cmd is * done, meantime request queue is "quiesced" since all inflight requests * can't be completed because ublk server is dead. @@ -2498,6 +2517,9 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq) if (io->flags & UBLK_IO_FLAG_OWNED_BY_SRV) __ublk_fail_req(ub, io, io->req); } + + if (ublk_support_batch_io(ubq)) + ublk_abort_batch_queue(ub, ubq); } static void ublk_start_cancel(struct ublk_device *ub) |
