author      Jens Axboe <axboe@kernel.dk>    2018-01-09 22:47:24 +0300
committer   Jens Axboe <axboe@kernel.dk>    2018-01-10 00:59:19 +0300
commit      5448aca41cd58e1a20574b6f29a8478bbb123dc3
tree        f28276d8138b9298c6782f5a8fad937a846e661e /drivers/block
parent      8abef10b3de1144cfe968f454946f13eb1ac3d0a
download    linux-5448aca41cd58e1a20574b6f29a8478bbb123dc3.tar.xz
null_blk: wire up timeouts
This is needed to ensure that we actually handle timeouts.
Without it, the queue_mode=1 path will never call blk_add_timer(),
and the queue_mode=2 path will just keep returning
BLK_EH_RESET_TIMER, so we never actually complete the offending request.
This was used to test the new timeout code, and the changes around
killing off REQ_ATOM_COMPLETE.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
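For context, here is a minimal sketch (not part of the patch) of how a blk-mq driver of this vintage wires up request timeouts; the my_* names are hypothetical and the 30-second expiry is arbitrary. The per-request timer is armed by blk_mq_start_request(), and the .timeout hook fires if the request is not completed within tag_set->timeout. Returning BLK_EH_HANDLED tells the core to complete the request on the driver's behalf, while BLK_EH_RESET_TIMER rearms the timer, which is why, without a handler, the queue_mode=2 requests described above never complete.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Hypothetical my_* driver names; a sketch only, not the null_blk code. */

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
                                const struct blk_mq_queue_data *bd)
{
        blk_mq_start_request(bd->rq);   /* arms the per-request timeout timer */
        /* ... hand bd->rq to the hardware here ... */
        return BLK_STS_OK;
}

static enum blk_eh_timer_return my_timeout_rq(struct request *rq, bool reserved)
{
        /*
         * BLK_EH_HANDLED: the block core completes the request for us.
         * BLK_EH_RESET_TIMER: rearm the timer and keep waiting.
         */
        pr_warn("my_drv: request %p timed out\n", rq);
        return BLK_EH_HANDLED;
}

static const struct blk_mq_ops my_mq_ops = {
        .queue_rq       = my_queue_rq,
        .timeout        = my_timeout_rq,
};

static int my_init_tag_set(struct blk_mq_tag_set *set)
{
        set->ops = &my_mq_ops;
        set->nr_hw_queues = 1;
        set->queue_depth = 64;
        set->numa_node = NUMA_NO_NODE;
        set->timeout = 30 * HZ;         /* expire requests after 30 seconds */
        return blk_mq_alloc_tag_set(set);
}

The patch below does the equivalent for both null_blk paths with a 5-second expiry: tag_set->timeout plus a .timeout op for queue_mode=2, and blk_queue_rq_timed_out() plus q->rq_timeout for queue_mode=1.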
Diffstat (limited to 'drivers/block')
-rw-r--r--   drivers/block/null_blk.c | 16
1 file changed, 16 insertions, 0 deletions
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 1e1981c6c557..78267e3e4fa5 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1341,6 +1341,12 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
 	return BLK_QC_T_NONE;
 }
 
+static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
+{
+	pr_info("null: rq %p timed out\n", rq);
+	return BLK_EH_HANDLED;
+}
+
 static int null_rq_prep_fn(struct request_queue *q, struct request *req)
 {
 	struct nullb *nullb = q->queuedata;
@@ -1371,6 +1377,12 @@ static void null_request_fn(struct request_queue *q)
 	}
 }
 
+static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
+{
+	pr_info("null: rq %p timed out\n", rq);
+	return BLK_EH_HANDLED;
+}
+
 static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 			const struct blk_mq_queue_data *bd)
 {
@@ -1394,6 +1406,7 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
 static const struct blk_mq_ops null_mq_ops = {
 	.queue_rq	= null_queue_rq,
 	.complete	= null_softirq_done_fn,
+	.timeout	= null_timeout_rq,
 };
 
 static void cleanup_queue(struct nullb_queue *nq)
@@ -1654,6 +1667,7 @@ static int null_add_dev(struct nullb_device *dev)
 	if (rv)
 		goto out_cleanup_queues;
 
+	nullb->tag_set->timeout = 5 * HZ;
 	nullb->q = blk_mq_init_queue(nullb->tag_set);
 	if (IS_ERR(nullb->q)) {
 		rv = -ENOMEM;
@@ -1679,6 +1693,8 @@ static int null_add_dev(struct nullb_device *dev)
 		}
 		blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
 		blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+		blk_queue_rq_timed_out(nullb->q, null_rq_timed_out_fn);
+		nullb->q->rq_timeout = 5 * HZ;
 		rv = init_driver_queues(nullb);
 		if (rv)
 			goto out_cleanup_blk_queue;