author | Ming Lei <tom.leiming@gmail.com> | 2013-12-26 17:31:35 +0400
---|---|---
committer | Jens Axboe <axboe@kernel.dk> | 2013-12-31 20:53:05 +0400
commit | 43a5e4e21964a6efb4d14a34644ec7109d0ae891 | (patch)
tree | 9ff635ec990583c0877d4056841d35e6018825a1 | /block/blk-mq.c
parent | b28bc9b38c52f63f43e3fd875af982f2240a2859 | (diff)
download | linux-43a5e4e21964a6efb4d14a34644ec7109d0ae891.tar.xz |
block: blk-mq: support draining mq queue
blk_mq_drain_queue() is introduced so that we can drain the
mq queue inside blk_cleanup_queue().
Also, don't accept new requests any more once the queue is marked
as dying (see the sketch below).
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
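The enter/exit contract this establishes can be illustrated with a short sketch. This is not part of the patch: sketch_submit() is a hypothetical caller, standing in for the real submit paths in blk-mq.c.

```c
/*
 * Illustrative sketch only (not in this patch): a hypothetical
 * submitter bracketing its work with blk_mq_queue_enter()/exit().
 */
static int sketch_submit(struct request_queue *q)
{
	int ret = blk_mq_queue_enter(q);

	if (ret)
		return ret;	/* -ENODEV once the queue is marked dying */

	/* ... queue the request while mq_usage_counter is elevated ... */

	blk_mq_queue_exit(q);	/* counter drops; a pending drain can finish */
	return 0;
}
```

Every in-flight submitter holds a reference in mq_usage_counter, so the drain loop can simply poll percpu_counter_sum() until it reaches zero.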
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- | block/blk-mq.c | 43
1 file changed, 27 insertions(+), 16 deletions(-)
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 3929f43d0b03..e2f811cba417 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -106,10 +106,13 @@ static int blk_mq_queue_enter(struct request_queue *q)
 
 	spin_lock_irq(q->queue_lock);
 	ret = wait_event_interruptible_lock_irq(q->mq_freeze_wq,
-		!blk_queue_bypass(q), *q->queue_lock);
+		!blk_queue_bypass(q) || blk_queue_dying(q),
+		*q->queue_lock);
 	/* inc usage with lock hold to avoid freeze_queue runs here */
-	if (!ret)
+	if (!ret && !blk_queue_dying(q))
 		__percpu_counter_add(&q->mq_usage_counter, 1, 1000000);
+	else if (blk_queue_dying(q))
+		ret = -ENODEV;
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
@@ -120,6 +123,22 @@ static void blk_mq_queue_exit(struct request_queue *q)
 	__percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
+static void __blk_mq_drain_queue(struct request_queue *q)
+{
+	while (true) {
+		s64 count;
+
+		spin_lock_irq(q->queue_lock);
+		count = percpu_counter_sum(&q->mq_usage_counter);
+		spin_unlock_irq(q->queue_lock);
+
+		if (count == 0)
+			break;
+		blk_mq_run_queues(q, false);
+		msleep(10);
+	}
+}
+
 /*
  * Guarantee no request is in use, so we can change any data structure of
  * the queue afterward.
@@ -133,21 +152,13 @@ static void blk_mq_freeze_queue(struct request_queue *q)
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (!drain)
-		return;
-
-	while (true) {
-		s64 count;
-
-		spin_lock_irq(q->queue_lock);
-		count = percpu_counter_sum(&q->mq_usage_counter);
-		spin_unlock_irq(q->queue_lock);
+	if (drain)
+		__blk_mq_drain_queue(q);
+}
 
-		if (count == 0)
-			break;
-		blk_mq_run_queues(q, false);
-		msleep(10);
-	}
+void blk_mq_drain_queue(struct request_queue *q)
+{
+	__blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
```
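The diffstat above is limited to block/blk-mq.c, so the caller side is not shown. A minimal sketch of how blk_cleanup_queue() might dispatch to the new helper, assuming (per the commit message, not this hunk) that the mq/legacy split is decided by q->mq_ops; sketch_cleanup_drain() and legacy_drain() are hypothetical names:

```c
/* stand-in for the legacy (non-mq) drain path, outside this diffstat */
static void legacy_drain(struct request_queue *q);

/*
 * Hypothetical caller-side sketch (blk-core.c is not in this diffstat):
 * the cleanup path drains via the new mq helper when q->mq_ops is set,
 * and via the existing legacy drain otherwise.
 */
static void sketch_cleanup_drain(struct request_queue *q)
{
	if (q->mq_ops)			/* multiqueue: use the new helper */
		blk_mq_drain_queue(q);
	else				/* single-queue request_fn path */
		legacy_drain(q);
}
```

Splitting __blk_mq_drain_queue() out of blk_mq_freeze_queue() keeps the drain loop in one place: blk_mq_freeze_queue() still drains after setting QUEUE_FLAG_BYPASS, while blk_cleanup_queue() can reuse the same loop through the new non-static blk_mq_drain_queue().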