| author | Christoph Hellwig <hch@lst.de> | 2018-11-14 19:02:04 +0300 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2018-11-15 22:13:15 +0300 |
| commit | 8f4236d9008b0973a8281256ccfde6913cdec6cb (patch) | |
| tree | df22388e2396b014899b7db04d0e08c303adb1da /block/blk-core.c | |
| parent | e96c0d8336fdc5f1a6cae798bfa9c6e730001ad4 (diff) | |
| download | linux-8f4236d9008b0973a8281256ccfde6913cdec6cb.tar.xz | |
block: remove QUEUE_FLAG_BYPASS and ->bypass
Unused since the removal of the legacy request code.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 21 |
1 file changed, 0 insertions, 21 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index fdc0ad2686c4..1c9b6975cf0a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -370,18 +370,6 @@ void blk_cleanup_queue(struct request_queue *q)
 	blk_set_queue_dying(q);
 	spin_lock_irq(lock);

-	/*
-	 * A dying queue is permanently in bypass mode till released. Note
-	 * that, unlike blk_queue_bypass_start(), we aren't performing
-	 * synchronize_rcu() after entering bypass mode to avoid the delay
-	 * as some drivers create and destroy a lot of queues while
-	 * probing. This is still safe because blk_release_queue() will be
-	 * called only after the queue refcnt drops to zero and nothing,
-	 * RCU or not, would be traversing the queue by then.
-	 */
-	q->bypass_depth++;
-	queue_flag_set(QUEUE_FLAG_BYPASS, q);
-
 	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
 	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	queue_flag_set(QUEUE_FLAG_DYING, q);
@@ -589,15 +577,6 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id,

 	q->queue_lock = lock ? : &q->__queue_lock;

-	/*
-	 * A queue starts its life with bypass turned on to avoid
-	 * unnecessary bypass on/off overhead and nasty surprises during
-	 * init. The initial bypass will be finished when the queue is
-	 * registered by blk_register_queue().
-	 */
-	q->bypass_depth = 1;
-	queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
-
 	init_waitqueue_head(&q->mq_freeze_wq);

 	/*
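For context, what this patch deletes is the tail end of a counted-flag idiom: the legacy request path's blk_queue_bypass_start()/blk_queue_bypass_end() helpers roughly bumped and dropped q->bypass_depth and kept QUEUE_FLAG_BYPASS set while the count was non-zero, and readers checked the flag through blk_queue_bypass(). With the legacy path already removed, the two hunks above were the last remaining writers and nothing tests the flag any more. The sketch below is a minimal, self-contained userspace illustration of that counted-flag pattern, not the kernel code; fake_queue, bypass_start(), bypass_end() and queue_bypassed() are invented names for illustration only (the real helpers also held q->queue_lock and involved RCU synchronization).

```c
/*
 * Simplified illustration of the counted "bypass" pattern that
 * QUEUE_FLAG_BYPASS and ->bypass_depth implemented for the legacy
 * request path.  All names here are made up for the sketch.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	unsigned long	flags;		/* stands in for q->queue_flags  */
	int		bypass_depth;	/* stands in for q->bypass_depth */
};

#define FAKE_FLAG_BYPASS	(1UL << 0)

/* Nested start/end calls keep the flag set until the last caller leaves. */
static void bypass_start(struct fake_queue *q)
{
	if (q->bypass_depth++ == 0)
		q->flags |= FAKE_FLAG_BYPASS;
}

static void bypass_end(struct fake_queue *q)
{
	if (--q->bypass_depth == 0)
		q->flags &= ~FAKE_FLAG_BYPASS;
}

/* Readers only ever test the flag, never the depth counter. */
static bool queue_bypassed(const struct fake_queue *q)
{
	return q->flags & FAKE_FLAG_BYPASS;
}

int main(void)
{
	struct fake_queue q = { 0 };

	bypass_start(&q);
	bypass_start(&q);	/* nesting is allowed */
	printf("bypassed: %d\n", queue_bypassed(&q));	/* 1 */
	bypass_end(&q);
	printf("bypassed: %d\n", queue_bypassed(&q));	/* still 1 */
	bypass_end(&q);
	printf("bypassed: %d\n", queue_bypassed(&q));	/* 0 */
	return 0;
}
```

Once no reader is left to call the equivalent of queue_bypassed(), both the counter and the flag are dead state, which is exactly the situation the commit message describes for blk-mq-only kernels.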