author | Christoph Hellwig <hch@lst.de> | 2018-11-14 19:02:08 +0300
committer | Jens Axboe <axboe@kernel.dk> | 2018-11-15 22:13:21 +0300
commit | 373e4af34ec13c17a6b80227c7d5d3719122eb77 (patch)
tree | 89c93f218d6949e6f802a1703bd0ac0f2c9c2790 /block
parent | 57d74df90783f6a6b3e79dfdd2a567ce5db3b790 (diff)
download | linux-373e4af34ec13c17a6b80227c7d5d3719122eb77.tar.xz
block: remove queue_lockdep_assert_held
The only remaining user unconditionally drops and reacquires the lock,
which means we really don't need any additional (conditional) annotation.
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
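
The helper being removed only forwarded to lockdep_assert_held() once the queue lock pointer had been set. As a minimal sketch of the contrast the commit message draws, the conditional wrapper (taken verbatim from the hunk below) versus a hypothetical caller that simply asserts the lock directly; example_requires_queue_lock() is illustrative only and not part of this patch:

	/* Removed conditional wrapper: only asserts once q->queue_lock is set. */
	static inline void queue_lockdep_assert_held(struct request_queue *q)
	{
		if (q->queue_lock)
			lockdep_assert_held(q->queue_lock);
	}

	/* With the wrapper gone, a caller that genuinely requires the lock
	 * would assert it directly and unconditionally (hypothetical example): */
	static void example_requires_queue_lock(struct request_queue *q)
	{
		lockdep_assert_held(q->queue_lock);
		/* ... work that must run under q->queue_lock ... */
	}
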
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-throttle.c | 1
-rw-r--r-- | block/blk.h | 13
2 files changed, 0 insertions, 14 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8e6f3c9821c2..a665b0950369 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2353,7 +2353,6 @@ void blk_throtl_drain(struct request_queue *q)
 	struct bio *bio;
 	int rw;
 
-	queue_lockdep_assert_held(q);
 	rcu_read_lock();
 
 	/*
diff --git a/block/blk.h b/block/blk.h
index f2ddc71e93da..027a0ccc175e 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -35,19 +35,6 @@ extern struct kmem_cache *blk_requestq_cachep;
 extern struct kobj_type blk_queue_ktype;
 extern struct ida blk_queue_ida;
 
-/*
- * @q->queue_lock is set while a queue is being initialized. Since we know
- * that no other threads access the queue object before @q->queue_lock has
- * been set, it is safe to manipulate queue flags without holding the
- * queue_lock if @q->queue_lock == NULL. See also blk_alloc_queue_node() and
- * blk_init_allocated_queue().
- */
-static inline void queue_lockdep_assert_held(struct request_queue *q)
-{
-	if (q->queue_lock)
-		lockdep_assert_held(q->queue_lock);
-}
-
 static inline struct blk_flush_queue *
 blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx)
 {
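
For context, the commit message's point is that a caller which unconditionally drops and reacquires q->queue_lock already exercises lockdep on that lock: unlocking a lock that is not held triggers a lockdep splat on its own, so an extra assertion at function entry adds nothing. A simplified, hypothetical sketch of that drop/reacquire pattern (not the actual blk_throtl_drain() body):

	/* Hypothetical drain-style function: the unconditional unlock/lock pair
	 * below already lets lockdep catch a caller that does not hold
	 * q->queue_lock, making a separate queue_lockdep_assert_held() call
	 * redundant. */
	static void example_drain(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
	{
		spin_unlock_irq(q->queue_lock);
		/* ... work that must not run under the queue lock ... */
		spin_lock_irq(q->queue_lock);
	}
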