author     Christoph Hellwig <hch@lst.de>    2014-05-28 18:08:02 +0400
committer  Jens Axboe <axboe@fb.com>         2014-05-28 18:08:02 +0400
commit     6fca6a611c27f1f0d90fbe1cc3c229dbf8c09e48
tree       d3348f3ab1169db9b5a1fca67a8fd2164152530c /include
parent     7738dac4f697ffbd0ed4c4aeb69a714ef9d876da
download   linux-6fca6a611c27f1f0d90fbe1cc3c229dbf8c09e48.tar.xz
blk-mq: add helper to insert requests from irq context
Both the cache flush state machine and the SCSI midlayer want to submit
requests from irq context, and the current per-request requeue_work
causes corruption because it shares a union with the csd field used by
flushes. Replace it with a per-request_queue list of requests to be
requeued.
Based on an earlier test by Ming Lei.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Ming Lei <tom.leiming@gmail.com>
Tested-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
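
For illustration, a minimal sketch of how an irq-context completion path
could use the two helpers this patch adds. The driver-side handler
(my_irq_complete) and its error-handling policy are hypothetical; only
blk_mq_requeue_request(), blk_mq_add_to_requeue_list(),
blk_mq_kick_requeue_list() and blk_mq_end_io() come from the blk-mq API
shown in this patch:

#include <linux/blk-mq.h>

/*
 * Hypothetical driver completion handler, called in hard-irq context.
 * Instead of scheduling per-request work (which shared a union with
 * the csd field used by the flush state machine), the request is
 * parked on the per-request_queue requeue list and the list is
 * kicked; the actual re-insert then happens from process context.
 */
static void my_irq_complete(struct request *rq, int error)
{
	if (error == -EAGAIN) {
		/* transient failure: prepare rq for requeue ... */
		blk_mq_requeue_request(rq);
		/* ... park it at the tail of q->requeue_list ... */
		blk_mq_add_to_requeue_list(rq, false);
		/* ... and schedule q->requeue_work to drain the list */
		blk_mq_kick_requeue_list(rq->q);
	} else {
		blk_mq_end_io(rq, error);
	}
}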
Diffstat (limited to 'include')
-rw-r--r--  include/linux/blk-mq.h  2 ++
-rw-r--r--  include/linux/blkdev.h  5 ++++-
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 5b171fbe95c5..b9a74a386dbc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -172,6 +172,8 @@ void blk_mq_end_io(struct request *rq, int error);
 void __blk_mq_end_io(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6bc011a09e82..913f1c2d3be0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -99,7 +99,6 @@ struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
-		struct work_struct requeue_work;
 		unsigned long fifo_time;
 	};
 
@@ -463,6 +462,10 @@ struct request_queue {
 	struct request *flush_rq;
 	spinlock_t mq_flush_lock;
 
+	struct list_head requeue_list;
+	spinlock_t requeue_lock;
+	struct work_struct requeue_work;
+
 	struct mutex sysfs_lock;
 
 	int bypass_depth;
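
The blk-mq.c side of the change is outside this diffstat (limited to
'include'), but a rough sketch of how the three new request_queue
fields cooperate might look as follows. This is an approximation
written against the declarations above, not the verbatim patch body;
blk_mq_insert_request() is assumed to be the existing insert primitive
of that era, and blk_mq_requeue_work() is assumed to be wired up as the
handler for q->requeue_work at queue init:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/*
 * Park a request on the per-queue requeue list.  Safe from irq
 * context: the list is protected by an irq-safe spinlock, so nothing
 * races with the flush machinery's use of the csd field anymore.
 */
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	if (at_head)
		list_add(&rq->queuelist, &q->requeue_list);
	else
		list_add_tail(&rq->queuelist, &q->requeue_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);
}

/* Schedule the work item that drains the requeue list. */
void blk_mq_kick_requeue_list(struct request_queue *q)
{
	kblockd_schedule_work(&q->requeue_work);
}

/*
 * Work handler: splice the pending requests off under the lock, then
 * re-insert them into the queue from process context.
 */
static void blk_mq_requeue_work(struct work_struct *work)
{
	struct request_queue *q =
		container_of(work, struct request_queue, requeue_work);
	LIST_HEAD(rq_list);
	struct request *rq, *next;
	unsigned long flags;

	spin_lock_irqsave(&q->requeue_lock, flags);
	list_splice_init(&q->requeue_list, &rq_list);
	spin_unlock_irqrestore(&q->requeue_lock, flags);

	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
		list_del_init(&rq->queuelist);
		blk_mq_insert_request(rq, false, false, false);
	}
}

Splicing the list under the lock and doing the inserts after dropping
it keeps the irq-safe critical section short, so irq-context callers of
blk_mq_add_to_requeue_list() are never blocked behind the re-insert
path.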