path: root/include/linux/blk-mq.h
author	Christoph Hellwig <hch@lst.de>	2013-10-28 23:33:58 +0400
committer	Jens Axboe <axboe@kernel.dk>	2013-10-28 23:33:58 +0400
commit	3228f48be2d19b2dd90db96ec16a40187a2946f3 (patch)
tree	2f148dd8e82b287fc7b682cabc5614e3046d8949 /include/linux/blk-mq.h
parent	280d45f6c35d8d7a0fe20c36caf426e3ac139cf9 (diff)
download	linux-3228f48be2d19b2dd90db96ec16a40187a2946f3.tar.xz
blk-mq: fix for flush deadlock
The flush state machine takes in a struct request, which is then submitted multiple times to the underlying driver. The old block code reuses the same request for each of those submissions, so it has no issue with tapping into the request pool. The new code, on the other hand, allocates a new request for each of the actual steps of the flush sequence. If we have already allocated all of the tags for I/O, we will fail to allocate the flush request.

Set aside a reserved request just for flushes.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
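For illustration only (not part of the commit): a minimal sketch of how a flush path could use the new 'reserved' flag to obtain one of the set-aside requests even when regular I/O holds every normal tag. Only the blk_mq_alloc_request() signature comes from the diff below; the helper name, request type, and GFP flags are assumptions.

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical helper, not code from the tree: allocate a request for
 * a flush, dipping into the reserved pool (reserved = true) so the
 * allocation cannot be starved by ordinary I/O consuming all tags. */
static struct request *example_alloc_flush_request(struct request_queue *q)
{
	return blk_mq_alloc_request(q, WRITE_FLUSH, __GFP_WAIT, true);
}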
Diffstat (limited to 'include/linux/blk-mq.h')
-rw-r--r--	include/linux/blk-mq.h	2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 3368b97bee73..ab0e9b2025b3 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -124,7 +124,7 @@ void blk_mq_insert_request(struct request_queue *, struct request *, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
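For comparison, an ordinary (non-flush) caller of the changed function would pass reserved = false and behave exactly as before this patch. The call below is a hypothetical example, not code from the tree:

	struct request *rq = blk_mq_alloc_request(q, READ, GFP_KERNEL, false);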