author		Ming Lei <ming.lei@redhat.com>	2018-07-03 18:03:16 +0300
committer	Jens Axboe <axboe@kernel.dk>	2018-07-09 18:07:53 +0300
commit		6e768717304bdbe8d2897ca8298f6b58863fdc41 (patch)
tree		64f259002c9bcca0bd1f187cdd7d067e68a29d45 /include/linux
parent		d893ff86034f7107f89d8b740c2b5902a21a49db (diff)
download	linux-6e768717304bdbe8d2897ca8298f6b58863fdc41.tar.xz
blk-mq: dequeue request one by one from sw queue if hctx is busy
Dequeuing requests one by one from the sw queue is not efficient, but
we have to do it that way when the queue is busy, because leaving the
remaining requests in the sw queue longer gives them a better chance
to merge. This patch uses an Exponential Weighted Moving Average
(EWMA) to figure out whether the queue is busy, and only dequeues
requests one by one from the sw queue when it is.

Fixes: b347689ffbca ("blk-mq-sched: improve dispatching from sw queue")
Cc: Kashyap Desai <kashyap.desai@broadcom.com>
Cc: Laurence Oberman <loberman@redhat.com>
Cc: Omar Sandoval <osandov@fb.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Bart Van Assche <bart.vanassche@wdc.com>
Cc: Hannes Reinecke <hare@suse.de>
Reported-by: Kashyap Desai <kashyap.desai@broadcom.com>
Tested-by: Kashyap Desai <kashyap.desai@broadcom.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
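The header hunk below only adds the dispatch_busy field; the EWMA
bookkeeping itself lands in block/blk-mq.c in the same series. A
rough, non-authoritative sketch of that update step follows (the
constant names and values here are illustrative, not necessarily the
kernel's):

#include <linux/blk-mq.h>

/* Illustrative EWMA parameters, not necessarily the kernel's values. */
#define DISPATCH_BUSY_EWMA_WEIGHT	8	/* decay: keep 7/8 of history */
#define DISPATCH_BUSY_EWMA_FACTOR	4	/* fixed-point scale for "busy" */

/*
 * Fold the outcome of one dispatch attempt into the running average:
 * a busy result pulls hctx->dispatch_busy up, an idle result decays
 * it toward zero, so a single stall does not flip the dispatch mode.
 */
static void update_dispatch_busy(struct blk_mq_hw_ctx *hctx, bool busy)
{
	unsigned int ewma = hctx->dispatch_busy;

	/* A queue that is idle and stays idle needs no update. */
	if (!ewma && !busy)
		return;

	ewma *= DISPATCH_BUSY_EWMA_WEIGHT - 1;
	if (busy)
		ewma += 1 << DISPATCH_BUSY_EWMA_FACTOR;
	ewma /= DISPATCH_BUSY_EWMA_WEIGHT;

	hctx->dispatch_busy = ewma;
}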
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/blk-mq.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index ea690254dab7..d710e92874cc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -35,9 +35,10 @@ struct blk_mq_hw_ctx {
 	struct sbitmap		ctx_map;
 
 	struct blk_mq_ctx	*dispatch_from;
+	unsigned int		dispatch_busy;
 
-	struct blk_mq_ctx	**ctxs;
 	unsigned int		nr_ctx;
+	struct blk_mq_ctx	**ctxs;
 
 	spinlock_t		dispatch_wait_lock;
 	wait_queue_entry_t	dispatch_wait;
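For context, an abridged sketch of how the scheduler dispatch path in
block/blk-mq-sched.c consults the new field (the surrounding function
and error handling are omitted; this is an illustration, not the
verbatim kernel code):

/*
 * When the EWMA says the hctx is busy, pull requests one by one so
 * the rest keep merging in the sw queues; otherwise flush everything
 * at once for throughput.
 */
if (hctx->dispatch_busy) {
	blk_mq_do_dispatch_ctx(hctx);
} else {
	blk_mq_flush_busy_ctxs(hctx, &rq_list);
	blk_mq_dispatch_rq_list(q, &rq_list, false);
}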