| author | Jens Axboe <jens.axboe@oracle.com> | 2009-07-30 10:18:24 +0400 |
|---|---|---|
| committer | Jens Axboe <jens.axboe@oracle.com> | 2009-09-11 16:33:31 +0400 |
| commit | fb1e75389bd06fd5987e9cda1b4e0305c782f854 (patch) | |
| tree | 6658e13f80d4f6450f5a69c82d3bf1b590ecf234 /block | |
| parent | 1f98a13f623e0ef666690a18c1250335fc6d7ef1 (diff) | |
| download | linux-fb1e75389bd06fd5987e9cda1b4e0305c782f854.tar.xz | |
block: improve queue_should_plug() by looking at IO depths
Instead of just checking whether this device uses block layer
tagging, we can improve the detection by looking at the maximum
queue depth it has reached. If that exceeds 4, deem it a queuing
device (see the sketch after the diff below).
This is important on high-IOPS devices, since plugging hurts
performance there (the plugging overhead can account for as much
as 10-15% of sys time).
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 11 |
1 file changed, 9 insertions(+), 2 deletions(-)
```diff
diff --git a/block/blk-core.c b/block/blk-core.c
index 52559715cb90..93051d151635 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1146,7 +1146,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
  */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
+	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1857,8 +1857,15 @@ void blk_dequeue_request(struct request *rq)
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq))
+	if (blk_account_rq(rq)) {
 		q->in_flight[rq_is_sync(rq)]++;
+		/*
+		 * Mark this device as supporting hardware queuing, if
+		 * we have more IOs in flight than 4.
+		 */
+		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
+			set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
+	}
 }
 
 /**
```
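For context, here is a minimal userspace sketch of the heuristic the patch adds. It is not kernel code: struct fake_queue, the flag bits, and the helper functions below are invented stand-ins for the kernel's request_queue, QUEUE_FLAG_NONROT/QUEUE_FLAG_CQ, queue_in_flight() and blk_queue_queuing(). It only shows how the "queuing device" flag becomes sticky once more than 4 IOs are observed in flight, after which plugging is skipped for non-rotational devices.

```c
/*
 * Userspace illustration only -- simplified stand-ins for the kernel's
 * request_queue, queue flags and in-flight accounting.
 */
#include <stdbool.h>
#include <stdio.h>

#define FLAG_NONROT_BIT  0   /* non-rotational device (e.g. SSD) */
#define FLAG_CQ_BIT      1   /* device has shown it queues commands */

struct fake_queue {
	unsigned long flags;
	unsigned int in_flight;   /* IOs currently handed to the driver */
};

static bool test_flag(const struct fake_queue *q, int bit)
{
	return q->flags & (1UL << bit);
}

/* Mirrors queue_should_plug(): only skip plugging for non-rotational
 * devices that have proven they queue multiple commands. */
static bool queue_should_plug(const struct fake_queue *q)
{
	return !(test_flag(q, FLAG_NONROT_BIT) && test_flag(q, FLAG_CQ_BIT));
}

/* Mirrors the blk_dequeue_request() hunk: once more than 4 IOs are in
 * flight at the same time, mark the device as a queuing device. */
static void dequeue_request(struct fake_queue *q)
{
	q->in_flight++;
	if (!test_flag(q, FLAG_CQ_BIT) && q->in_flight > 4)
		q->flags |= 1UL << FLAG_CQ_BIT;
}

static void complete_request(struct fake_queue *q)
{
	q->in_flight--;
}

int main(void)
{
	struct fake_queue q = { .flags = 1UL << FLAG_NONROT_BIT };

	printf("before deep IO: should plug? %d\n", queue_should_plug(&q));

	/* Drive the observed queue depth past the threshold of 4. */
	for (int i = 0; i < 5; i++)
		dequeue_request(&q);
	for (int i = 0; i < 5; i++)
		complete_request(&q);

	printf("after deep IO : should plug? %d\n", queue_should_plug(&q));
	return 0;
}
```

In the hunk shown above the flag is only ever set, never cleared, so a single burst of deep IO is enough to classify the device as a queuing device, and the per-request cost in queue_should_plug() stays a pair of cheap flag tests.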