author     Shaohua Li <shli@fusionio.com>    2012-11-09 11:44:27 +0400
committer  Jens Axboe <axboe@kernel.dk>      2012-11-09 11:44:27 +0400
commit     bee0393cc12b6d8f10e884e555a095e050e0b2b9
tree       eabe7526c63a6a965cf0aa5574c222ff2f431e13
parent     3d106fba2e7eb6967b1e2cc147a6894ec4307cef
block: recursive merge requests
In a workload, thread 1 accesses a, a+2, ..., and thread 2 accesses a+1, a+3, ....
When the requests are flushed to the queue, a and a+1 are merged to (a, a+1), and a+2
and a+3 are likewise merged to (a+2, a+3), but (a, a+1) and (a+2, a+3) aren't merged.
If we do a recursive merge for such interleaved access, throughput improves for some
workloads. A recent workload I'm checking on is swap; the change below boosts its
throughput by around 5% ~ 10%.
Signed-off-by: Shaohua Li <shli@fusionio.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
-rw-r--r--  block/elevator.c | 16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 9b1d42b62f20..9edba1b8323e 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -458,6 +458,7 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 				     struct request *rq)
 {
 	struct request *__rq;
+	bool ret;
 
 	if (blk_queue_nomerges(q))
 		return false;
@@ -471,14 +472,21 @@ static bool elv_attempt_insert_merge(struct request_queue *q,
 	if (blk_queue_noxmerges(q))
 		return false;
 
+	ret = false;
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
-	if (__rq && blk_attempt_req_merge(q, __rq, rq))
-		return true;
+	while (1) {
+		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
+		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
+			break;
 
-	return false;
+		/* The merged request could be merged with others, try again */
+		ret = true;
+		rq = __rq;
+	}
+
+	return ret;
 }
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
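
For illustration only (not part of the commit): a minimal user-space sketch of the same loop, assuming a small hypothetical request pool in place of the elevator hash. The names find_backmerge, attempt_req_merge and attempt_insert_merge are stand-ins for elv_rqhash_find(), blk_attempt_req_merge() and elv_attempt_insert_merge(), and the sector numbers are made up to reproduce the (a, a+1) / (a+2, a+3) case from the commit message.

/*
 * Standalone sketch, NOT kernel code: requests are modelled as
 * [start, end) sector ranges in a flat array instead of the elevator
 * hash; all names below are hypothetical stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

struct request {
	unsigned long start;	/* first sector */
	unsigned long end;	/* one past the last sector */
	bool live;		/* false once merged into another request */
};

/* Stand-in for elv_rqhash_find(): a request that ends where rq starts. */
static struct request *find_backmerge(struct request *pool, int n,
				      struct request *rq)
{
	for (int i = 0; i < n; i++)
		if (pool[i].live && &pool[i] != rq && pool[i].end == rq->start)
			return &pool[i];
	return NULL;
}

/* Stand-in for blk_attempt_req_merge(): append rq to __rq. */
static bool attempt_req_merge(struct request *__rq, struct request *rq)
{
	__rq->end = rq->end;
	rq->live = false;
	return true;
}

/* The loop from the patch: keep probing as long as a backmerge succeeds. */
static bool attempt_insert_merge(struct request *pool, int n,
				 struct request *rq)
{
	bool ret = false;

	while (1) {
		struct request *__rq = find_backmerge(pool, n, rq);

		if (!__rq || !attempt_req_merge(__rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}
	return ret;
}

int main(void)
{
	struct request pool[] = {
		{ 100, 102, true },	/* (a, a+1): a+1 already back-merged onto a */
		{ 102, 103, true },	/* a+2 from thread 1 */
	};
	struct request rq = { 103, 104, true };	/* a+3 from thread 2, being inserted */

	attempt_insert_merge(pool, 2, &rq);

	for (int i = 0; i < 2; i++)
		if (pool[i].live)
			printf("surviving request: [%lu, %lu)\n",
			       pool[i].start, pool[i].end);
	return 0;
}

In this toy setup, the old single-probe code would stop after folding a+3 into a+2, leaving (a, a+1) and (a+2, a+3) separate; with the loop, the surviving (a+2, a+3) is probed again and merged into (a, a+1), so one request covering a..a+3 remains.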