diff options
author | Ming Lei <ming.lei@redhat.com> | 2018-01-10 05:51:29 +0300 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2018-01-10 06:23:19 +0300 |
commit | b4b6cb613519b7449da510bccf08986371b328cb (patch) | |
tree | 5d20eb6cae58d2e0b0a1dc84935cad88244827b5 /block | |
parent | 5448aca41cd58e1a20574b6f29a8478bbb123dc3 (diff) | |
download | linux-b4b6cb613519b7449da510bccf08986371b328cb.tar.xz |
Revert "block: blk-merge: try to make front segments in full size"
This reverts commit a2d37968d784363842f87820a21e106741d28004.
If max segment size isn't 512-aligned, this patch won't work well.
Also once multipage bvec is enabled, adjacent bvecs won't be physically
contiguous if page is added via bio_add_page(), so we don't need this
kind of complicated logic.
Reported-by: Dmitry Osipenko <digetx@gmail.com>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-merge.c | 54 |
1 file changed, 5 insertions, 49 deletions
diff --git a/block/blk-merge.c b/block/blk-merge.c index 446f63e076aa..8452fc7164cc 100644 --- a/block/blk-merge.c +++ b/block/blk-merge.c @@ -109,7 +109,6 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, bool do_split = true; struct bio *new = NULL; const unsigned max_sectors = get_max_io_size(q, bio); - unsigned advance = 0; bio_for_each_segment(bv, bio, iter) { /* @@ -133,32 +132,12 @@ static struct bio *blk_bio_segment_split(struct request_queue *q, } if (bvprvp && blk_queue_cluster(q)) { + if (seg_size + bv.bv_len > queue_max_segment_size(q)) + goto new_segment; if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv)) goto new_segment; if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv)) goto new_segment; - if (seg_size + bv.bv_len > queue_max_segment_size(q)) { - /* - * One assumption is that initial value of - * @seg_size(equals to bv.bv_len) won't be - * bigger than max segment size, but this - * becomes false after multipage bvecs. - */ - advance = queue_max_segment_size(q) - seg_size; - - if (advance > 0) { - seg_size += advance; - sectors += advance >> 9; - bv.bv_len -= advance; - bv.bv_offset += advance; - } - - /* - * Still need to put remainder of current - * bvec into a new segment. 
- */ - goto new_segment; - } seg_size += bv.bv_len; bvprv = bv; @@ -180,12 +159,6 @@ new_segment: seg_size = bv.bv_len; sectors += bv.bv_len >> 9; - /* restore the bvec for iterator */ - if (advance) { - bv.bv_len += advance; - bv.bv_offset -= advance; - advance = 0; - } } do_split = false; @@ -386,29 +359,16 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, { int nbytes = bvec->bv_len; - unsigned advance = 0; if (*sg && *cluster) { + if ((*sg)->length + nbytes > queue_max_segment_size(q)) + goto new_segment; + if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) goto new_segment; if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) goto new_segment; - /* - * try best to merge part of the bvec into previous - * segment and follow same policy with - * blk_bio_segment_split() - */ - if ((*sg)->length + nbytes > queue_max_segment_size(q)) { - advance = queue_max_segment_size(q) - (*sg)->length; - if (advance) { - (*sg)->length += advance; - bvec->bv_offset += advance; - bvec->bv_len -= advance; - } - goto new_segment; - } - (*sg)->length += nbytes; } else { new_segment: @@ -431,10 +391,6 @@ new_segment: sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); (*nsegs)++; - - /* for making iterator happy */ - bvec->bv_offset -= advance; - bvec->bv_len += advance; } *bvprv = *bvec; } |