author    | Ming Lei <ming.lei@redhat.com> | 2019-02-27 15:40:11 +0300
committer | Jens Axboe <axboe@kernel.dk>   | 2019-02-27 16:18:54 +0300
commit    | 48d7727cae1209235700ed90f8f11426027b333b (patch)
tree      | b4232539d43b720be2c0ff40017c14457000e0e2 /block/blk-merge.c
parent    | 4d633062c1c0794a6b3836b7b55afba4599736e8 (diff)
download  | linux-48d7727cae1209235700ed90f8f11426027b333b.tar.xz
block: optimize __blk_segment_map_sg() for single-page bvec
Introduce a fast path for single-page bvec IO, so that blk_bvec_map_sg()
can be avoided.
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r-- | block/blk-merge.c | 9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index c7e8a8273460..c1ad8abbd9d6 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -447,7 +447,7 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
 }
 
-static struct scatterlist *blk_next_sg(struct scatterlist **sg,
+static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 		struct scatterlist *sglist)
 {
 	if (!*sg)
@@ -512,7 +512,12 @@ __blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
 			(*sg)->length += nbytes;
 	} else {
 new_segment:
-		(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
+		if (bvec->bv_offset + bvec->bv_len <= PAGE_SIZE) {
+			*sg = blk_next_sg(sg, sglist);
+			sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
+			(*nsegs) += 1;
+		} else
+			(*nsegs) += blk_bvec_map_sg(q, bvec, sglist, sg);
 	}
 	*bvprv = *bvec;
 }
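
As a rough illustration of the single-page condition used in the fast path above, here is a minimal user-space sketch. The fixed PAGE_SIZE, the simplified struct bio_vec, the bvec_is_single_page() helper, and the sample values are assumptions for the example only; the real code operates on the kernel's struct bio_vec and struct scatterlist inside __blk_segment_map_sg().

```c
#include <stdio.h>

/* Assumed 4 KiB page size; the kernel uses the arch-defined PAGE_SIZE. */
#define PAGE_SIZE 4096u

/* Simplified stand-in for the kernel's struct bio_vec. */
struct bio_vec {
	unsigned int bv_offset;	/* offset of the data within its first page */
	unsigned int bv_len;	/* total length of the segment in bytes */
};

/*
 * A bvec can be mapped to a single scatterlist entry (sg_set_page())
 * when offset + length stays within the first page; otherwise the
 * per-page loop in blk_bvec_map_sg() is still needed.
 */
static int bvec_is_single_page(const struct bio_vec *bv)
{
	return bv->bv_offset + bv->bv_len <= PAGE_SIZE;
}

int main(void)
{
	struct bio_vec a = { .bv_offset = 512,  .bv_len = 1024 };	/* fits in one page */
	struct bio_vec b = { .bv_offset = 3072, .bv_len = 2048 };	/* crosses a page boundary */

	printf("a: %s\n", bvec_is_single_page(&a) ? "fast path" : "blk_bvec_map_sg");
	printf("b: %s\n", bvec_is_single_page(&b) ? "fast path" : "blk_bvec_map_sg");
	return 0;
}
```

Single-page bvecs are by far the common case for buffered IO, so skipping the general multi-page mapping loop for them avoids per-bvec overhead on the hot path while leaving multi-page bvecs to blk_bvec_map_sg().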