author		Christoph Hellwig <hch@lst.de>	2019-06-06 13:29:01 +0300
committer	Jens Axboe <axboe@kernel.dk>	2019-06-20 19:29:22 +0300
commit		14ccb66b3f585b2bc21e7256c96090abed5a512c (patch)
tree		7b0d48d59ee474ac1a590352507c7890c16f1e8d /block/blk-merge.c
parent		f924cddebc900f7cb10d5538d69523e558fa681c (diff)
download	linux-14ccb66b3f585b2bc21e7256c96090abed5a512c.tar.xz
block: remove the bi_phys_segments field in struct bio
We only need the number of segments in the blk-mq submission path.
Remove the field from struct bio, and instead return the segment count
from a variant of blk_queue_split so that it can be passed as an
argument to the functions that need the value.
This also means we stop recounting segments except for cloning
and partial segments.
To keep the number of arguments in this hot path down, remove the
pointless struct request_queue argument from every function that
grew a nr_segs argument.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
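[Editorial note] For orientation, a minimal sketch of the calling convention this patch establishes: the segment count is produced once by __blk_queue_split() as a side effect of splitting, then handed down to the merge helpers instead of being stored in bio->bi_phys_segments. The surrounding function below is illustrative only; the real blk-mq caller lives in block/blk-mq.c, which is outside this diffstat.

/*
 * Illustrative sketch only -- not part of this diff.  A blk-mq style
 * submission path using the new variant: split once, capture the
 * segment count, and pass it along to the merge helpers.
 */
static void example_submit(struct request_queue *q, struct bio *bio,
		struct request *req)
{
	unsigned int nr_segs;

	__blk_queue_split(q, &bio, &nr_segs);	/* counts segments while splitting */

	/* merge attempts reuse the count instead of recounting the bio */
	if (!ll_back_merge_fn(req, bio, nr_segs))
		return;	/* no merge possible; caller would allocate a new request */
}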
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--	block/blk-merge.c	75
1 file changed, 27 insertions(+), 48 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 17713d7d98d5..72b4fd89a22d 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -258,32 +258,29 @@ split:
 	return do_split ? new : NULL;
 }
 
-void blk_queue_split(struct request_queue *q, struct bio **bio)
+void __blk_queue_split(struct request_queue *q, struct bio **bio,
+		unsigned int *nr_segs)
 {
-	struct bio *split, *res;
-	unsigned nsegs;
+	struct bio *split;
 
 	switch (bio_op(*bio)) {
 	case REQ_OP_DISCARD:
 	case REQ_OP_SECURE_ERASE:
-		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
+		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
 		break;
 	case REQ_OP_WRITE_ZEROES:
-		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
+		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
+				nr_segs);
 		break;
 	case REQ_OP_WRITE_SAME:
-		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
+		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
+				nr_segs);
 		break;
 	default:
-		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
+		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
 		break;
 	}
 
-	/* physical segments can be figured out during splitting */
-	res = split ? split : *bio;
-	res->bi_phys_segments = nsegs;
-	bio_set_flag(res, BIO_SEG_VALID);
-
 	if (split) {
 		/* there isn't chance to merge the splitted bio */
 		split->bi_opf |= REQ_NOMERGE;
@@ -304,6 +301,13 @@ void blk_queue_split(struct request_queue *q, struct bio **bio)
 		*bio = split;
 	}
 }
+
+void blk_queue_split(struct request_queue *q, struct bio **bio)
+{
+	unsigned int nr_segs;
+
+	__blk_queue_split(q, bio, &nr_segs);
+}
 EXPORT_SYMBOL(blk_queue_split);
 
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
@@ -338,17 +342,6 @@ void blk_recalc_rq_segments(struct request *rq)
 	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
 }
 
-void blk_recount_segments(struct request_queue *q, struct bio *bio)
-{
-	struct bio *nxt = bio->bi_next;
-
-	bio->bi_next = NULL;
-	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
-	bio->bi_next = nxt;
-
-	bio_set_flag(bio, BIO_SEG_VALID);
-}
-
 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
 		struct scatterlist *sglist)
 {
@@ -519,16 +512,13 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
 }
 EXPORT_SYMBOL(blk_rq_map_sg);
 
-static inline int ll_new_hw_segment(struct request_queue *q,
-				    struct request *req,
-				    struct bio *bio)
+static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
+		unsigned int nr_phys_segs)
 {
-	int nr_phys_segs = bio_phys_segments(q, bio);
-
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
 		goto no_merge;
 
-	if (blk_integrity_merge_bio(q, req, bio) == false)
+	if (blk_integrity_merge_bio(req->q, req, bio) == false)
 		goto no_merge;
 
 	/*
@@ -539,12 +529,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 	return 1;
 
 no_merge:
-	req_set_nomerge(q, req);
+	req_set_nomerge(req->q, req);
 	return 0;
 }
 
-int ll_back_merge_fn(struct request_queue *q, struct request *req,
-		     struct bio *bio)
+int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 {
 	if (req_gap_back_merge(req, bio))
 		return 0;
@@ -553,21 +542,15 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
-		req_set_nomerge(q, req);
+		req_set_nomerge(req->q, req);
 		return 0;
 	}
-	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
-		blk_recount_segments(q, req->biotail);
-	if (!bio_flagged(bio, BIO_SEG_VALID))
-		blk_recount_segments(q, bio);
 
-	return ll_new_hw_segment(q, req, bio);
+	return ll_new_hw_segment(req, bio, nr_segs);
}
 
-int ll_front_merge_fn(struct request_queue *q, struct request *req,
-		      struct bio *bio)
+int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
 {
-
 	if (req_gap_front_merge(req, bio))
 		return 0;
 	if (blk_integrity_rq(req) &&
@@ -575,15 +558,11 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 		return 0;
 	if (blk_rq_sectors(req) + bio_sectors(bio) >
 	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
-		req_set_nomerge(q, req);
+		req_set_nomerge(req->q, req);
 		return 0;
 	}
-	if (!bio_flagged(bio, BIO_SEG_VALID))
-		blk_recount_segments(q, bio);
-	if (!bio_flagged(req->bio, BIO_SEG_VALID))
-		blk_recount_segments(q, req->bio);
 
-	return ll_new_hw_segment(q, req, bio);
+	return ll_new_hw_segment(req, bio, nr_segs);
 }
 
 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
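[Editorial note] A word on the design, with a hedged sketch: exported users of blk_queue_split() (stacking drivers, for instance) have no use for the segment count, so the patch keeps the old two-argument interface as a thin wrapper that computes nr_segs internally and discards it. Because the count is now produced at split time and threaded through the merge path, the lazy BIO_SEG_VALID / blk_recount_segments() machinery can be deleted outright, as the hunks above show. The caller below is hypothetical:

/*
 * Illustrative sketch only -- not part of this diff.  A driver that
 * does not care about the segment count keeps using the old exported
 * interface; the wrapper throws nr_segs away.
 */
static void example_driver_make_request(struct request_queue *q, struct bio *bio)
{
	blk_queue_split(q, &bio);	/* may replace bio with the split-off head */
}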