author     Christoph Hellwig <hch@lst.de>                      2024-11-04 09:26:29 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>     2024-12-05 16:01:14 +0300
commit     7044259018f2ac79c322e22e29d18beab4d04c31 (patch)
tree       8b1659044f2da9e051ed7b50c7bcdd75ace0495b /block/blk-merge.c
parent     8cc1df3113cb71a0df2c46dd5b102c9e11c8a8c6 (diff)
download   linux-7044259018f2ac79c322e22e29d18beab4d04c31.tar.xz
block: take chunk_sectors into account in bio_split_write_zeroes
[ Upstream commit 60dc5ea6bcfd078b71419640d49afa649acf9450 ]

For zoned devices, write zeroes must be split at the zone boundary
which is represented as chunk_sectors. For other uses like the
internally RAIDed NVMe devices it is probably at least useful.

Enhance get_max_io_size to know about write zeroes and use it in
bio_split_write_zeroes. Also add a comment about the seemingly
nonsensical zero max_write_zeroes limit.

Fixes: 885fa13f6559 ("block: implement splitting of REQ_OP_WRITE_ZEROES bios")
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
Link: https://lore.kernel.org/r/20241104062647.91160-2-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Sasha Levin <sashal@kernel.org>
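As a quick illustration of the rule the patch encodes (a standalone userspace sketch, not the kernel code): a write zeroes bio may cover at most min(max_write_zeroes_sectors, sectors left until the next chunk_sectors boundary). The helper names below (boundary_sectors_left, max_write_zeroes_split) are made up for illustration; in the patch itself this clamp is what get_max_io_size() computes for REQ_OP_WRITE_ZEROES bios.

/*
 * Standalone sketch (not kernel code): how a per-operation limit and a
 * chunk/zone boundary both cap the size of one split.
 */
#include <stdio.h>

/* Sectors left until the next chunk_sectors boundary. */
static unsigned int boundary_sectors_left(unsigned long long sector,
					  unsigned int chunk_sectors)
{
	return chunk_sectors - (unsigned int)(sector % chunk_sectors);
}

/*
 * Largest number of sectors one write-zeroes bio may cover, given the
 * device limit and, when set, the chunk/zone boundary.
 */
static unsigned int max_write_zeroes_split(unsigned long long start_sector,
					   unsigned int max_write_zeroes_sectors,
					   unsigned int chunk_sectors)
{
	unsigned int max = max_write_zeroes_sectors;

	if (chunk_sectors) {
		unsigned int left = boundary_sectors_left(start_sector,
							  chunk_sectors);
		if (left < max)
			max = left;
	}
	return max;
}

int main(void)
{
	/*
	 * A bio starting 16 sectors into a 256-sector zone may cover at
	 * most 240 sectors, even though the write-zeroes limit is larger.
	 */
	printf("%u\n", max_write_zeroes_split(16, 1024, 256));	/* 240 */
	/* Without a boundary, only the per-op limit applies. */
	printf("%u\n", max_write_zeroes_split(16, 1024, 0));	/* 1024 */
	return 0;
}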
Diffstat (limited to 'block/blk-merge.c')
-rw-r--r--  block/blk-merge.c  |  35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ad763ec313b6..75d2461b69e4 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -166,17 +166,6 @@ struct bio *bio_split_discard(struct bio *bio, const struct queue_limits *lim,
 	return bio_submit_split(bio, split_sectors);
 }
 
-struct bio *bio_split_write_zeroes(struct bio *bio,
-		const struct queue_limits *lim, unsigned *nsegs)
-{
-	*nsegs = 0;
-	if (!lim->max_write_zeroes_sectors)
-		return bio;
-	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
-		return bio;
-	return bio_submit_split(bio, lim->max_write_zeroes_sectors);
-}
-
 static inline unsigned int blk_boundary_sectors(const struct queue_limits *lim,
 						bool is_atomic)
 {
@@ -211,7 +200,9 @@ static inline unsigned get_max_io_size(struct bio *bio,
 	 * We ignore lim->max_sectors for atomic writes because it may less
	 * than the actual bio size, which we cannot tolerate.
	 */
-	if (is_atomic)
+	if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
+		max_sectors = lim->max_write_zeroes_sectors;
+	else if (is_atomic)
 		max_sectors = lim->atomic_write_max_sectors;
 	else
 		max_sectors = lim->max_sectors;
@@ -398,6 +389,26 @@ struct bio *bio_split_zone_append(struct bio *bio,
 	return bio_submit_split(bio, split_sectors);
 }
 
+struct bio *bio_split_write_zeroes(struct bio *bio,
+		const struct queue_limits *lim, unsigned *nsegs)
+{
+	unsigned int max_sectors = get_max_io_size(bio, lim);
+
+	*nsegs = 0;
+
+	/*
+	 * An unset limit should normally not happen, as bio submission is keyed
+	 * off having a non-zero limit. But SCSI can clear the limit in the
+	 * I/O completion handler, and we can race and see this. Splitting to a
+	 * zero limit obviously doesn't make sense, so band-aid it here.
+	 */
+	if (!max_sectors)
+		return bio;
+	if (bio_sectors(bio) <= max_sectors)
+		return bio;
+	return bio_submit_split(bio, max_sectors);
+}
+
 /**
  * bio_split_to_limits - split a bio to fit the queue limits
  * @bio: bio to be split