author		Keith Busch <kbusch@kernel.org>	2022-11-10 21:44:57 +0300
committer	Jens Axboe <axboe@kernel.dk>	2022-11-17 01:58:11 +0300
commit		c964d62f5cab7b43dd0534f22a96eab386c6ec5d (patch)
tree		5a443eb1b56ce9e0961cf4afa5f2dce93607332e /block
parent		a7a1598189228b5007369a9622ccdf587be0730f (diff)
download	linux-c964d62f5cab7b43dd0534f22a96eab386c6ec5d.tar.xz
block: make dma_alignment a stacking queue_limit
Device-mapper devices have always received the default 511 dma mask, but
the underlying device might have a larger alignment requirement. Since
this value is used to determine allowable direct-io alignment, it needs
to be a stackable limit.
Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Link: https://lore.kernel.org/r/20221110184501.2451620-2-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
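The core of the patch is the stacking rule added to blk_stack_limits():
dma_alignment is stored as a mask of the form 2^n - 1 (511 means 512-byte
alignment, 3 means 4-byte alignment), so taking the max of the top and
bottom masks selects the stricter requirement. Below is a minimal
userspace sketch of that rule, not kernel code; stack_dma_alignment()
and the sample mask values are illustrative assumptions.

#include <stdio.h>

/*
 * Sketch of the stacking rule. dma_alignment masks are 2^n - 1
 * (511 => 512-byte alignment, 3 => 4-byte alignment), so max() of two
 * masks picks the stricter requirement, which is what the patch's
 * t->dma_alignment = max(t->dma_alignment, b->dma_alignment) line does.
 * stack_dma_alignment() is an illustrative name, not a kernel function.
 */
static unsigned int stack_dma_alignment(unsigned int t, unsigned int b)
{
	return t > b ? t : b;
}

int main(void)
{
	unsigned int top = 511;     /* old device-mapper default */
	unsigned int bottom = 4095; /* hypothetical 4k-aligned bottom device */

	/* Prints 4095: the stacked queue now honors the bottom device. */
	printf("stacked mask: %u\n", stack_dma_alignment(top, bottom));
	return 0;
}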
Diffstat (limited to 'block')
 block/blk-core.c     |  1 -
 block/blk-settings.c |  8 +++++---
 2 files changed, 5 insertions(+), 4 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 17667159482e..5487912befe8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -425,7 +425,6 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
 				PERCPU_REF_INIT_ATOMIC, GFP_KERNEL))
 		goto fail_stats;
 
-	blk_queue_dma_alignment(q, 511);
 	blk_set_default_limits(&q->limits);
 	q->nr_requests = BLKDEV_DEFAULT_RQ;
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 8bb9eef5310e..4949ed3ce7c9 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -57,6 +57,7 @@ void blk_set_default_limits(struct queue_limits *lim)
 	lim->misaligned = 0;
 	lim->zoned = BLK_ZONED_NONE;
 	lim->zone_write_granularity = 0;
+	lim->dma_alignment = 511;
 }
 EXPORT_SYMBOL(blk_set_default_limits);
 
@@ -600,6 +601,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	t->io_min = max(t->io_min, b->io_min);
 	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
+	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);
 
 	/* Set non-power-of-2 compatible chunk_sectors boundary */
 	if (b->chunk_sectors)
@@ -773,7 +775,7 @@ EXPORT_SYMBOL(blk_queue_virt_boundary);
 **/
 void blk_queue_dma_alignment(struct request_queue *q, int mask)
 {
-	q->dma_alignment = mask;
+	q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_dma_alignment);
 
@@ -795,8 +797,8 @@ void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
 {
 	BUG_ON(mask > PAGE_SIZE);
 
-	if (mask > q->dma_alignment)
-		q->dma_alignment = mask;
+	if (mask > q->limits.dma_alignment)
+		q->limits.dma_alignment = mask;
 }
 EXPORT_SYMBOL(blk_queue_update_dma_alignment);
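As the commit message notes, this mask determines allowable direct-io
alignment: a user buffer is admissible only if its address has no bits
set within the mask. A hedged userspace sketch of that test follows;
is_dio_aligned() is an invented name (in the kernel the mask is read
through helpers such as queue_dma_alignment()), and the 4095 mask is a
hypothetical bottom-device requirement.

#include <stdio.h>

/*
 * Sketch of the direct-io admissibility test implied by the commit
 * message: an address passes only if it has no bits set within the
 * queue's dma_alignment mask.
 */
static int is_dio_aligned(unsigned long addr, unsigned int mask)
{
	return (addr & mask) == 0;
}

int main(void)
{
	unsigned int mask = 4095; /* hypothetical 4k requirement */

	/* A 512-byte-aligned buffer passed under the old 511 default,
	 * but is correctly rejected once the bottom device's mask
	 * stacks up. */
	printf("0x200 ok: %d\n", is_dio_aligned(0x200, mask));   /* 0 */
	printf("0x1000 ok: %d\n", is_dio_aligned(0x1000, mask)); /* 1 */
	return 0;
}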