Diffstat (limited to 'block/blk-settings.c')
-rw-r--r--  block/blk-settings.c | 64
1 file changed, 27 insertions(+), 37 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 292aadf8d807..a7fe8e90240a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -146,8 +146,7 @@ static int blk_validate_limits(struct queue_limits *lim)
max_hw_sectors = min_not_zero(lim->max_hw_sectors,
lim->max_dev_sectors);
if (lim->max_user_sectors) {
- if (lim->max_user_sectors > max_hw_sectors ||
- lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+ if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
return -EINVAL;
lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
} else {
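
Note on the hunk above: a user-supplied limit larger than the hardware limit is no longer rejected, because the min() on the following line already clamps it; only the lower bound (at least one page worth of sectors) still returns -EINVAL. A minimal standalone sketch of the resulting semantics (the 4k PAGE_SIZE value, the helper name and the harness are illustrative assumptions, not kernel code):

    #include <stdio.h>

    #define PAGE_SIZE   4096u
    #define SECTOR_SIZE  512u

    /* Userspace mirror of the validation above: the upper bound is no
     * longer an error, since the clamp below already handles it. */
    static int validate_max_sectors(unsigned int max_hw_sectors,
                                    unsigned int max_user_sectors,
                                    unsigned int *max_sectors)
    {
            if (max_user_sectors) {
                    if (max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
                            return -1;  /* too small to hold one page */
                    *max_sectors = max_user_sectors < max_hw_sectors ?
                                    max_user_sectors : max_hw_sectors;
            } else {
                    *max_sectors = max_hw_sectors;  /* simplified default */
            }
            return 0;
    }

    int main(void)
    {
            unsigned int ms;

            /* User asks for far more than the hardware allows:
             * clamped to 2048, not rejected with -EINVAL. */
            if (!validate_max_sectors(2048, 1 << 20, &ms))
                    printf("max_sectors = %u\n", ms);
            return 0;
    }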
@@ -183,16 +182,15 @@ static int blk_validate_limits(struct queue_limits *lim)
return -EINVAL;
/*
- * Devices that require a virtual boundary do not support scatter/gather
- * I/O natively, but instead require a descriptor list entry for each
- * page (which might not be identical to the Linux PAGE_SIZE). Because
- * of that they are not limited by our notion of "segment size".
+ * A stacking device may have both a virtual boundary and a max segment
+ * size limit, so allow this setting now. Long term, the two might need
+ * to move out of the stacking limits, since we have immutable bvecs and
+ * lower layer bio splitting is supposed to handle the two correctly.
*/
if (lim->virt_boundary_mask) {
- if (WARN_ON_ONCE(lim->max_segment_size &&
- lim->max_segment_size != UINT_MAX))
- return -EINVAL;
- lim->max_segment_size = UINT_MAX;
+ if (!lim->max_segment_size)
+ lim->max_segment_size = UINT_MAX;
} else {
/*
* The maximum segment size has an odd historic 64k default that
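
The behavioral change in this hunk is small but worth spelling out: a caller that sets both virt_boundary_mask and max_segment_size no longer trips the WARN/-EINVAL path, and UINT_MAX is now only a default for the unset case. A hedged sketch of the new rule (the struct and function names here are made up for illustration; only the two fields from the hunk are modeled):

    #include <limits.h>

    /* Cut-down stand-in for struct queue_limits. */
    struct limits_sketch {
            unsigned long virt_boundary_mask;
            unsigned int  max_segment_size;
    };

    /* Default max_segment_size only when the driver left it unset; a
     * device that provides both a virtual boundary and a segment size
     * now keeps both values. */
    static void apply_virt_boundary_rule(struct limits_sketch *lim)
    {
            if (lim->virt_boundary_mask && !lim->max_segment_size)
                    lim->max_segment_size = UINT_MAX;
    }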
@@ -347,24 +345,32 @@ EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
* blk_queue_max_zone_append_sectors - set max sectors for a single zone append
* @q: the request queue for the device
* @max_zone_append_sectors: maximum number of sectors to write per command
+ *
+ * Sets the maximum number of sectors allowed for zone append commands. If
+ * Specifying 0 for @max_zone_append_sectors indicates that the queue does
+ * not natively support zone append operations and that the block layer must
+ * emulate these operations using regular writes.
**/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
unsigned int max_zone_append_sectors)
{
- unsigned int max_sectors;
+ unsigned int max_sectors = 0;
if (WARN_ON(!blk_queue_is_zoned(q)))
return;
- max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
- max_sectors = min(q->limits.chunk_sectors, max_sectors);
+ if (max_zone_append_sectors) {
+ max_sectors = min(q->limits.max_hw_sectors,
+ max_zone_append_sectors);
+ max_sectors = min(q->limits.chunk_sectors, max_sectors);
- /*
- * Signal eventual driver bugs resulting in the max_zone_append sectors limit
- * being 0 due to a 0 argument, the chunk_sectors limit (zone size) not set,
- * or the max_hw_sectors limit not set.
- */
- WARN_ON(!max_sectors);
+ /*
+ * Signal eventual driver bugs resulting in the max_zone_append
+ * sectors limit being 0 due to the chunk_sectors limit (zone
+ * size) not set or the max_hw_sectors limit not set.
+ */
+ WARN_ON_ONCE(!max_sectors);
+ }
q->limits.max_zone_append_sectors = max_sectors;
}
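
With this change, 0 becomes a meaningful argument rather than a driver bug: per the updated kernel-doc, it requests zone append emulation via regular writes, and the WARN now only fires when a nonzero request collapses to 0 because chunk_sectors or max_hw_sectors is unset. A driver-side usage sketch (everything except blk_queue_max_zone_append_sectors() itself is a hypothetical name for illustration):

    #include <stdbool.h>

    struct request_queue;   /* opaque here; the real type is in blkdev.h */
    void blk_queue_max_zone_append_sectors(struct request_queue *q,
                                           unsigned int max_zone_append_sectors);

    /* Hypothetical probe-time fragment. */
    static void setup_zone_append(struct request_queue *q,
                                  bool native_zone_append,
                                  unsigned int dev_max_append_sectors)
    {
            if (native_zone_append)
                    /* Capped against max_hw_sectors and chunk_sectors
                     * as shown in the hunk above. */
                    blk_queue_max_zone_append_sectors(q, dev_max_append_sectors);
            else
                    /* 0 now means "emulate with regular writes". */
                    blk_queue_max_zone_append_sectors(q, 0);
    }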
@@ -609,8 +615,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
b->max_write_zeroes_sectors);
- t->max_zone_append_sectors = min(t->max_zone_append_sectors,
- b->max_zone_append_sectors);
+ t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
+ queue_limits_max_zone_append_sectors(b));
t->bounce = max(t->bounce, b->bounce);
t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
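
The stacking path now reads both limits through queue_limits_max_zone_append_sectors() rather than the raw field, so a bottom device that stored 0 (emulation, per the previous hunk) does not zero out the combined limit. The helper's definition is not part of this diff; a standalone sketch of the behavior it needs under that assumption (min_not_zero() semantics are open-coded here):

    /* Stand-in for the fields of struct queue_limits used above. */
    struct queue_limits_sketch {
            unsigned int chunk_sectors;
            unsigned int max_hw_sectors;
            unsigned int max_zone_append_sectors;
    };

    static inline unsigned int min_u32(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    static inline unsigned int
    zone_append_limit_sketch(const struct queue_limits_sketch *l)
    {
            /* A zone append can never exceed the zone size
             * (chunk_sectors) or the hardware transfer limit ... */
            unsigned int max_sectors = min_u32(l->chunk_sectors,
                                               l->max_hw_sectors);

            /* ... and 0 (emulation) falls back to that cap instead of
             * dragging the stacked limit down to 0. */
            return l->max_zone_append_sectors ?
                    min_u32(l->max_zone_append_sectors, max_sectors) :
                    max_sectors;
    }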
@@ -823,22 +829,6 @@ void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
/**
- * blk_queue_required_elevator_features - Set a queue required elevator features
- * @q: the request queue for the target device
- * @features: Required elevator features OR'ed together
- *
- * Tell the block layer that for the device controlled through @q, only the
- * only elevators that can be used are those that implement at least the set of
- * features specified by @features.
- */
-void blk_queue_required_elevator_features(struct request_queue *q,
- unsigned int features)
-{
- q->required_elevator_features = features;
-}
-EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
-
-/**
* disk_set_zoned - indicate a zoned device
* @disk: gendisk to configure
*/
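
For reference, the helper removed above was how a driver restricted scheduler selection to elevators implementing a given feature set; zoned drivers that relied on scheduler-side zone write locking were the typical callers. A reconstruction of the old-style usage (the flag name and value are cited from memory, not from this diff):

    struct request_queue;
    void blk_queue_required_elevator_features(struct request_queue *q,
                                              unsigned int features);

    /* Assumed definition, formerly in elevator.h. */
    #define ELEVATOR_F_ZBD_SEQ_WRITE (1U << 0)

    /* e.g. from a zoned driver's setup path, before this removal: */
    static void old_style_setup(struct request_queue *q)
    {
            blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
    }

With zone append emulation handling ordering in the block layer, no caller of the helper appears to remain, which is presumably why the export could go.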