Diffstat (limited to 'block/blk-settings.c')
-rw-r--r-- | block/blk-settings.c | 112 |
1 file changed, 68 insertions(+), 44 deletions(-)
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 64f2e67238d7..44dabc636a59 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -21,7 +21,7 @@
 
 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
 {
-        q->rq_timeout = timeout;
+        WRITE_ONCE(q->rq_timeout, timeout);
 }
 EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
 
@@ -61,8 +61,14 @@ void blk_apply_bdi_limits(struct backing_dev_info *bdi,
         /*
          * For read-ahead of large files to be effective, we need to read ahead
          * at least twice the optimal I/O size.
+         *
+         * There is no hardware limitation for the read-ahead size and the user
+         * might have increased the read-ahead size through sysfs, so don't ever
+         * decrease it.
          */
-        bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
+        bdi->ra_pages = max3(bdi->ra_pages,
+                        lim->io_opt * 2 / PAGE_SIZE,
+                        VM_READAHEAD_PAGES);
         bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
 }
 
@@ -114,6 +120,7 @@ static int blk_validate_integrity_limits(struct queue_limits *lim)
                         pr_warn("invalid PI settings.\n");
                         return -EINVAL;
                 }
+                bi->flags |= BLK_INTEGRITY_NOGENERATE | BLK_INTEGRITY_NOVERIFY;
                 return 0;
         }
 
@@ -174,6 +181,11 @@ static void blk_atomic_writes_update_limits(struct queue_limits *lim)
 static void blk_validate_atomic_write_limits(struct queue_limits *lim)
 {
         unsigned int boundary_sectors;
+        unsigned int atomic_write_hw_max_sectors =
+                        lim->atomic_write_hw_max >> SECTOR_SHIFT;
+
+        if (!(lim->features & BLK_FEAT_ATOMIC_WRITES))
+                goto unsupported;
 
         if (!lim->atomic_write_hw_max)
                 goto unsupported;
@@ -192,6 +204,10 @@ static void blk_validate_atomic_write_limits(struct queue_limits *lim)
                         lim->atomic_write_hw_max))
                 goto unsupported;
 
+        if (WARN_ON_ONCE(lim->chunk_sectors &&
+                        atomic_write_hw_max_sectors > lim->chunk_sectors))
+                goto unsupported;
+
         boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
 
         if (boundary_sectors) {
@@ -243,6 +259,7 @@ int blk_validate_limits(struct queue_limits *lim)
 {
         unsigned int max_hw_sectors;
         unsigned int logical_block_sectors;
+        unsigned long seg_size;
         int err;
 
         /*
@@ -300,7 +317,7 @@ int blk_validate_limits(struct queue_limits *lim)
         max_hw_sectors = min_not_zero(lim->max_hw_sectors,
                                 lim->max_dev_sectors);
         if (lim->max_user_sectors) {
-                if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
+                if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
                         return -EINVAL;
                 lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
         } else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
@@ -325,12 +342,19 @@ int blk_validate_limits(struct queue_limits *lim)
         lim->max_discard_sectors =
                 min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
 
+        /*
+         * When discard is not supported, discard_granularity should be reported
+         * as 0 to userspace.
+         */
+        if (lim->max_discard_sectors)
+                lim->discard_granularity =
+                        max(lim->discard_granularity, lim->physical_block_size);
+        else
+                lim->discard_granularity = 0;
+
         if (!lim->max_discard_segments)
                 lim->max_discard_segments = 1;
 
-        if (lim->discard_granularity < lim->physical_block_size)
-                lim->discard_granularity = lim->physical_block_size;
-
         /*
          * By default there is no limit on the segment boundary alignment,
          * but if there is one it can't be smaller than the page size as
@@ -338,7 +362,7 @@ int blk_validate_limits(struct queue_limits *lim)
          */
         if (!lim->seg_boundary_mask)
                 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
-        if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
+        if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
                 return -EINVAL;
 
         /*
@@ -359,10 +383,17 @@ int blk_validate_limits(struct queue_limits *lim)
          */
         if (!lim->max_segment_size)
                 lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
-        if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
+        if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
                 return -EINVAL;
         }
 
+        /* setup min segment size for building new segment in fast path */
+        if (lim->seg_boundary_mask > lim->max_segment_size - 1)
+                seg_size = lim->max_segment_size;
+        else
+                seg_size = lim->seg_boundary_mask + 1;
+        lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);
+
         /*
          * We require drivers to at least do logical block aligned I/O, but
          * historically could not check for that due to the separate calls
@@ -413,7 +444,8 @@ int blk_set_default_limits(struct queue_limits *lim)
  * @lim:	limits to apply
  *
  * Apply the limits in @lim that were obtained from queue_limits_start_update()
- * and updated by the caller to @q.
+ * and updated by the caller to @q. The caller must have frozen the queue or
+ * ensure that there are no outstanding I/Os by other means.
  *
  * Returns 0 if successful, else a negative error code.
  */
@@ -457,11 +489,12 @@ EXPORT_SYMBOL_GPL(queue_limits_commit_update);
 int queue_limits_commit_update_frozen(struct request_queue *q,
                 struct queue_limits *lim)
 {
+        unsigned int memflags;
         int ret;
 
-        blk_mq_freeze_queue(q);
+        memflags = blk_mq_freeze_queue(q);
         ret = queue_limits_commit_update(q, lim);
-        blk_mq_unfreeze_queue(q);
+        blk_mq_unfreeze_queue(q, memflags);
 
         return ret;
 }
@@ -610,10 +643,10 @@ static bool blk_stack_atomic_writes_head(struct queue_limits *t,
 static void blk_stack_atomic_writes_limits(struct queue_limits *t,
                                 struct queue_limits *b, sector_t start)
 {
-        if (!(t->features & BLK_FEAT_ATOMIC_WRITES_STACKED))
+        if (!(b->features & BLK_FEAT_ATOMIC_WRITES))
                 goto unsupported;
 
-        if (!b->atomic_write_unit_min)
+        if (!b->atomic_write_hw_unit_min)
                 goto unsupported;
 
         if (!blk_atomic_write_start_sect_aligned(start, b))
@@ -638,7 +671,6 @@ unsupported:
         t->atomic_write_hw_unit_max = 0;
         t->atomic_write_hw_unit_min = 0;
         t->atomic_write_hw_boundary = 0;
-        t->features &= ~BLK_FEAT_ATOMIC_WRITES_STACKED;
 }
 
 /**
@@ -760,7 +792,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
         }
 
         /* chunk_sectors a multiple of the physical block size? */
-        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
+        if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
                 t->chunk_sectors = 0;
                 t->flags |= BLK_FLAG_MISALIGNED;
                 ret = -1;
@@ -855,36 +887,28 @@ bool queue_limits_stack_integrity(struct queue_limits *t,
         if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
                 return true;
 
-        if (!ti->tuple_size) {
-                /* inherit the settings from the first underlying device */
-                if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
-                        ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
-                                (bi->flags & BLK_INTEGRITY_REF_TAG);
-                        ti->csum_type = bi->csum_type;
-                        ti->tuple_size = bi->tuple_size;
-                        ti->pi_offset = bi->pi_offset;
-                        ti->interval_exp = bi->interval_exp;
-                        ti->tag_size = bi->tag_size;
-                        goto done;
-                }
-                if (!bi->tuple_size)
-                        goto done;
+        if (ti->flags & BLK_INTEGRITY_STACKED) {
+                if (ti->tuple_size != bi->tuple_size)
+                        goto incompatible;
+                if (ti->interval_exp != bi->interval_exp)
+                        goto incompatible;
+                if (ti->tag_size != bi->tag_size)
+                        goto incompatible;
+                if (ti->csum_type != bi->csum_type)
+                        goto incompatible;
+                if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
+                    (bi->flags & BLK_INTEGRITY_REF_TAG))
+                        goto incompatible;
+        } else {
+                ti->flags = BLK_INTEGRITY_STACKED;
+                ti->flags |= (bi->flags & BLK_INTEGRITY_DEVICE_CAPABLE) |
+                        (bi->flags & BLK_INTEGRITY_REF_TAG);
+                ti->csum_type = bi->csum_type;
+                ti->tuple_size = bi->tuple_size;
+                ti->pi_offset = bi->pi_offset;
+                ti->interval_exp = bi->interval_exp;
+                ti->tag_size = bi->tag_size;
         }
-
-        if (ti->tuple_size != bi->tuple_size)
-                goto incompatible;
-        if (ti->interval_exp != bi->interval_exp)
-                goto incompatible;
-        if (ti->tag_size != bi->tag_size)
-                goto incompatible;
-        if (ti->csum_type != bi->csum_type)
-                goto incompatible;
-        if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
-            (bi->flags & BLK_INTEGRITY_REF_TAG))
-                goto incompatible;
-
-done:
-        ti->flags |= BLK_INTEGRITY_STACKED;
         return true;
 
 incompatible:
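A side note on the min_segment_size hunk above: lim->seg_boundary_mask is an inclusive mask (boundary size minus one), which is why the new code compares it against max_segment_size - 1 before capping the result at PAGE_SIZE. The sketch below restates that calculation as a standalone C helper purely for illustration; the function and macro names are made up here and are not kernel symbols.

#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096UL        /* stand-in for the kernel's PAGE_SIZE */

/*
 * Mirrors the min_segment_size logic added to blk_validate_limits():
 * take the smaller of the segment-boundary window and the maximum
 * segment size, then cap the result at one page.
 */
static unsigned int example_min_segment_size(unsigned long seg_boundary_mask,
                                             unsigned long max_segment_size)
{
        unsigned long seg_size;

        if (seg_boundary_mask > max_segment_size - 1)
                seg_size = max_segment_size;
        else
                seg_size = seg_boundary_mask + 1;       /* mask is inclusive */

        return seg_size < EXAMPLE_PAGE_SIZE ? seg_size : EXAMPLE_PAGE_SIZE;
}

int main(void)
{
        /* 64k segments, no boundary restriction: capped at one page (4096) */
        printf("%u\n", example_min_segment_size(~0UL, 65536));
        /* 2k boundary window smaller than a page: 2048 */
        printf("%u\n", example_min_segment_size(2047, 65536));
        return 0;
}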