Diffstat (limited to 'block/blk-zoned.c')
-rw-r--r-- | block/blk-zoned.c | 453
1 file changed, 198 insertions, 255 deletions
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 4bc5f260248a..6fad6f3f6980 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -93,172 +93,85 @@ unsigned int blkdev_nr_zones(struct block_device *bdev)
 	if (!blk_queue_is_zoned(q))
 		return 0;
 
-	return __blkdev_nr_zones(q, bdev->bd_part->nr_sects);
+	return __blkdev_nr_zones(q, get_capacity(bdev->bd_disk));
 }
 EXPORT_SYMBOL_GPL(blkdev_nr_zones);
 
-/*
- * Check that a zone report belongs to this partition, and if yes, fix its start
- * sector and write pointer and return true. Return false otherwise.
- */
-static bool blkdev_report_zone(struct block_device *bdev, struct blk_zone *rep)
-{
-	sector_t offset = get_start_sect(bdev);
-
-	if (rep->start < offset)
-		return false;
-
-	rep->start -= offset;
-	if (rep->start + rep->len > bdev->bd_part->nr_sects)
-		return false;
-
-	if (rep->type == BLK_ZONE_TYPE_CONVENTIONAL)
-		rep->wp = rep->start + rep->len;
-	else
-		rep->wp -= offset;
-	return true;
-}
-
-static int blk_report_zones(struct gendisk *disk, sector_t sector,
-			    struct blk_zone *zones, unsigned int *nr_zones)
-{
-	struct request_queue *q = disk->queue;
-	unsigned int z = 0, n, nrz = *nr_zones;
-	sector_t capacity = get_capacity(disk);
-	int ret;
-
-	while (z < nrz && sector < capacity) {
-		n = nrz - z;
-		ret = disk->fops->report_zones(disk, sector, &zones[z], &n);
-		if (ret)
-			return ret;
-		if (!n)
-			break;
-		sector += blk_queue_zone_sectors(q) * n;
-		z += n;
-	}
-
-	WARN_ON(z > *nr_zones);
-	*nr_zones = z;
-
-	return 0;
-}
-
 /**
  * blkdev_report_zones - Get zones information
  * @bdev:	Target block device
  * @sector:	Sector from which to report zones
- * @zones:	Array of zone structures where to return the zones information
- * @nr_zones:	Number of zone structures in the zone array
+ * @nr_zones:	Maximum number of zones to report
+ * @cb:		Callback function called for each reported zone
+ * @data:	Private data for the callback
  *
  * Description:
- *    Get zone information starting from the zone containing @sector.
- *    The number of zone information reported may be less than the number
- *    requested by @nr_zones. The number of zones actually reported is
- *    returned in @nr_zones.
- *    The caller must use memalloc_noXX_save/restore() calls to control
- *    memory allocations done within this function (zone array and command
- *    buffer allocation by the device driver).
+ *    Get zone information starting from the zone containing @sector for at most
+ *    @nr_zones, and call @cb for each zone reported by the device.
+ *    To report all zones in a device starting from @sector, the BLK_ALL_ZONES
+ *    constant can be passed to @nr_zones.
+ *    Returns the number of zones reported by the device, or a negative errno
+ *    value in case of failure.
+ *
+ *    Note: The caller must use memalloc_noXX_save/restore() calls to control
+ *    memory allocations done within this function.
  */
 int blkdev_report_zones(struct block_device *bdev, sector_t sector,
-			struct blk_zone *zones, unsigned int *nr_zones)
+			unsigned int nr_zones, report_zones_cb cb, void *data)
 {
-	struct request_queue *q = bdev_get_queue(bdev);
-	unsigned int i, nrz;
-	int ret;
-
-	if (!blk_queue_is_zoned(q))
-		return -EOPNOTSUPP;
+	struct gendisk *disk = bdev->bd_disk;
+	sector_t capacity = get_capacity(disk);
 
-	/*
-	 * A block device that advertized itself as zoned must have a
-	 * report_zones method. If it does not have one defined, the device
-	 * driver has a bug. So warn about that.
-	 */
-	if (WARN_ON_ONCE(!bdev->bd_disk->fops->report_zones))
+	if (!blk_queue_is_zoned(bdev_get_queue(bdev)) ||
+	    WARN_ON_ONCE(!disk->fops->report_zones))
 		return -EOPNOTSUPP;
 
-	if (!*nr_zones || sector >= bdev->bd_part->nr_sects) {
-		*nr_zones = 0;
+	if (!nr_zones || sector >= capacity)
 		return 0;
-	}
-
-	nrz = min(*nr_zones,
-		  __blkdev_nr_zones(q, bdev->bd_part->nr_sects - sector));
-	ret = blk_report_zones(bdev->bd_disk, get_start_sect(bdev) + sector,
-			       zones, &nrz);
-	if (ret)
-		return ret;
-
-	for (i = 0; i < nrz; i++) {
-		if (!blkdev_report_zone(bdev, zones))
-			break;
-		zones++;
-	}
-	*nr_zones = i;
 
-	return 0;
+	return disk->fops->report_zones(disk, sector, nr_zones, cb, data);
 }
 EXPORT_SYMBOL_GPL(blkdev_report_zones);
 
-/*
- * Special case of zone reset operation to reset all zones in one command,
- * useful for applications like mkfs.
- */
-static int __blkdev_reset_all_zones(struct block_device *bdev, gfp_t gfp_mask)
-{
-	struct bio *bio = bio_alloc(gfp_mask, 0);
-	int ret;
-
-	/* across the zones operations, don't need any sectors */
-	bio_set_dev(bio, bdev);
-	bio_set_op_attrs(bio, REQ_OP_ZONE_RESET_ALL, 0);
-
-	ret = submit_bio_wait(bio);
-	bio_put(bio);
-
-	return ret;
-}
-
 static inline bool blkdev_allow_reset_all_zones(struct block_device *bdev,
+						sector_t sector,
 						sector_t nr_sectors)
 {
 	if (!blk_queue_zone_resetall(bdev_get_queue(bdev)))
 		return false;
 
-	if (nr_sectors != part_nr_sects_read(bdev->bd_part))
-		return false;
 	/*
-	 * REQ_OP_ZONE_RESET_ALL can be executed only if the block device is
-	 * the entire disk, that is, if the blocks device start offset is 0 and
-	 * its capacity is the same as the entire disk.
+	 * REQ_OP_ZONE_RESET_ALL can be executed only if the number of sectors
+	 * of the applicable zone range is the entire disk.
 	 */
-	return get_start_sect(bdev) == 0 &&
-		part_nr_sects_read(bdev->bd_part) == get_capacity(bdev->bd_disk);
+	return !sector && nr_sectors == get_capacity(bdev->bd_disk);
 }
 
 /**
- * blkdev_reset_zones - Reset zones write pointer
+ * blkdev_zone_mgmt - Execute a zone management operation on a range of zones
 * @bdev:	Target block device
- * @sector:	Start sector of the first zone to reset
- * @nr_sectors:	Number of sectors, at least the length of one zone
+ * @op:		Operation to be performed on the zones
+ * @sector:	Start sector of the first zone to operate on
+ * @nr_sectors:	Number of sectors, should be at least the length of one zone and
+ *		must be zone size aligned.
 * @gfp_mask:	Memory allocation flags (for bio_alloc)
 *
 * Description:
- *    Reset the write pointer of the zones contained in the range
+ *    Perform the specified operation on the range of zones specified by
 *    @sector..@sector+@nr_sectors. Specifying the entire disk sector range
 *    is valid, but the specified range should not contain conventional zones.
+ *    The operation to execute on each zone can be a zone reset, open, close
+ *    or finish request.
 */
-int blkdev_reset_zones(struct block_device *bdev,
-		       sector_t sector, sector_t nr_sectors,
-		       gfp_t gfp_mask)
+int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+		     sector_t sector, sector_t nr_sectors,
+		     gfp_t gfp_mask)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	sector_t zone_sectors;
+	sector_t zone_sectors = blk_queue_zone_sectors(q);
+	sector_t capacity = get_capacity(bdev->bd_disk);
 	sector_t end_sector = sector + nr_sectors;
 	struct bio *bio = NULL;
-	struct blk_plug plug;
 	int ret;
 
 	if (!blk_queue_is_zoned(q))
@@ -267,45 +180,62 @@ int blkdev_reset_zones(struct block_device *bdev,
 	if (bdev_read_only(bdev))
 		return -EPERM;
 
-	if (!nr_sectors || end_sector > bdev->bd_part->nr_sects)
+	if (!op_is_zone_mgmt(op))
+		return -EOPNOTSUPP;
+
+	if (!nr_sectors || end_sector > capacity)
 		/* Out of range */
 		return -EINVAL;
 
-	if (blkdev_allow_reset_all_zones(bdev, nr_sectors))
-		return __blkdev_reset_all_zones(bdev, gfp_mask);
-
 	/* Check alignment (handle eventual smaller last zone) */
-	zone_sectors = blk_queue_zone_sectors(q);
 	if (sector & (zone_sectors - 1))
 		return -EINVAL;
 
-	if ((nr_sectors & (zone_sectors - 1)) &&
-	    end_sector != bdev->bd_part->nr_sects)
+	if ((nr_sectors & (zone_sectors - 1)) && end_sector != capacity)
 		return -EINVAL;
 
-	blk_start_plug(&plug);
 	while (sector < end_sector) {
-
 		bio = blk_next_bio(bio, 0, gfp_mask);
-		bio->bi_iter.bi_sector = sector;
 		bio_set_dev(bio, bdev);
-		bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
 
+		/*
+		 * Special case for the zone reset operation that reset all
+		 * zones, this is useful for applications like mkfs.
+		 */
+		if (op == REQ_OP_ZONE_RESET &&
+		    blkdev_allow_reset_all_zones(bdev, sector, nr_sectors)) {
+			bio->bi_opf = REQ_OP_ZONE_RESET_ALL;
+			break;
+		}
+
+		bio->bi_opf = op;
+		bio->bi_iter.bi_sector = sector;
 		sector += zone_sectors;
 
 		/* This may take a while, so be nice to others */
 		cond_resched();
-
 	}
 
 	ret = submit_bio_wait(bio);
 	bio_put(bio);
 
-	blk_finish_plug(&plug);
-
 	return ret;
 }
-EXPORT_SYMBOL_GPL(blkdev_reset_zones);
+EXPORT_SYMBOL_GPL(blkdev_zone_mgmt);
+
+struct zone_report_args {
+	struct blk_zone __user *zones;
+};
+
+static int blkdev_copy_zone_to_user(struct blk_zone *zone, unsigned int idx,
+				    void *data)
+{
+	struct zone_report_args *args = data;
+
+	if (copy_to_user(&args->zones[idx], zone, sizeof(struct blk_zone)))
+		return -EFAULT;
+	return 0;
+}
 
 /*
  * BLKREPORTZONE ioctl processing.
@@ -315,9 +245,9 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 			      unsigned int cmd, unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
+	struct zone_report_args args;
 	struct request_queue *q;
 	struct blk_zone_report rep;
-	struct blk_zone *zones;
 	int ret;
 
 	if (!argp)
 		return -EINVAL;
@@ -339,44 +269,29 @@ int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
 	if (!rep.nr_zones)
 		return -EINVAL;
 
-	rep.nr_zones = min(blkdev_nr_zones(bdev), rep.nr_zones);
-
-	zones = kvmalloc_array(rep.nr_zones, sizeof(struct blk_zone),
-			       GFP_KERNEL | __GFP_ZERO);
-	if (!zones)
-		return -ENOMEM;
-
-	ret = blkdev_report_zones(bdev, rep.sector, zones, &rep.nr_zones);
-	if (ret)
-		goto out;
-
-	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report))) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	if (rep.nr_zones) {
-		if (copy_to_user(argp + sizeof(struct blk_zone_report), zones,
-				 sizeof(struct blk_zone) * rep.nr_zones))
-			ret = -EFAULT;
-	}
-
- out:
-	kvfree(zones);
+	args.zones = argp + sizeof(struct blk_zone_report);
+	ret = blkdev_report_zones(bdev, rep.sector, rep.nr_zones,
+				  blkdev_copy_zone_to_user, &args);
+	if (ret < 0)
+		return ret;
 
-	return ret;
+	rep.nr_zones = ret;
+	if (copy_to_user(argp, &rep, sizeof(struct blk_zone_report)))
+		return -EFAULT;
+	return 0;
 }
 
 /*
- * BLKRESETZONE ioctl processing.
+ * BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE and BLKFINISHZONE ioctl processing.
  * Called from blkdev_ioctl.
  */
-int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
-			     unsigned int cmd, unsigned long arg)
+int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+			   unsigned int cmd, unsigned long arg)
 {
 	void __user *argp = (void __user *)arg;
 	struct request_queue *q;
 	struct blk_zone_range zrange;
+	enum req_opf op;
 
 	if (!argp)
 		return -EINVAL;
@@ -397,8 +312,25 @@ int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
 	if (copy_from_user(&zrange, argp, sizeof(struct blk_zone_range)))
 		return -EFAULT;
 
-	return blkdev_reset_zones(bdev, zrange.sector, zrange.nr_sectors,
-				  GFP_KERNEL);
+	switch (cmd) {
+	case BLKRESETZONE:
+		op = REQ_OP_ZONE_RESET;
+		break;
+	case BLKOPENZONE:
+		op = REQ_OP_ZONE_OPEN;
+		break;
+	case BLKCLOSEZONE:
+		op = REQ_OP_ZONE_CLOSE;
+		break;
+	case BLKFINISHZONE:
+		op = REQ_OP_ZONE_FINISH;
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+				GFP_KERNEL);
 }
 
 static inline unsigned long *blk_alloc_zone_bitmap(int node,
@@ -408,37 +340,99 @@ static inline unsigned long *blk_alloc_zone_bitmap(int node,
 			    GFP_NOIO, node);
 }
 
+void blk_queue_free_zone_bitmaps(struct request_queue *q)
+{
+	kfree(q->seq_zones_bitmap);
+	q->seq_zones_bitmap = NULL;
+	kfree(q->seq_zones_wlock);
+	q->seq_zones_wlock = NULL;
+}
+
+struct blk_revalidate_zone_args {
+	struct gendisk *disk;
+	unsigned long *seq_zones_bitmap;
+	unsigned long *seq_zones_wlock;
+	sector_t sector;
+};
+
 /*
- * Allocate an array of struct blk_zone to get nr_zones zone information.
- * The allocated array may be smaller than nr_zones.
+ * Helper function to check the validity of zones of a zoned block device.
 */
-static struct blk_zone *blk_alloc_zones(unsigned int *nr_zones)
+static int blk_revalidate_zone_cb(struct blk_zone *zone, unsigned int idx,
+				  void *data)
 {
-	struct blk_zone *zones;
-	size_t nrz = min(*nr_zones, BLK_ZONED_REPORT_MAX_ZONES);
+	struct blk_revalidate_zone_args *args = data;
+	struct gendisk *disk = args->disk;
+	struct request_queue *q = disk->queue;
+	sector_t zone_sectors = blk_queue_zone_sectors(q);
+	sector_t capacity = get_capacity(disk);
 
 	/*
-	 * GFP_KERNEL here is meaningless as the caller task context has
-	 * the PF_MEMALLOC_NOIO flag set in blk_revalidate_disk_zones()
-	 * with memalloc_noio_save().
+	 * All zones must have the same size, with the exception on an eventual
+	 * smaller last zone.
	 */
-	zones = kvcalloc(nrz, sizeof(struct blk_zone), GFP_KERNEL);
-	if (!zones) {
-		*nr_zones = 0;
-		return NULL;
+	if (zone->start + zone_sectors < capacity &&
+	    zone->len != zone_sectors) {
+		pr_warn("%s: Invalid zoned device with non constant zone size\n",
+			disk->disk_name);
+		return false;
 	}
 
-	*nr_zones = nrz;
+	if (zone->start + zone->len >= capacity &&
+	    zone->len > zone_sectors) {
+		pr_warn("%s: Invalid zoned device with larger last zone size\n",
+			disk->disk_name);
+		return -ENODEV;
+	}
+
+	/* Check for holes in the zone report */
+	if (zone->start != args->sector) {
+		pr_warn("%s: Zone gap at sectors %llu..%llu\n",
+			disk->disk_name, args->sector, zone->start);
+		return -ENODEV;
+	}
 
-	return zones;
+	/* Check zone type */
+	switch (zone->type) {
+	case BLK_ZONE_TYPE_CONVENTIONAL:
+	case BLK_ZONE_TYPE_SEQWRITE_REQ:
+	case BLK_ZONE_TYPE_SEQWRITE_PREF:
+		break;
+	default:
+		pr_warn("%s: Invalid zone type 0x%x at sectors %llu\n",
+			disk->disk_name, (int)zone->type, zone->start);
+		return -ENODEV;
+	}
+
+	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
+		set_bit(idx, args->seq_zones_bitmap);
+
+	args->sector += zone->len;
+	return 0;
 }
 
-void blk_queue_free_zone_bitmaps(struct request_queue *q)
+static int blk_update_zone_info(struct gendisk *disk, unsigned int nr_zones,
+				struct blk_revalidate_zone_args *args)
 {
-	kfree(q->seq_zones_bitmap);
-	q->seq_zones_bitmap = NULL;
-	kfree(q->seq_zones_wlock);
-	q->seq_zones_wlock = NULL;
+	/*
+	 * Ensure that all memory allocations in this context are done as
+	 * if GFP_NOIO was specified.
+	 */
+	unsigned int noio_flag = memalloc_noio_save();
+	struct request_queue *q = disk->queue;
+	int ret;
+
+	args->seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
+	if (!args->seq_zones_wlock)
+		return -ENOMEM;
+	args->seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
+	if (!args->seq_zones_bitmap)
+		return -ENOMEM;
+
+	ret = disk->fops->report_zones(disk, 0, nr_zones,
+				       blk_revalidate_zone_cb, args);
+	memalloc_noio_restore(noio_flag);
+	return ret;
 }
 
 /**
@@ -454,13 +448,12 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 	unsigned int nr_zones = __blkdev_nr_zones(q, get_capacity(disk));
-	unsigned long *seq_zones_wlock = NULL, *seq_zones_bitmap = NULL;
-	unsigned int i, rep_nr_zones = 0, z = 0, nrz;
-	struct blk_zone *zones = NULL;
-	unsigned int noio_flag;
-	sector_t sector = 0;
+	struct blk_revalidate_zone_args args = { .disk = disk };
 	int ret = 0;
 
+	if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
+		return -EIO;
+
 	/*
	 * BIO based queues do not use a scheduler so only q->nr_zones
	 * needs to be updated so that the sysfs exposed value is correct.
@@ -470,78 +463,28 @@ int blk_revalidate_disk_zones(struct gendisk *disk)
 		return 0;
 	}
 
-	/*
-	 * Ensure that all memory allocations in this context are done as
-	 * if GFP_NOIO was specified.
-	 */
-	noio_flag = memalloc_noio_save();
-
-	if (!blk_queue_is_zoned(q) || !nr_zones) {
-		nr_zones = 0;
-		goto update;
-	}
-
-	/* Allocate bitmaps */
-	ret = -ENOMEM;
-	seq_zones_wlock = blk_alloc_zone_bitmap(q->node, nr_zones);
-	if (!seq_zones_wlock)
-		goto out;
-	seq_zones_bitmap = blk_alloc_zone_bitmap(q->node, nr_zones);
-	if (!seq_zones_bitmap)
-		goto out;
-
-	/* Get zone information and initialize seq_zones_bitmap */
-	rep_nr_zones = nr_zones;
-	zones = blk_alloc_zones(&rep_nr_zones);
-	if (!zones)
-		goto out;
-
-	while (z < nr_zones) {
-		nrz = min(nr_zones - z, rep_nr_zones);
-		ret = blk_report_zones(disk, sector, zones, &nrz);
-		if (ret)
-			goto out;
-		if (!nrz)
-			break;
-		for (i = 0; i < nrz; i++) {
-			if (zones[i].type != BLK_ZONE_TYPE_CONVENTIONAL)
-				set_bit(z, seq_zones_bitmap);
-			z++;
-		}
-		sector += nrz * blk_queue_zone_sectors(q);
-	}
-
-	if (WARN_ON(z != nr_zones)) {
-		ret = -EIO;
-		goto out;
-	}
+	if (nr_zones)
+		ret = blk_update_zone_info(disk, nr_zones, &args);
 
-update:
 	/*
	 * Install the new bitmaps, making sure the queue is stopped and
	 * all I/Os are completed (i.e. a scheduler is not referencing the
	 * bitmaps).
	 */
 	blk_mq_freeze_queue(q);
-	q->nr_zones = nr_zones;
-	swap(q->seq_zones_wlock, seq_zones_wlock);
-	swap(q->seq_zones_bitmap, seq_zones_bitmap);
-	blk_mq_unfreeze_queue(q);
-
-out:
-	memalloc_noio_restore(noio_flag);
-
-	kvfree(zones);
-	kfree(seq_zones_wlock);
-	kfree(seq_zones_bitmap);
-
-	if (ret) {
+	if (ret >= 0) {
+		q->nr_zones = nr_zones;
+		swap(q->seq_zones_wlock, args.seq_zones_wlock);
+		swap(q->seq_zones_bitmap, args.seq_zones_bitmap);
+		ret = 0;
+	} else {
 		pr_warn("%s: failed to revalidate zones\n", disk->disk_name);
-		blk_mq_freeze_queue(q);
 		blk_queue_free_zone_bitmaps(q);
-		blk_mq_unfreeze_queue(q);
 	}
+	blk_mq_unfreeze_queue(q);
 
+	kfree(args.seq_zones_wlock);
+	kfree(args.seq_zones_bitmap);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_revalidate_disk_zones);
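
Example: the reworked blkdev_report_zones() above no longer fills a caller-allocated zone array; it invokes a report_zones_cb callback once per zone and returns the number of zones reported (or a negative errno). The sketch below is a hypothetical in-kernel caller illustrating that contract; the names count_seq_zones() and count_seq_zones_cb() are illustrative and are not part of this diff.

/* Hypothetical caller: count the sequential zones of a zoned block device. */
#include <linux/blkdev.h>
#include <linux/blkzoned.h>

static int count_seq_zones_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	unsigned int *nr_seq_zones = data;

	/* Conventional zones have no write pointer; count everything else. */
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL)
		(*nr_seq_zones)++;
	return 0;	/* returning an error here aborts the report */
}

static int count_seq_zones(struct block_device *bdev,
			   unsigned int *nr_seq_zones)
{
	int ret;

	*nr_seq_zones = 0;

	/* BLK_ALL_ZONES asks for every zone starting from the given sector. */
	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
				  count_seq_zones_cb, nr_seq_zones);
	if (ret < 0)
		return ret;
	/* On success, ret is the total number of zones reported. */
	return 0;
}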
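Example: blkdev_zone_mgmt() folds the old reset-only path and the new open, close and finish operations into one helper, with the request operation passed explicitly. The minimal wrappers below are hypothetical (reset_whole_device() and open_one_zone() are not names from this diff) and only use the signature shown in the patch.

/* Hypothetical: reset every zone of a device. Covering sector 0 up to the
 * disk capacity lets the helper fall back to a single REQ_OP_ZONE_RESET_ALL
 * bio when the queue advertises that capability (see
 * blkdev_allow_reset_all_zones() above). */
static int reset_whole_device(struct block_device *bdev)
{
	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_RESET, 0,
				get_capacity(bdev->bd_disk), GFP_KERNEL);
}

/* Hypothetical: explicitly open the single zone starting at @sector,
 * which must be zone size aligned. */
static int open_one_zone(struct block_device *bdev, sector_t sector)
{
	sector_t zone_sectors = blk_queue_zone_sectors(bdev_get_queue(bdev));

	return blkdev_zone_mgmt(bdev, REQ_OP_ZONE_OPEN, sector, zone_sectors,
				GFP_KERNEL);
}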
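Example: from user space, the same zone operations are reachable through the ioctls that blkdev_zone_mgmt_ioctl() now dispatches. A hedged sketch follows; the device node and zone size are placeholders, and it assumes a kernel and <linux/blkzoned.h> headers that provide BLKOPENZONE and BLKRESETZONE.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/blkzoned.h>

int main(void)
{
	/* Placeholder values: first zone of a device with 256 MiB zones. */
	struct blk_zone_range range = {
		.sector = 0,
		.nr_sectors = 524288,	/* 256 MiB in 512-byte sectors */
	};
	int fd = open("/dev/nullb0", O_RDWR);	/* placeholder device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKOPENZONE, &range) < 0)		/* REQ_OP_ZONE_OPEN */
		perror("BLKOPENZONE");
	if (ioctl(fd, BLKRESETZONE, &range) < 0)	/* REQ_OP_ZONE_RESET */
		perror("BLKRESETZONE");
	close(fd);
	return 0;
}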