Diffstat (limited to 'drivers/block')
29 files changed, 434 insertions, 301 deletions
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 226219da3da6..71c2b1564558 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1670,7 +1670,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 	}
 
 	if (mode & (FMODE_READ|FMODE_WRITE)) {
-		check_disk_change(bdev);
+		bdev_check_media_change(bdev);
 		if (mode & FMODE_WRITE) {
 			int wrprot;
 
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 5ca7216e9e01..c34e71b0c4a9 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -347,7 +347,6 @@ aoeblk_gdalloc(void *vp)
 	mempool_t *mp;
 	struct request_queue *q;
 	struct blk_mq_tag_set *set;
-	enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
 	ulong flags;
 	int late = 0;
 	int err;
@@ -407,7 +406,7 @@ aoeblk_gdalloc(void *vp)
 	WARN_ON(d->gd);
 	WARN_ON(d->flags & DEVFL_UP);
 	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
-	q->backing_dev_info->ra_pages = READ_AHEAD / PAGE_SIZE;
+	blk_queue_io_opt(q, SZ_2M);
 	d->bufpool = mp;
 	d->blkq = gd->queue = q;
 	q->queuedata = d;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 6dba41395155..313f0b946fe2 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -900,9 +900,7 @@ aoecmd_sleepwork(struct work_struct *work)
 		ssize = get_capacity(d->gd);
 		bd = bdget_disk(d->gd, 0);
 		if (bd) {
-			inode_lock(bd->bd_inode);
-			i_size_write(bd->bd_inode, (loff_t)ssize<<9);
-			inode_unlock(bd->bd_inode);
+			bd_set_nr_sectors(bd, ssize);
 			bdput(bd);
 		}
 		spin_lock_irq(&d->lock);
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index a50e13af0305..3e881fdb06e0 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1732,7 +1732,8 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
 		/* invalidate the buffer track to force a reread */
 		BufferDrive = -1;
 		set_bit(drive, &fake_change);
-		check_disk_change(bdev);
+		if (bdev_check_media_change(bdev))
+			floppy_revalidate(bdev->bd_disk);
 		return 0;
 	default:
 		return -EINVAL;
@@ -1909,7 +1910,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 		return 0;
 
 	if (mode & (FMODE_READ|FMODE_WRITE)) {
-		check_disk_change(bdev);
+		if (bdev_check_media_change(bdev))
+			floppy_revalidate(bdev->bd_disk);
 		if (mode & FMODE_WRITE) {
 			if (p->wpstat) {
 				if (p->ref < 0)
@@ -1953,7 +1955,6 @@ static const struct block_device_operations floppy_fops = {
 	.release	= floppy_release,
 	.ioctl		= fd_ioctl,
 	.check_events	= floppy_check_events,
-	.revalidate_disk= floppy_revalidate,
 };
 
 static const struct blk_mq_ops ataflop_mq_ops = {
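The floppy-style drivers above (and several more below) all make the same conversion: check_disk_change() used to invoke the driver's ->revalidate_disk method behind the caller's back, while bdev_check_media_change() only reports whether the media changed and leaves revalidation to the caller. A minimal sketch of the pattern, with my_open()/my_revalidate() as illustrative stand-ins rather than names from the patch:

/* Sketch: converting a ->open handler from check_disk_change() to
 * bdev_check_media_change().  my_revalidate() stands in for whatever
 * the driver previously wired up as ->revalidate_disk.
 */
static int my_open(struct block_device *bdev, fmode_t mode)
{
	/* Old style: check_disk_change(bdev) revalidated implicitly.
	 * New style: the helper only reports the media change; the
	 * driver decides what "revalidate" means for its hardware.
	 */
	if (bdev_check_media_change(bdev))
		my_revalidate(bdev->bd_disk);

	return 0;
}

This is also why the .revalidate_disk entries disappear from the various floppy_fops tables in this series: the method is no longer reachable through the generic open path.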
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 2723a70eb855..cc49a921339f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -403,7 +403,6 @@ static struct brd_device *brd_alloc(int i)
 	disk->flags		= GENHD_FL_EXT_DEVT;
 	sprintf(disk->disk_name, "ram%d", i);
 	set_capacity(disk, rd_size * 2);
-	brd->brd_queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
 
 	/* Tell the block layer that this is not a rotational device */
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, brd->brd_queue);
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index b41897dceb2b..7227fc7ab8ed 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -865,7 +865,7 @@ int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
 	if (!get_ldev(device))
 		return 0; /* no disk, no metadata, no bitmap to manipulate bits in */
 
-	nr_sectors = drbd_get_capacity(device->this_bdev);
+	nr_sectors = get_capacity(device->vdisk);
 	esector = sector + (size >> 9) - 1;
 
 	if (!expect(sector < nr_sectors))
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 740e93bad21f..8f879e5c2f67 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -841,7 +841,6 @@ struct drbd_device {
 	sector_t p_size;          /* partner's disk size */
 
 	struct request_queue *rq_queue;
-	struct block_device *this_bdev;
 	struct gendisk	    *vdisk;
 
 	unsigned long last_reattach_jif;
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 04b6bde9419d..65b95aef8dbc 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -984,7 +984,10 @@ int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enu
 	p->d_size = cpu_to_be64(d_size);
 	p->u_size = cpu_to_be64(u_size);
-	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
+	if (trigger_reply)
+		p->c_size = 0;
+	else
+		p->c_size = cpu_to_be64(get_capacity(device->vdisk));
 	p->max_bio_size = cpu_to_be32(max_bio_size);
 	p->queue_order_type = cpu_to_be16(q_order_type);
 	p->dds_flags = cpu_to_be16(flags);
@@ -1553,7 +1556,7 @@ static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *pa
 	 * put_page(); and would cause either a VM_BUG directly, or
 	 * __page_cache_release a page that would actually still be referenced
 	 * by someone, leading to some obscure delayed Oops somewhere else. */
-	if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
+	if (drbd_disable_sendpage || !sendpage_ok(page))
 		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
 
 	msg_flags |= MSG_NOSIGNAL;
@@ -2029,17 +2032,13 @@ void drbd_init_set_defaults(struct drbd_device *device)
 	device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
 }
 
-static void _drbd_set_my_capacity(struct drbd_device *device, sector_t size)
-{
-	/* set_capacity(device->this_bdev->bd_disk, size); */
-	set_capacity(device->vdisk, size);
-	device->this_bdev->bd_inode->i_size = (loff_t)size << 9;
-}
-
 void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
 {
 	char ppb[10];
-	_drbd_set_my_capacity(device, size);
+
+	set_capacity(device->vdisk, size);
+	revalidate_disk_size(device->vdisk, false);
+
 	drbd_info(device, "size = %s (%llu KB)\n",
 		ppsize(ppb, size>>1), (unsigned long long)size>>1);
 }
@@ -2069,7 +2068,8 @@ void drbd_device_cleanup(struct drbd_device *device)
 	}
 	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
 
-	_drbd_set_my_capacity(device, 0);
+	set_capacity(device->vdisk, 0);
+	revalidate_disk_size(device->vdisk, false);
 	if (device->bitmap) {
 		/* maybe never allocated. */
 		drbd_bm_resize(device, 0, 1);
@@ -2236,9 +2236,6 @@ void drbd_destroy_device(struct kref *kref)
 	/* cleanup stuff that may have been allocated during
 	 * device (re-)configuration or state changes */
 
-	if (device->this_bdev)
-		bdput(device->this_bdev);
-
 	drbd_backing_dev_free(device, device->ldev);
 	device->ldev = NULL;
 
@@ -2765,10 +2762,6 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
 	sprintf(disk->disk_name, "drbd%d", minor);
 	disk->private_data = device;
 
-	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
-	/* we have no partitions. we contain only ourselves. */
-	device->this_bdev->bd_contains = device->this_bdev;
-
 	blk_queue_write_cache(q, true, true);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
@@ -3044,7 +3037,7 @@ void drbd_md_write(struct drbd_device *device, void *b)
 
 	memset(buffer, 0, sizeof(*buffer));
 
-	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
+	buffer->la_size_sect = cpu_to_be64(get_capacity(device->vdisk));
 	for (i = UI_CURRENT; i < UI_SIZE; i++)
 		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
 	buffer->flags = cpu_to_be32(device->ldev->md.flags);
@@ -3102,7 +3095,7 @@ void drbd_md_sync(struct drbd_device *device)
 
 	/* Update device->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
-	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);
+	device->ldev->md.la_size_sect = get_capacity(device->vdisk);
 
 	drbd_md_put_buffer(device);
 out:
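With this_bdev gone, drbd reads capacity straight from the gendisk and resizes through two generic helpers instead of poking bd_inode->i_size directly. A hedged sketch of the resulting idiom (drbd_resize_example is an illustrative name, not a function in the patch):

/* Sketch: updating a disk's size without touching bd_inode.
 * set_capacity() records the new size on the gendisk;
 * revalidate_disk_size() propagates it to the block device inode.
 */
static void drbd_resize_example(struct gendisk *disk, sector_t nr_sectors)
{
	set_capacity(disk, nr_sectors);
	revalidate_disk_size(disk, false);	/* false: no "capacity change" log */
}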
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 43c8ae4d9fca..bf7de4c7b96c 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -996,7 +996,7 @@ drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct
 		goto err_out;
 	}
 
-	if (drbd_get_capacity(device->this_bdev) != size ||
+	if (get_capacity(device->vdisk) != size ||
 	    drbd_bm_capacity(device) != size) {
 		int err;
 		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
@@ -1362,15 +1362,7 @@ static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backi
 	if (b) {
 		blk_stack_limits(&q->limits, &b->limits, 0);
-
-		if (q->backing_dev_info->ra_pages !=
-		    b->backing_dev_info->ra_pages) {
-			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
-				 q->backing_dev_info->ra_pages,
-				 b->backing_dev_info->ra_pages);
-			q->backing_dev_info->ra_pages =
-						b->backing_dev_info->ra_pages;
-		}
+		blk_queue_update_readahead(q);
 	}
 	fixup_discard_if_not_supported(q);
 	fixup_write_zeroes(device, q);
@@ -1941,8 +1933,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 	/* Make sure the new disk is big enough
 	 * (we may currently be R_PRIMARY with no local disk...) */
-	if (drbd_get_max_capacity(nbc) <
-	    drbd_get_capacity(device->this_bdev)) {
+	if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
 		retcode = ERR_DISK_TOO_SMALL;
 		goto fail;
 	}
@@ -3370,7 +3361,6 @@ static void device_to_statistics(struct device_statistics *s,
 	if (get_ldev(device)) {
 		struct drbd_md *md = &device->ldev->md;
 		u64 *history_uuids = (u64 *)s->history_uuids;
-		struct request_queue *q;
 		int n;
 
 		spin_lock_irq(&md->uuid_lock);
@@ -3384,14 +3374,9 @@ static void device_to_statistics(struct device_statistics *s,
 		spin_unlock_irq(&md->uuid_lock);
 
 		s->dev_disk_flags = md->flags;
-		q = bdev_get_queue(device->ldev->backing_bdev);
-		s->dev_lower_blocked =
-			bdi_congested(q->backing_dev_info,
-				      (1 << WB_async_congested) |
-				      (1 << WB_sync_congested));
 		put_ldev(device);
 	}
-	s->dev_size = drbd_get_capacity(device->this_bdev);
+	s->dev_size = get_capacity(device->vdisk);
 	s->dev_read = device->read_cnt;
 	s->dev_write = device->writ_cnt;
 	s->dev_al_writes = device->al_writ_cnt;
@@ -3831,8 +3816,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
 	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
 	    nla_put_u32(skb, T_current_state, device->state.i) ||
 	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
-	    nla_put_u64_0pad(skb, T_capacity,
-			     drbd_get_capacity(device->this_bdev)) ||
+	    nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
 	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
 	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
 	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
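The drbd_setup_queue_param() hunk above drops the hand-rolled ra_pages copy in favor of the generic helper. A hedged sketch of how a stacking driver now inherits limits and readahead from its backing queue (function name illustrative):

/* Sketch: stacking queue limits and letting the block layer derive the
 * readahead window from them, instead of copying ra_pages by hand.
 */
static void stack_limits_example(struct request_queue *q,
				 struct request_queue *backing)
{
	blk_stack_limits(&q->limits, &backing->limits, 0);
	blk_queue_update_readahead(q);	/* recompute ra_pages from limits */
}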
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 422363daa618..dc333dbe5232 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1860,7 +1860,7 @@ read_in_block(struct drbd_peer_device *peer_device, u64 id, sector_t sector,
 	      struct packet_info *pi) __must_hold(local)
 {
 	struct drbd_device *device = peer_device->device;
-	const sector_t capacity = drbd_get_capacity(device->this_bdev);
+	const sector_t capacity = get_capacity(device->vdisk);
 	struct drbd_peer_request *peer_req;
 	struct page *page;
 	int digest_size, err;
@@ -2789,7 +2789,7 @@ bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
 
 bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
 {
-	struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
+	struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
 	unsigned long db, dt, dbdt;
 	unsigned int c_min_rate;
 	int curr_events;
@@ -2849,7 +2849,7 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
 	if (!peer_device)
 		return -EIO;
 	device = peer_device->device;
-	capacity = drbd_get_capacity(device->this_bdev);
+	capacity = get_capacity(device->vdisk);
 
 	sector = be64_to_cpu(p->sector);
 	size   = be32_to_cpu(p->blksize);
@@ -4117,7 +4117,7 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
 	if (!peer_device)
 		return config_unknown_volume(connection, pi);
 	device = peer_device->device;
-	cur_size = drbd_get_capacity(device->this_bdev);
+	cur_size = get_capacity(device->vdisk);
 
 	p_size = be64_to_cpu(p->d_size);
 	p_usize = be64_to_cpu(p->u_size);
@@ -4252,8 +4252,8 @@ static int receive_sizes(struct drbd_connection *connection, struct packet_info
 	}
 
 	if (device->state.conn > C_WF_REPORT_PARAMS) {
-		if (be64_to_cpu(p->c_size) !=
-		    drbd_get_capacity(device->this_bdev) || ldsc) {
+		if (be64_to_cpu(p->c_size) != get_capacity(device->vdisk) ||
+		    ldsc) {
 			/* we have different sizes, probably peer
 			 * needs to know my new size... */
 			drbd_send_sizes(peer_device, 0, ddsf);
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 5c975af9c15f..330f851cb8f0 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -888,7 +888,7 @@ static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector,
 	if (device->state.disk != D_INCONSISTENT)
 		return false;
 	esector = sector + (size >> 9) - 1;
-	nr_sectors = drbd_get_capacity(device->this_bdev);
+	nr_sectors = get_capacity(device->vdisk);
 	D_ASSERT(device, sector < nr_sectors);
 	D_ASSERT(device, esector < nr_sectors);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 7c903de5c4e1..ba56f3f05312 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -591,7 +591,7 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
 	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
 	unsigned long bit;
 	sector_t sector;
-	const sector_t capacity = drbd_get_capacity(device->this_bdev);
+	const sector_t capacity = get_capacity(device->vdisk);
 	int max_bio_size;
 	int number, rollback_i, size;
 	int align, requeue = 0;
@@ -769,7 +769,7 @@ static int make_ov_request(struct drbd_device *device, int cancel)
 {
 	int number, i, size;
 	sector_t sector;
-	const sector_t capacity = drbd_get_capacity(device->this_bdev);
+	const sector_t capacity = get_capacity(device->vdisk);
 	bool stop_sector_reached = false;
 
 	if (unlikely(cancel))
@@ -1672,7 +1672,7 @@ void drbd_resync_after_changed(struct drbd_device *device)
 
 void drbd_rs_controller_reset(struct drbd_device *device)
 {
-	struct gendisk *disk = device->ldev->backing_bdev->bd_contains->bd_disk;
+	struct gendisk *disk = device->ldev->backing_bdev->bd_disk;
 	struct fifo_buffer *plan;
 
 	atomic_set(&device->rs_sect_in, 0);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index a563b023458a..7df79ae6b0a1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -561,6 +561,7 @@ static void floppy_release_irq_and_dma(void);
  * output_byte is automatically disabled when reset is set.
  */
 static void reset_fdc(void);
+static int floppy_revalidate(struct gendisk *disk);
 
 /*
  * These are global variables, as that's the easiest way to give
@@ -3275,7 +3276,8 @@ static int invalidate_drive(struct block_device *bdev)
 	/* invalidate the buffer track to force a reread */
 	set_bit((long)bdev->bd_disk->private_data, &fake_change);
 	process_fd_request();
-	check_disk_change(bdev);
+	if (bdev_check_media_change(bdev))
+		floppy_revalidate(bdev->bd_disk);
 	return 0;
 }
 
@@ -4123,7 +4125,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 			drive_state[drive].last_checked = 0;
 			clear_bit(FD_OPEN_SHOULD_FAIL_BIT,
 				  &drive_state[drive].flags);
-			check_disk_change(bdev);
+			if (bdev_check_media_change(bdev))
+				floppy_revalidate(bdev->bd_disk);
 			if (test_bit(FD_DISK_CHANGED_BIT, &drive_state[drive].flags))
 				goto out;
 			if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &drive_state[drive].flags))
@@ -4291,7 +4294,6 @@ static const struct block_device_operations floppy_fops = {
 	.ioctl			= fd_ioctl,
 	.getgeo			= fd_getgeo,
 	.check_events		= floppy_check_events,
-	.revalidate_disk	= floppy_revalidate,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl		= fd_compat_ioctl,
 #endif
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d3394191e168..cb1191d6e945 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -253,7 +253,7 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
 {
 	struct block_device *bdev = lo->lo_device;
 
-	bd_set_size(bdev, size << SECTOR_SHIFT);
+	bd_set_nr_sectors(bdev, size);
 
 	set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
 }
@@ -1251,7 +1251,7 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
 	set_capacity(lo->lo_disk, 0);
 	loop_sysfs_exit(lo);
 	if (bdev) {
-		bd_set_size(bdev, 0);
+		bd_set_nr_sectors(bdev, 0);
 		/* let user-space know about this change */
 		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
 	}
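bd_set_size() took a size in bytes, while its replacement bd_set_nr_sectors() takes 512-byte sectors, which is why each converted call site drops a `<< SECTOR_SHIFT` / `<< 9` (or, as in loop above, passes the sector count unshifted). A small sketch of the unit change (resize_example is illustrative):

/* Sketch: the byte -> sector unit change at a resize call site.
 * "size" is a capacity in 512-byte sectors.
 */
static void resize_example(struct block_device *bdev, sector_t size)
{
	/* Old: bd_set_size(bdev, (loff_t)size << SECTOR_SHIFT);  (bytes) */
	bd_set_nr_sectors(bdev, size);			/* sectors */
}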
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index edf8b632e3d2..0bed21c0c81b 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -300,6 +300,7 @@ static void nbd_size_update(struct nbd_device *nbd)
 {
 	struct nbd_config *config = nbd->config;
 	struct block_device *bdev = bdget_disk(nbd->disk, 0);
+	sector_t nr_sectors = config->bytesize >> 9;
 
 	if (config->flags & NBD_FLAG_SEND_TRIM) {
 		nbd->disk->queue->limits.discard_granularity = config->blksize;
@@ -308,13 +309,13 @@ static void nbd_size_update(struct nbd_device *nbd)
 	}
 	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
 	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
-	set_capacity(nbd->disk, config->bytesize >> 9);
+	set_capacity(nbd->disk, nr_sectors);
 	if (bdev) {
 		if (bdev->bd_disk) {
-			bd_set_size(bdev, config->bytesize);
+			bd_set_nr_sectors(bdev, nr_sectors);
 			set_blocksize(bdev, config->blksize);
 		} else
-			bdev->bd_invalidated = 1;
+			set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
 		bdput(bdev);
 	}
 	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
@@ -801,9 +802,9 @@ static void recv_work(struct work_struct *work)
 		if (likely(!blk_should_fake_timeout(rq->q)))
 			blk_mq_complete_request(rq);
 	}
+	nbd_config_put(nbd);
 	atomic_dec(&config->recv_threads);
 	wake_up(&config->recv_wq);
-	nbd_config_put(nbd);
 	kfree(args);
 }
 
@@ -1138,7 +1139,7 @@ static void nbd_bdev_reset(struct block_device *bdev)
 {
 	if (bdev->bd_openers > 1)
 		return;
-	bd_set_size(bdev, 0);
+	bd_set_nr_sectors(bdev, 0);
 }
 
 static void nbd_parse_flags(struct nbd_device *nbd)
@@ -1321,7 +1322,7 @@ static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *b
 		return ret;
 
 	if (max_part)
-		bdev->bd_invalidated = 1;
+		set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
 	mutex_unlock(&nbd->config_lock);
 	ret = wait_event_interruptible(config->recv_wq,
 				       atomic_read(&config->recv_threads) == 0);
@@ -1499,9 +1500,9 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
 		refcount_set(&nbd->config_refs, 1);
 		refcount_inc(&nbd->refs);
 		mutex_unlock(&nbd->config_lock);
-		bdev->bd_invalidated = 1;
+		set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
 	} else if (nbd_disconnected(nbd->config)) {
-		bdev->bd_invalidated = 1;
+		set_bit(GD_NEED_PART_SCAN, &bdev->bd_disk->state);
 	}
 out:
 	mutex_unlock(&nbd_index_mutex);
@@ -2183,7 +2184,7 @@ out:
 	return ret;
 }
 
-static const struct genl_ops nbd_connect_genl_ops[] = {
+static const struct genl_small_ops nbd_connect_genl_ops[] = {
 	{
 		.cmd	= NBD_CMD_CONNECT,
 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
@@ -2215,8 +2216,8 @@ static struct genl_family nbd_genl_family __ro_after_init = {
 	.name		= NBD_GENL_FAMILY_NAME,
 	.version	= NBD_GENL_VERSION,
 	.module		= THIS_MODULE,
-	.ops		= nbd_connect_genl_ops,
-	.n_ops		= ARRAY_SIZE(nbd_connect_genl_ops),
+	.small_ops	= nbd_connect_genl_ops,
+	.n_small_ops	= ARRAY_SIZE(nbd_connect_genl_ops),
 	.maxattr	= NBD_ATTR_MAX,
 	.policy		= nbd_attr_policy,
 	.mcgrps		= nbd_mcast_grps,
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index daed4a9c3436..d2e7db43a52a 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -42,6 +42,9 @@ struct nullb_device {
 	struct badblocks badblocks;
 
 	unsigned int nr_zones;
+	unsigned int nr_zones_imp_open;
+	unsigned int nr_zones_exp_open;
+	unsigned int nr_zones_closed;
 	struct blk_zone *zones;
 	sector_t zone_size_sects;
 
@@ -51,6 +54,8 @@ struct nullb_device {
 	unsigned long zone_size; /* zone size in MB if device is zoned */
 	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
 	unsigned int zone_nr_conv; /* number of conventional zones */
+	unsigned int zone_max_open; /* max number of open zones */
+	unsigned int zone_max_active; /* max number of active zones */
 	unsigned int submit_queues; /* number of submission queues */
 	unsigned int home_node; /* home node for the device */
 	unsigned int queue_mode; /* block interface */
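The nbd netlink hunk tracks the 5.10 generic-netlink split: families whose commands need no per-command attribute policy can register the more compact struct genl_small_ops table through .small_ops/.n_small_ops. A hedged sketch with illustrative names (demo_* is not from the patch):

/* Sketch: registering a generic netlink family with small_ops.
 * Requires <net/genetlink.h>; demo_cmd_doit is a stand-in handler.
 */
static int demo_cmd_doit(struct sk_buff *skb, struct genl_info *info);

static const struct genl_small_ops demo_genl_ops[] = {
	{
		.cmd	= 1,	/* hypothetical command number */
		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit	= demo_cmd_doit,
	},
};

static struct genl_family demo_genl_family __ro_after_init = {
	.name		= "demo_family",
	.version	= 1,
	.module		= THIS_MODULE,
	.small_ops	= demo_genl_ops,
	.n_small_ops	= ARRAY_SIZE(demo_genl_ops),
};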
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index d74443a9c8fa..4685ea401d5b 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -164,6 +164,10 @@ static bool shared_tags;
 module_param(shared_tags, bool, 0444);
 MODULE_PARM_DESC(shared_tags, "Share tag set between devices for blk-mq");
 
+static bool g_shared_tag_bitmap;
+module_param_named(shared_tag_bitmap, g_shared_tag_bitmap, bool, 0444);
+MODULE_PARM_DESC(shared_tag_bitmap, "Use shared tag bitmap for all submission queues for blk-mq");
+
 static int g_irqmode = NULL_IRQ_SOFTIRQ;
 
 static int null_set_irqmode(const char *str, const struct kernel_param *kp)
@@ -208,6 +212,14 @@ static unsigned int g_zone_nr_conv;
 module_param_named(zone_nr_conv, g_zone_nr_conv, uint, 0444);
 MODULE_PARM_DESC(zone_nr_conv, "Number of conventional zones when block device is zoned. Default: 0");
 
+static unsigned int g_zone_max_open;
+module_param_named(zone_max_open, g_zone_max_open, uint, 0444);
+MODULE_PARM_DESC(zone_max_open, "Maximum number of open zones when block device is zoned. Default: 0 (no limit)");
+
+static unsigned int g_zone_max_active;
+module_param_named(zone_max_active, g_zone_max_active, uint, 0444);
+MODULE_PARM_DESC(zone_max_active, "Maximum number of active zones when block device is zoned. Default: 0 (no limit)");
+
 static struct nullb_device *null_alloc_dev(void);
 static void null_free_dev(struct nullb_device *dev);
 static void null_del_dev(struct nullb *nullb);
@@ -347,6 +359,8 @@ NULLB_DEVICE_ATTR(zoned, bool, NULL);
 NULLB_DEVICE_ATTR(zone_size, ulong, NULL);
 NULLB_DEVICE_ATTR(zone_capacity, ulong, NULL);
 NULLB_DEVICE_ATTR(zone_nr_conv, uint, NULL);
+NULLB_DEVICE_ATTR(zone_max_open, uint, NULL);
+NULLB_DEVICE_ATTR(zone_max_active, uint, NULL);
 
 static ssize_t nullb_device_power_show(struct config_item *item, char *page)
 {
@@ -464,6 +478,8 @@ static struct configfs_attribute *nullb_device_attrs[] = {
 	&nullb_device_attr_zone_size,
 	&nullb_device_attr_zone_capacity,
 	&nullb_device_attr_zone_nr_conv,
+	&nullb_device_attr_zone_max_open,
+	&nullb_device_attr_zone_max_active,
 	NULL,
 };
 
@@ -517,7 +533,7 @@ nullb_group_drop_item(struct config_group *group, struct config_item *item)
 static ssize_t memb_group_features_show(struct config_item *item, char *page)
 {
 	return snprintf(page, PAGE_SIZE,
-			"memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv\n");
+			"memory_backed,discard,bandwidth,cache,badblocks,zoned,zone_size,zone_capacity,zone_nr_conv,zone_max_open,zone_max_active\n");
 }
 
 CONFIGFS_ATTR_RO(memb_group_, features);
@@ -580,6 +596,8 @@ static struct nullb_device *null_alloc_dev(void)
 	dev->zone_size = g_zone_size;
 	dev->zone_capacity = g_zone_capacity;
 	dev->zone_nr_conv = g_zone_nr_conv;
+	dev->zone_max_open = g_zone_max_open;
+	dev->zone_max_active = g_zone_max_active;
 	return dev;
 }
 
@@ -1692,6 +1710,8 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
 	set->flags = BLK_MQ_F_SHOULD_MERGE;
 	if (g_no_sched)
 		set->flags |= BLK_MQ_F_NO_SCHED;
+	if (g_shared_tag_bitmap)
+		set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
 	set->driver_data = NULL;
 
 	if ((nullb && nullb->dev->blocking) || g_blocking)
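With the new knobs wired up above, a zoned null_blk instance can be given open/active limits either as module parameters or through the matching configfs attributes. For example (values illustrative):

	modprobe null_blk zoned=1 zone_size=64 zone_max_open=4 zone_max_active=8

or, for a configfs-created device, something like echo 4 > /sys/kernel/config/nullb/nullb0/zone_max_open before setting the power attribute. A value of 0 keeps the corresponding limit disabled.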
diff --git a/drivers/block/null_blk_zoned.c b/drivers/block/null_blk_zoned.c
index 3d25c9ad2383..7d94f2d47a6a 100644
--- a/drivers/block/null_blk_zoned.c
+++ b/drivers/block/null_blk_zoned.c
@@ -51,6 +51,22 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 			dev->zone_nr_conv);
 	}
 
+	/* Max active zones has to be < nbr of seq zones in order to be enforceable */
+	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
+		dev->zone_max_active = 0;
+		pr_info("zone_max_active limit disabled, limit >= zone count\n");
+	}
+
+	/* Max open zones has to be <= max active zones */
+	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
+		dev->zone_max_open = dev->zone_max_active;
+		pr_info("changed the maximum number of open zones to %u\n",
+			dev->zone_max_open);
+	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
+		dev->zone_max_open = 0;
+		pr_info("zone_max_open limit disabled, limit >= zone count\n");
+	}
+
 	for (i = 0; i < dev->zone_nr_conv; i++) {
 		struct blk_zone *zone = &dev->zones[i];
 
@@ -99,6 +115,8 @@ int null_register_zoned_dev(struct nullb *nullb)
 	}
 
 	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
+	blk_queue_max_open_zones(q, dev->zone_max_open);
+	blk_queue_max_active_zones(q, dev->zone_max_active);
 
 	return 0;
 }
@@ -159,6 +177,111 @@ size_t null_zone_valid_read_len(struct nullb *nullb,
 	return (zone->wp - sector) << SECTOR_SHIFT;
 }
 
+static blk_status_t null_close_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+		return BLK_STS_IOERR;
+
+	switch (zone->cond) {
+	case BLK_ZONE_COND_CLOSED:
+		/* close operation on closed is not an error */
+		return BLK_STS_OK;
+	case BLK_ZONE_COND_IMP_OPEN:
+		dev->nr_zones_imp_open--;
+		break;
+	case BLK_ZONE_COND_EXP_OPEN:
+		dev->nr_zones_exp_open--;
+		break;
+	case BLK_ZONE_COND_EMPTY:
+	case BLK_ZONE_COND_FULL:
+	default:
+		return BLK_STS_IOERR;
+	}
+
+	if (zone->wp == zone->start) {
+		zone->cond = BLK_ZONE_COND_EMPTY;
+	} else {
+		zone->cond = BLK_ZONE_COND_CLOSED;
+		dev->nr_zones_closed++;
+	}
+
+	return BLK_STS_OK;
+}
+
+static void null_close_first_imp_zone(struct nullb_device *dev)
+{
+	unsigned int i;
+
+	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
+		if (dev->zones[i].cond == BLK_ZONE_COND_IMP_OPEN) {
+			null_close_zone(dev, &dev->zones[i]);
+			return;
+		}
+	}
+}
+
+static blk_status_t null_check_active(struct nullb_device *dev)
+{
+	if (!dev->zone_max_active)
+		return BLK_STS_OK;
+
+	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
+			dev->nr_zones_closed < dev->zone_max_active)
+		return BLK_STS_OK;
+
+	return BLK_STS_ZONE_ACTIVE_RESOURCE;
+}
+
+static blk_status_t null_check_open(struct nullb_device *dev)
+{
+	if (!dev->zone_max_open)
+		return BLK_STS_OK;
+
+	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
+		return BLK_STS_OK;
+
+	if (dev->nr_zones_imp_open) {
+		if (null_check_active(dev) == BLK_STS_OK) {
+			null_close_first_imp_zone(dev);
+			return BLK_STS_OK;
+		}
+	}
+
+	return BLK_STS_ZONE_OPEN_RESOURCE;
+}
+
+/*
+ * This function matches the manage open zone resources function in the ZBC standard,
+ * with the addition of max active zones support (added in the ZNS standard).
+ *
+ * The function determines if a zone can transition to implicit open or explicit open,
+ * while maintaining the max open zone (and max active zone) limit(s). It may close an
+ * implicit open zone in order to make additional zone resources available.
+ *
+ * ZBC states that an implicit open zone shall be closed only if there is not
+ * room within the open limit. However, with the addition of an active limit,
+ * it is not certain that closing an implicit open zone will allow a new zone
+ * to be opened, since we might already be at the active limit capacity.
+ */
+static blk_status_t null_check_zone_resources(struct nullb_device *dev, struct blk_zone *zone)
+{
+	blk_status_t ret;
+
+	switch (zone->cond) {
+	case BLK_ZONE_COND_EMPTY:
+		ret = null_check_active(dev);
+		if (ret != BLK_STS_OK)
+			return ret;
+		fallthrough;
+	case BLK_ZONE_COND_CLOSED:
+		return null_check_open(dev);
+	default:
+		/* Should never be called for other states */
+		WARN_ON(1);
+		return BLK_STS_IOERR;
+	}
+}
+
 static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
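To see how the two budgets interact, here is a runnable userspace toy model of the same accounting (not driver code; the struct and values are illustrative). With max_open = 2 and max_active = 3, two implicitly open zones plus one closed zone exhaust the active budget, so closing an implicit open zone cannot make a fresh empty zone openable — exactly the ZBC corner case the comment above describes:

#include <stdio.h>

/* Toy model of the null_blk counters: a zone is "open" if implicitly or
 * explicitly open, and "active" if open or closed-with-data. */
struct zone_counters {
	unsigned imp_open, exp_open, closed;
	unsigned max_open, max_active;	/* 0 means no limit */
};

static int can_activate(const struct zone_counters *c)
{
	return !c->max_active ||
	       c->imp_open + c->exp_open + c->closed < c->max_active;
}

static int can_open(const struct zone_counters *c)
{
	return !c->max_open || c->imp_open + c->exp_open < c->max_open;
}

int main(void)
{
	struct zone_counters c = { .imp_open = 2, .closed = 1,
				   .max_open = 2, .max_active = 3 };

	/* Prints "can open: 0, can activate: 0": both budgets exhausted. */
	printf("can open: %d, can activate: %d\n", can_open(&c), can_activate(&c));
	return 0;
}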
@@ -177,43 +300,164 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		/* Cannot write to a full zone */
 		return BLK_STS_IOERR;
 	case BLK_ZONE_COND_EMPTY:
+	case BLK_ZONE_COND_CLOSED:
+		ret = null_check_zone_resources(dev, zone);
+		if (ret != BLK_STS_OK)
+			return ret;
+		break;
 	case BLK_ZONE_COND_IMP_OPEN:
 	case BLK_ZONE_COND_EXP_OPEN:
+		break;
+	default:
+		/* Invalid zone condition */
+		return BLK_STS_IOERR;
+	}
+
+	/*
+	 * Regular writes must be at the write pointer position.
+	 * Zone append writes are automatically issued at the write
+	 * pointer and the position returned using the request or BIO
+	 * sector.
+	 */
+	if (append) {
+		sector = zone->wp;
+		if (cmd->bio)
+			cmd->bio->bi_iter.bi_sector = sector;
+		else
+			cmd->rq->__sector = sector;
+	} else if (sector != zone->wp) {
+		return BLK_STS_IOERR;
+	}
+
+	if (zone->wp + nr_sectors > zone->start + zone->capacity)
+		return BLK_STS_IOERR;
+
+	if (zone->cond == BLK_ZONE_COND_CLOSED) {
+		dev->nr_zones_closed--;
+		dev->nr_zones_imp_open++;
+	} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
+		dev->nr_zones_imp_open++;
+	}
+	if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
+		zone->cond = BLK_ZONE_COND_IMP_OPEN;
+
+	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+	if (ret != BLK_STS_OK)
+		return ret;
+
+	zone->wp += nr_sectors;
+	if (zone->wp == zone->start + zone->capacity) {
+		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
+			dev->nr_zones_exp_open--;
+		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
+			dev->nr_zones_imp_open--;
+		zone->cond = BLK_ZONE_COND_FULL;
+	}
+	return BLK_STS_OK;
+}
+
+static blk_status_t null_open_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+	blk_status_t ret;
+
+	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+		return BLK_STS_IOERR;
+
+	switch (zone->cond) {
+	case BLK_ZONE_COND_EXP_OPEN:
+		/* open operation on exp open is not an error */
+		return BLK_STS_OK;
+	case BLK_ZONE_COND_EMPTY:
+		ret = null_check_zone_resources(dev, zone);
+		if (ret != BLK_STS_OK)
+			return ret;
+		break;
+	case BLK_ZONE_COND_IMP_OPEN:
+		dev->nr_zones_imp_open--;
+		break;
 	case BLK_ZONE_COND_CLOSED:
-		/*
-		 * Regular writes must be at the write pointer position.
-		 * Zone append writes are automatically issued at the write
-		 * pointer and the position returned using the request or BIO
-		 * sector.
-		 */
-		if (append) {
-			sector = zone->wp;
-			if (cmd->bio)
-				cmd->bio->bi_iter.bi_sector = sector;
-			else
-				cmd->rq->__sector = sector;
-		} else if (sector != zone->wp) {
-			return BLK_STS_IOERR;
-		}
+		ret = null_check_zone_resources(dev, zone);
+		if (ret != BLK_STS_OK)
+			return ret;
+		dev->nr_zones_closed--;
+		break;
+	case BLK_ZONE_COND_FULL:
+	default:
+		return BLK_STS_IOERR;
+	}
 
-		if (zone->wp + nr_sectors > zone->start + zone->capacity)
-			return BLK_STS_IOERR;
+	zone->cond = BLK_ZONE_COND_EXP_OPEN;
+	dev->nr_zones_exp_open++;
 
-		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
-			zone->cond = BLK_ZONE_COND_IMP_OPEN;
+	return BLK_STS_OK;
+}
+
+static blk_status_t null_finish_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+	blk_status_t ret;
 
-		ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+		return BLK_STS_IOERR;
+
+	switch (zone->cond) {
+	case BLK_ZONE_COND_FULL:
+		/* finish operation on full is not an error */
+		return BLK_STS_OK;
+	case BLK_ZONE_COND_EMPTY:
+		ret = null_check_zone_resources(dev, zone);
 		if (ret != BLK_STS_OK)
 			return ret;
+		break;
+	case BLK_ZONE_COND_IMP_OPEN:
+		dev->nr_zones_imp_open--;
+		break;
+	case BLK_ZONE_COND_EXP_OPEN:
+		dev->nr_zones_exp_open--;
+		break;
+	case BLK_ZONE_COND_CLOSED:
+		ret = null_check_zone_resources(dev, zone);
+		if (ret != BLK_STS_OK)
+			return ret;
+		dev->nr_zones_closed--;
+		break;
+	default:
+		return BLK_STS_IOERR;
+	}
 
-		zone->wp += nr_sectors;
-		if (zone->wp == zone->start + zone->capacity)
-			zone->cond = BLK_ZONE_COND_FULL;
+	zone->cond = BLK_ZONE_COND_FULL;
+	zone->wp = zone->start + zone->len;
+
+	return BLK_STS_OK;
+}
+
+static blk_status_t null_reset_zone(struct nullb_device *dev, struct blk_zone *zone)
+{
+	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+		return BLK_STS_IOERR;
+
+	switch (zone->cond) {
+	case BLK_ZONE_COND_EMPTY:
+		/* reset operation on empty is not an error */
 		return BLK_STS_OK;
+	case BLK_ZONE_COND_IMP_OPEN:
+		dev->nr_zones_imp_open--;
+		break;
+	case BLK_ZONE_COND_EXP_OPEN:
+		dev->nr_zones_exp_open--;
+		break;
+	case BLK_ZONE_COND_CLOSED:
+		dev->nr_zones_closed--;
+		break;
+	case BLK_ZONE_COND_FULL:
+		break;
 	default:
-		/* Invalid zone condition */
 		return BLK_STS_IOERR;
 	}
+
+	zone->cond = BLK_ZONE_COND_EMPTY;
+	zone->wp = zone->start;
+
+	return BLK_STS_OK;
 }
 
 static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
@@ -222,56 +466,34 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 	struct nullb_device *dev = cmd->nq->dev;
 	unsigned int zone_no = null_zone_no(dev, sector);
 	struct blk_zone *zone = &dev->zones[zone_no];
+	blk_status_t ret = BLK_STS_OK;
 	size_t i;
 
 	switch (op) {
 	case REQ_OP_ZONE_RESET_ALL:
-		for (i = 0; i < dev->nr_zones; i++) {
-			if (zone[i].type == BLK_ZONE_TYPE_CONVENTIONAL)
-				continue;
-			zone[i].cond = BLK_ZONE_COND_EMPTY;
-			zone[i].wp = zone[i].start;
-		}
+		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++)
+			null_reset_zone(dev, &dev->zones[i]);
 		break;
 	case REQ_OP_ZONE_RESET:
-		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
-			return BLK_STS_IOERR;
-
-		zone->cond = BLK_ZONE_COND_EMPTY;
-		zone->wp = zone->start;
+		ret = null_reset_zone(dev, zone);
 		break;
 	case REQ_OP_ZONE_OPEN:
-		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
-			return BLK_STS_IOERR;
-		if (zone->cond == BLK_ZONE_COND_FULL)
-			return BLK_STS_IOERR;
-
-		zone->cond = BLK_ZONE_COND_EXP_OPEN;
+		ret = null_open_zone(dev, zone);
 		break;
 	case REQ_OP_ZONE_CLOSE:
-		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
-			return BLK_STS_IOERR;
-		if (zone->cond == BLK_ZONE_COND_FULL)
-			return BLK_STS_IOERR;
-
-		if (zone->wp == zone->start)
-			zone->cond = BLK_ZONE_COND_EMPTY;
-		else
-			zone->cond = BLK_ZONE_COND_CLOSED;
+		ret = null_close_zone(dev, zone);
 		break;
 	case REQ_OP_ZONE_FINISH:
-		if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
-			return BLK_STS_IOERR;
-
-		zone->cond = BLK_ZONE_COND_FULL;
-		zone->wp = zone->start + zone->len;
+		ret = null_finish_zone(dev, zone);
 		break;
 	default:
 		return BLK_STS_NOTSUPP;
 	}
 
-	trace_nullb_zone_op(cmd, zone_no, zone->cond);
-	return BLK_STS_OK;
+	if (ret == BLK_STS_OK)
+		trace_nullb_zone_op(cmd, zone_no, zone->cond);
+
+	return ret;
 }
 
 blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 5124eca90e83..70da8b86ce58 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -233,7 +233,7 @@ static int pcd_block_open(struct block_device *bdev, fmode_t mode)
 	struct pcd_unit *cd = bdev->bd_disk->private_data;
 	int ret;
 
-	check_disk_change(bdev);
+	bdev_check_media_change(bdev);
 
 	mutex_lock(&pcd_mutex);
 	ret = cdrom_open(&cd->info, bdev, mode);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 1034e445680c..467dbd06b7cd 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -1082,65 +1082,6 @@ static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *p
 	}
 }
 
-/*
- * recover a failed write, query for relocation if possible
- *
- * returns 1 if recovery is possible, or 0 if not
- *
- */
-static int pkt_start_recovery(struct packet_data *pkt)
-{
-	/*
-	 * FIXME. We need help from the file system to implement
-	 * recovery handling.
-	 */
-	return 0;
-#if 0
-	struct request *rq = pkt->rq;
-	struct pktcdvd_device *pd = rq->rq_disk->private_data;
-	struct block_device *pkt_bdev;
-	struct super_block *sb = NULL;
-	unsigned long old_block, new_block;
-	sector_t new_sector;
-
-	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
-	if (pkt_bdev) {
-		sb = get_super(pkt_bdev);
-		bdput(pkt_bdev);
-	}
-
-	if (!sb)
-		return 0;
-
-	if (!sb->s_op->relocate_blocks)
-		goto out;
-
-	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
-	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
-		goto out;
-
-	new_sector = new_block * (CD_FRAMESIZE >> 9);
-	pkt->sector = new_sector;
-
-	bio_reset(pkt->bio);
-	bio_set_dev(pkt->bio, pd->bdev);
-	bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
-	pkt->bio->bi_iter.bi_sector = new_sector;
-	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
-	pkt->bio->bi_vcnt = pkt->frames;
-
-	pkt->bio->bi_end_io = pkt_end_io_packet_write;
-	pkt->bio->bi_private = pkt;
-
-	drop_super(sb);
-	return 1;
-
-out:
-	drop_super(sb);
-	return 0;
-#endif
-}
-
 static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
 {
 #if PACKET_DEBUG > 1
@@ -1357,12 +1298,8 @@ static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data
 			break;
 
 		case PACKET_RECOVERY_STATE:
-			if (pkt_start_recovery(pkt)) {
-				pkt_start_write(pd, pkt);
-			} else {
-				pkt_dbg(2, pd, "No recovery possible\n");
-				pkt_set_state(pkt, PACKET_FINISHED_STATE);
-			}
+			pkt_dbg(2, pd, "No recovery possible\n");
+			pkt_set_state(pkt, PACKET_FINISHED_STATE);
 			break;
 
 		case PACKET_FINISHED_STATE:
@@ -2173,16 +2110,18 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 	int ret;
 	long lba;
 	struct request_queue *q;
+	struct block_device *bdev;
 
 	/*
 	 * We need to re-open the cdrom device without O_NONBLOCK to be able
 	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
-	 * so bdget() can't fail.
+	 * so open should not fail.
 	 */
-	bdget(pd->bdev->bd_dev);
-	ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd);
-	if (ret)
+	bdev = blkdev_get_by_dev(pd->bdev->bd_dev, FMODE_READ | FMODE_EXCL, pd);
+	if (IS_ERR(bdev)) {
+		ret = PTR_ERR(bdev);
 		goto out;
+	}
 
 	ret = pkt_get_last_written(pd, &lba);
 	if (ret) {
@@ -2192,7 +2131,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 
 	set_capacity(pd->disk, lba << 2);
 	set_capacity(pd->bdev->bd_disk, lba << 2);
-	bd_set_size(pd->bdev, (loff_t)lba << 11);
+	bd_set_nr_sectors(pd->bdev, lba << 2);
 
 	q = bdev_get_queue(pd->bdev);
 	if (write) {
@@ -2226,7 +2165,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
 	return 0;
 
 out_putdev:
-	blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL);
+	blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 out:
 	return ret;
 }
@@ -2563,7 +2502,6 @@ static int pkt_seq_show(struct seq_file *m, void *p)
 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 {
 	int i;
-	int ret = 0;
 	char b[BDEVNAME_SIZE];
 	struct block_device *bdev;
 
@@ -2586,12 +2524,9 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 		}
 	}
 
-	bdev = bdget(dev);
-	if (!bdev)
-		return -ENOMEM;
-	ret = blkdev_get(bdev, FMODE_READ | FMODE_NDELAY, NULL);
-	if (ret)
-		return ret;
+	bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
+	if (IS_ERR(bdev))
+		return PTR_ERR(bdev);
 	if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
 		blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
 		return -EINVAL;
@@ -2609,7 +2544,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
 	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
 	if (IS_ERR(pd->cdrw.thread)) {
 		pkt_err(pd, "can't start kernel thread\n");
-		ret = -ENOMEM;
 		goto out_mem;
 	}
 
@@ -2621,7 +2555,7 @@ out_mem:
 	blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
 	/* This is safe: open() is still holding a reference. */
 	module_put(THIS_MODULE);
-	return ret;
+	return -ENOMEM;
 }
 
 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
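pktcdvd (and zram later in this series) replace the bdget()/blkdev_get() pair with blkdev_get_by_dev(), which looks up and opens the device in one step and reports failure through ERR_PTR instead of a separate allocation failure path. A hedged sketch of the calling convention (open_backing_dev is an illustrative name):

/* Sketch: opening a block device by dev_t with the combined helper. */
static struct block_device *open_backing_dev(dev_t devt, void *holder)
{
	struct block_device *bdev;

	bdev = blkdev_get_by_dev(devt, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				 holder);
	if (IS_ERR(bdev))
		return bdev;	/* ERR_PTR(-errno); no bdput() needed */

	/* ... use bdev; release with blkdev_put(bdev, <same mode flags>) */
	return bdev;
}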
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e77eaab5cf23..f84128abade3 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4010,10 +4010,10 @@ static int rbd_try_lock(struct rbd_device *rbd_dev)
 		rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
			 ENTITY_NAME(lockers[0].id.name));
 
-		ret = ceph_monc_blacklist_add(&client->monc,
+		ret = ceph_monc_blocklist_add(&client->monc,
					      &lockers[0].info.addr);
 		if (ret) {
-			rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
+			rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
				 ENTITY_NAME(lockers[0].id.name), ret);
 			goto out;
 		}
@@ -4077,7 +4077,7 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
 	ret = rbd_try_lock(rbd_dev);
 	if (ret < 0) {
 		rbd_warn(rbd_dev, "failed to lock header: %d", ret);
-		if (ret == -EBLACKLISTED)
+		if (ret == -EBLOCKLISTED)
 			goto out;
 
 		ret = 1; /* request lock anyway */
@@ -4613,7 +4613,7 @@ static void rbd_reregister_watch(struct work_struct *work)
 	ret = __rbd_register_watch(rbd_dev);
 	if (ret) {
 		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
-		if (ret != -EBLACKLISTED && ret != -ENOENT) {
+		if (ret != -EBLOCKLISTED && ret != -ENOENT) {
 			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
@@ -4921,7 +4921,7 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
 		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
 		dout("setting size to %llu sectors", (unsigned long long)size);
 		set_capacity(rbd_dev->disk, size);
-		revalidate_disk(rbd_dev->disk);
+		revalidate_disk_size(rbd_dev->disk, true);
 	}
 }
@@ -5022,7 +5022,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
 	}
 
 	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
-		q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
+		blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
 
 	/*
	 * disk_release() expects a queue ref from add_disk() and will
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index cc6a4e2587ae..8b2411ccbda9 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -91,29 +91,18 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
 	dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
 	dev->max_segments = BMAX_SEGMENTS;
 
-	dev->max_hw_sectors = min_t(u32, dev->max_hw_sectors,
-				    le32_to_cpu(rsp->max_hw_sectors));
-	dev->max_segments = min_t(u16, dev->max_segments,
-				  le16_to_cpu(rsp->max_segments));
-
 	return 0;
 }
 
 static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
				    size_t new_nsectors)
 {
-	int err = 0;
-
 	rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
		      dev->nsectors, new_nsectors);
 	dev->nsectors = new_nsectors;
 	set_capacity(dev->gd, dev->nsectors);
-	err = revalidate_disk(dev->gd);
-	if (err)
-		rnbd_clt_err(dev,
-			     "Failed to change device size from %zu to %zu, err: %d\n",
-			     dev->nsectors, new_nsectors, err);
-	return err;
+	revalidate_disk_size(dev->gd, true);
+	return 0;
 }
 
 static int process_msg_open_rsp(struct rnbd_clt_dev *dev,
@@ -433,7 +422,7 @@ enum wait_type {
 };
 
 static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
-			struct rnbd_iu *iu, struct kvec *vec, size_t nr,
+			struct rnbd_iu *iu, struct kvec *vec,
 			size_t len, struct scatterlist *sg, unsigned int sg_len,
 			void (*conf)(struct work_struct *work),
 			int *errno, enum wait_type wait)
@@ -447,7 +436,7 @@ static int send_usr_msg(struct rtrs_clt *rtrs, int dir,
 		.conf_fn = msg_conf,
 	};
 	err = rtrs_clt_request(dir, &req_ops, rtrs, iu->permit,
-			       vec, nr, len, sg, sg_len);
+			       vec, 1, len, sg, sg_len);
 	if (!err && wait) {
 		wait_event(iu->comp.wait, iu->comp.errno != INT_MAX);
 		*errno = iu->comp.errno;
@@ -492,7 +481,7 @@ static int send_msg_close(struct rnbd_clt_dev *dev, u32 device_id, bool wait)
 	msg.device_id	= cpu_to_le32(device_id);
 
 	WARN_ON(!rnbd_clt_get_dev(dev));
-	err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 1, 0, NULL, 0,
+	err = send_usr_msg(sess->rtrs, WRITE, iu, &vec, 0, NULL, 0,
			   msg_close_conf, &errno, wait);
 	if (err) {
 		rnbd_clt_put_dev(dev);
@@ -581,7 +570,7 @@ static int send_msg_open(struct rnbd_clt_dev *dev, bool wait)
 
 	WARN_ON(!rnbd_clt_get_dev(dev));
 	err = send_usr_msg(sess->rtrs, READ, iu,
-			   &vec, 1, sizeof(*rsp), iu->sglist, 1,
+			   &vec, sizeof(*rsp), iu->sglist, 1,
			   msg_open_conf, &errno, wait);
 	if (err) {
 		rnbd_clt_put_dev(dev);
@@ -635,7 +624,7 @@ static int send_msg_sess_info(struct rnbd_clt_session *sess, bool wait)
 		goto put_iu;
 	}
 	err = send_usr_msg(sess->rtrs, READ, iu,
-			   &vec, 1, sizeof(*rsp), iu->sglist, 1,
+			   &vec, sizeof(*rsp), iu->sglist, 1,
			   msg_sess_info_conf, &errno, wait);
 	if (err) {
 		rnbd_clt_put_sess(sess);
@@ -1180,7 +1169,7 @@ static int setup_mq_tags(struct rnbd_clt_session *sess)
 	tag_set->queue_depth	= sess->queue_depth;
 	tag_set->numa_node	= NUMA_NO_NODE;
 	tag_set->flags		= BLK_MQ_F_SHOULD_MERGE |
-				  BLK_MQ_F_TAG_SHARED;
+				  BLK_MQ_F_TAG_QUEUE_SHARED;
 	tag_set->cmd_size	= sizeof(struct rnbd_iu);
 	tag_set->nr_hw_queues	= num_online_cpus();
 
@@ -1520,7 +1509,7 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
			"map_device: Failed to configure device, err: %d\n",
			ret);
 		mutex_unlock(&dev->lock);
-		goto del_dev;
+		goto send_close;
 	}
 
 	rnbd_clt_info(dev,
@@ -1539,6 +1528,8 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
 
 	return dev;
 
+send_close:
+	send_msg_close(dev, dev->device_id, WAIT);
 del_dev:
 	delete_dev(dev);
 put_dev:
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 8799e3bab067..63f549889f87 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -439,7 +439,7 @@ static void card_state_change(struct rsxx_cardinfo *card,
 	case CARD_STATE_FAULT:
 		dev_crit(CARD_TO_DEV(card),
			"Hardware Fault reported!\n");
-		/* Fall through. */
+		fallthrough;
 
 	/* Everything else, detach DMA interface if it's attached. */
 	case CARD_STATE_SHUTDOWN:
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index ae6454c24594..a962b4551bed 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -25,7 +25,6 @@
 #include <linux/dma-mapping.h>
 #include <linux/completion.h>
 #include <linux/scatterlist.h>
-#include <linux/version.h>
 #include <linux/err.h>
 #include <linux/aer.h>
 #include <linux/wait.h>
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index dd34504382e5..52dd1efa00f9 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -638,7 +638,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 		return 0;
 
 	if (mode & (FMODE_READ|FMODE_WRITE)) {
-		check_disk_change(bdev);
+		if (bdev_check_media_change(bdev) && fs->disk_in)
+			fs->ejected = 0;
 		if ((mode & FMODE_WRITE) && fs->write_protected) {
 			err = -EROFS;
 			goto out;
@@ -735,24 +736,6 @@ static unsigned int floppy_check_events(struct gendisk *disk,
 	return fs->ejected ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
-static int floppy_revalidate(struct gendisk *disk)
-{
-	struct floppy_state *fs = disk->private_data;
-	struct swim __iomem *base = fs->swd->base;
-
-	swim_drive(base, fs->location);
-
-	if (fs->ejected)
-		setup_medium(fs);
-
-	if (!fs->disk_in)
-		swim_motor(base, OFF);
-	else
-		fs->ejected = 0;
-
-	return !fs->disk_in;
-}
-
 static const struct block_device_operations floppy_fops = {
 	.owner		 = THIS_MODULE,
 	.open		 = floppy_unlocked_open,
@@ -760,7 +743,6 @@ static const struct block_device_operations floppy_fops = {
 	.ioctl		 = floppy_ioctl,
 	.getgeo		 = floppy_getgeo,
 	.check_events	 = floppy_check_events,
-	.revalidate_disk = floppy_revalidate,
 };
 
 static struct kobject *floppy_find(dev_t dev, int *part, void *data)
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index aa77eb5fb7de..c2d922d125e2 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -945,7 +945,8 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
 
 	if (err == 0 && (mode & FMODE_NDELAY) == 0 &&
	    (mode & (FMODE_READ|FMODE_WRITE))) {
-		check_disk_change(bdev);
+		if (bdev_check_media_change(bdev))
+			floppy_revalidate(bdev->bd_disk);
 		if (fs->ejected)
 			err = -ENXIO;
 	}
@@ -1055,7 +1056,6 @@ static const struct block_device_operations floppy_fops = {
 	.release	= floppy_release,
 	.ioctl		= floppy_ioctl,
 	.check_events	= floppy_check_events,
-	.revalidate_disk= floppy_revalidate,
 };
 
 static const struct blk_mq_ops swim3_mq_ops = {
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index b2e48dac1ebd..a314b9382442 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -598,7 +598,7 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
 	struct virtio_blk *vblk = vdev->priv;
 
 	blk_queue_write_cache(vblk->disk->queue, writeback, false);
-	revalidate_disk(vblk->disk);
+	revalidate_disk_size(vblk->disk, true);
 }
 
 static const char *const virtblk_cache_types[] = {
@@ -646,7 +646,7 @@ static struct attribute *virtblk_attrs[] = {
 static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
 {
-	struct device *dev = container_of(kobj, struct device, kobj);
+	struct device *dev = kobj_to_dev(kobj);
 	struct gendisk *disk = dev_to_disk(dev);
 	struct virtio_blk *vblk = disk->private_data;
 	struct virtio_device *vdev = vblk->vdev;
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index 5d8e0ab3f054..8d581c7536fb 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -888,26 +888,20 @@ static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing)
 	return ace->media_change ? DISK_EVENT_MEDIA_CHANGE : 0;
 }
 
-static int ace_revalidate_disk(struct gendisk *gd)
+static void ace_media_changed(struct ace_device *ace)
 {
-	struct ace_device *ace = gd->private_data;
 	unsigned long flags;
 
-	dev_dbg(ace->dev, "ace_revalidate_disk()\n");
-
-	if (ace->media_change) {
-		dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
+	dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n");
 
-		spin_lock_irqsave(&ace->lock, flags);
-		ace->id_req_count++;
-		spin_unlock_irqrestore(&ace->lock, flags);
+	spin_lock_irqsave(&ace->lock, flags);
+	ace->id_req_count++;
+	spin_unlock_irqrestore(&ace->lock, flags);
 
-		tasklet_schedule(&ace->fsm_tasklet);
-		wait_for_completion(&ace->id_completion);
-	}
+	tasklet_schedule(&ace->fsm_tasklet);
+	wait_for_completion(&ace->id_completion);
 
 	dev_dbg(ace->dev, "revalidate complete\n");
-	return ace->id_result;
 }
 
 static int ace_open(struct block_device *bdev, fmode_t mode)
@@ -922,7 +916,8 @@ static int ace_open(struct block_device *bdev, fmode_t mode)
 	ace->users++;
 	spin_unlock_irqrestore(&ace->lock, flags);
 
-	check_disk_change(bdev);
+	if (bdev_check_media_change(bdev) && ace->media_change)
+		ace_media_changed(ace);
 	mutex_unlock(&xsysace_mutex);
 
 	return 0;
@@ -966,7 +961,6 @@ static const struct block_device_operations ace_fops = {
 	.open = ace_open,
 	.release = ace_release,
 	.check_events = ace_check_events,
-	.revalidate_disk = ace_revalidate_disk,
 	.getgeo = ace_getgeo,
 };
 
@@ -1080,7 +1074,7 @@ static int ace_setup(struct ace_device *ace)
		(unsigned long long) ace->physaddr, ace->baseaddr, ace->irq);
 
 	ace->media_change = 1;
-	ace_revalidate_disk(ace->gd);
+	ace_media_changed(ace);
 
 	/* Make the sysace device 'live' */
 	add_disk(ace->gd);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9100ac36670a..1b697208d661 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -52,6 +52,9 @@ static unsigned int num_devices = 1;
  */
 static size_t huge_class_size;
 
+static const struct block_device_operations zram_devops;
+static const struct block_device_operations zram_wb_devops;
+
 static void zram_free_page(struct zram *zram, size_t index);
 static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
				u32 index, int offset, struct bio *bio);
@@ -408,8 +411,7 @@ static void reset_bdev(struct zram *zram)
 	zram->backing_dev = NULL;
 	zram->old_block_size = 0;
 	zram->bdev = NULL;
-	zram->disk->queue->backing_dev_info->capabilities |=
-		BDI_CAP_SYNCHRONOUS_IO;
+	zram->disk->fops = &zram_devops;
 	kvfree(zram->bitmap);
 	zram->bitmap = NULL;
 }
@@ -491,9 +493,10 @@ static ssize_t backing_dev_store(struct device *dev,
 		goto out;
 	}
 
-	bdev = bdgrab(I_BDEV(inode));
-	err = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
-	if (err < 0) {
+	bdev = blkdev_get_by_dev(inode->i_rdev,
+			FMODE_READ | FMODE_WRITE | FMODE_EXCL, zram);
+	if (IS_ERR(bdev)) {
+		err = PTR_ERR(bdev);
 		bdev = NULL;
 		goto out;
 	}
@@ -528,8 +531,7 @@ static ssize_t backing_dev_store(struct device *dev,
 	 * freely but in fact, IO is going on so finally could cause
 	 * use-after-free when the IO is really done.
 	 */
-	zram->disk->queue->backing_dev_info->capabilities &=
-		~BDI_CAP_SYNCHRONOUS_IO;
+	zram->disk->fops = &zram_wb_devops;
 	up_write(&zram->init_lock);
 
 	pr_info("setup backing device %s\n", file_name);
@@ -1216,10 +1218,11 @@ out:
 static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
			    struct bio *bio, bool partial_io)
 {
-	int ret;
+	struct zcomp_strm *zstrm;
 	unsigned long handle;
 	unsigned int size;
 	void *src, *dst;
+	int ret;
 
 	zram_slot_lock(zram, index);
 	if (zram_test_flag(zram, index, ZRAM_WB)) {
@@ -1250,6 +1253,9 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 
 	size = zram_get_obj_size(zram, index);
 
+	if (size != PAGE_SIZE)
+		zstrm = zcomp_stream_get(zram->comp);
+
 	src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
 	if (size == PAGE_SIZE) {
 		dst = kmap_atomic(page);
@@ -1257,8 +1263,6 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 		kunmap_atomic(dst);
 		ret = 0;
 	} else {
-		struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp);
-
 		dst = kmap_atomic(page);
 		ret = zcomp_decompress(zstrm, src, size, dst);
 		kunmap_atomic(dst);
@@ -1268,7 +1272,7 @@ static int __zram_bvec_read(struct zram *zram, struct page *page, u32 index,
 	zram_slot_unlock(zram, index);
 
 	/* Should NEVER happen. Return bio error if it does. */
-	if (unlikely(ret))
+	if (WARN_ON(ret))
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 
 	return ret;
@@ -1739,7 +1743,7 @@ static ssize_t disksize_store(struct device *dev,
 
 	zram->disksize = disksize;
 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-	revalidate_disk(zram->disk);
+	revalidate_disk_size(zram->disk, true);
 	up_write(&zram->init_lock);
 
 	return len;
@@ -1786,7 +1790,7 @@ static ssize_t reset_store(struct device *dev,
 	/* Make sure all the pending I/O are finished */
 	fsync_bdev(bdev);
 	zram_reset_device(zram);
-	revalidate_disk(zram->disk);
+	revalidate_disk_size(zram->disk, true);
 	bdput(bdev);
 
 	mutex_lock(&bdev->bd_mutex);
@@ -1819,6 +1823,13 @@ static const struct block_device_operations zram_devops = {
 	.owner = THIS_MODULE
 };
 
+static const struct block_device_operations zram_wb_devops = {
+	.open = zram_open,
+	.submit_bio = zram_submit_bio,
+	.swap_slot_free_notify = zram_slot_free_notify,
+	.owner = THIS_MODULE
+};
+
 static DEVICE_ATTR_WO(compact);
 static DEVICE_ATTR_RW(disksize);
 static DEVICE_ATTR_RO(initstate);
@@ -1946,8 +1957,7 @@ static int zram_add(void)
 	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
 		blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);
 
-	zram->disk->queue->backing_dev_info->capabilities |=
-		(BDI_CAP_STABLE_WRITES | BDI_CAP_SYNCHRONOUS_IO);
+	blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
 	device_add_disk(NULL, zram->disk, zram_disk_attr_groups);
 
 	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
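With BDI_CAP_SYNCHRONOUS_IO gone, zram signals its I/O semantics by swapping the whole block_device_operations table at configuration time rather than toggling a BDI capability bit: as far as this series goes, the block layer infers synchronous behaviour from the fops in use (the writeback table above notably omits an entry such as ->rw_page that the default table carries). A hedged sketch of the pattern with illustrative demo_* names:

/* Sketch: selecting behaviour by swapping fops tables.  Which table is
 * installed — not a runtime flag — tells the block layer how the disk
 * behaves.  demo_open/demo_submit_bio/demo_rw_page are stand-ins.
 */
static const struct block_device_operations demo_sync_devops = {
	.open		= demo_open,
	.submit_bio	= demo_submit_bio,
	.rw_page	= demo_rw_page,		/* synchronous page I/O hint */
	.owner		= THIS_MODULE,
};

static const struct block_device_operations demo_async_devops = {
	.open		= demo_open,
	.submit_bio	= demo_submit_bio,	/* no ->rw_page */
	.owner		= THIS_MODULE,
};

static void demo_enable_writeback(struct gendisk *disk)
{
	disk->fops = &demo_async_devops;	/* swap under the init lock */
}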