From d8115c35bf3ee575cfc9c51ac9853f58a21a43dc Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 28 Feb 2018 10:15:29 -0800 Subject: md: Delete gendisk before cleaning up the request queue Remove the disk, partition and bdi sysfs attributes before cleaning up the request queue associated with the disk. Signed-off-by: Bart Van Assche Reviewed-by: Johannes Thumshirn Reviewed-by: Joseph Qi Reviewed-by: Ming Lei Cc: Shaohua Li Signed-off-by: Jens Axboe --- drivers/md/md.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/md.c b/drivers/md/md.c index bc67ab6844f0..eba7fa2f0abb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5203,12 +5203,12 @@ static void md_free(struct kobject *ko) if (mddev->sysfs_state) sysfs_put(mddev->sysfs_state); + if (mddev->gendisk) + del_gendisk(mddev->gendisk); if (mddev->queue) blk_cleanup_queue(mddev->queue); - if (mddev->gendisk) { - del_gendisk(mddev->gendisk); + if (mddev->gendisk) put_disk(mddev->gendisk); - } percpu_ref_exit(&mddev->writes_pending); kfree(mddev); -- cgit v1.2.3 From 5ee0524ba137fe928a88b440d014e3c8451fb32c Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 28 Feb 2018 10:15:31 -0800 Subject: block: Add 'lock' as third argument to blk_alloc_queue_node() This patch does not change any functionality. Signed-off-by: Bart Van Assche Reviewed-by: Joseph Qi Cc: Christoph Hellwig Cc: Philipp Reisner Cc: Ulf Hansson Cc: Kees Cook Signed-off-by: Jens Axboe --- block/blk-core.c | 7 ++++--- block/blk-mq.c | 2 +- drivers/block/null_blk.c | 3 ++- drivers/ide/ide-probe.c | 2 +- drivers/lightnvm/core.c | 2 +- drivers/md/dm.c | 2 +- drivers/nvdimm/pmem.c | 2 +- drivers/nvme/host/multipath.c | 2 +- drivers/scsi/scsi_lib.c | 2 +- include/linux/blkdev.h | 3 ++- 10 files changed, 15 insertions(+), 12 deletions(-) (limited to 'drivers/md') diff --git a/block/blk-core.c b/block/blk-core.c index 2d1a7bbe0634..e873a24bf82d 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -810,7 +810,7 @@ void blk_exit_rl(struct request_queue *q, struct request_list *rl) struct request_queue *blk_alloc_queue(gfp_t gfp_mask) { - return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE); + return blk_alloc_queue_node(gfp_mask, NUMA_NO_NODE, NULL); } EXPORT_SYMBOL(blk_alloc_queue); @@ -888,7 +888,8 @@ static void blk_rq_timed_out_timer(struct timer_list *t) kblockd_schedule_work(&q->timeout_work); } -struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + spinlock_t *lock) { struct request_queue *q; @@ -1030,7 +1031,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) { struct request_queue *q; - q = blk_alloc_queue_node(GFP_KERNEL, node_id); + q = blk_alloc_queue_node(GFP_KERNEL, node_id, NULL); if (!q) return NULL; diff --git a/block/blk-mq.c b/block/blk-mq.c index 9594a0e9f65b..75336848f7a7 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2556,7 +2556,7 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set) { struct request_queue *uninit_q, *q; - uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node); + uninit_q = blk_alloc_queue_node(GFP_KERNEL, set->numa_node, NULL); if (!uninit_q) return ERR_PTR(-ENOMEM); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index d12d7a8325ad..6dc7e7cfca4a 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1760,7 +1760,8 @@ static int null_add_dev(struct nullb_device *dev) } 
null_init_queues(nullb); } else if (dev->queue_mode == NULL_Q_BIO) { - nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node); + nullb->q = blk_alloc_queue_node(GFP_KERNEL, dev->home_node, + NULL); if (!nullb->q) { rv = -ENOMEM; goto out_cleanup_queues; diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index caa20eb5f26b..d6b8c7e1545d 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -766,7 +766,7 @@ static int ide_init_queue(ide_drive_t *drive) * limits and LBA48 we could raise it but as yet * do not. */ - q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif)); + q = blk_alloc_queue_node(GFP_KERNEL, hwif_to_node(hwif), NULL); if (!q) return 1; diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index dcc9e621e651..5f1988df1593 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -384,7 +384,7 @@ static int nvm_create_tgt(struct nvm_dev *dev, struct nvm_ioctl_create *create) goto err_dev; } - tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node); + tqueue = blk_alloc_queue_node(GFP_KERNEL, dev->q->node, NULL); if (!tqueue) { ret = -ENOMEM; goto err_disk; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 68136806d365..7586d249266c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1841,7 +1841,7 @@ static struct mapped_device *alloc_dev(int minor) INIT_LIST_HEAD(&md->table_devices); spin_lock_init(&md->uevent_lock); - md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); + md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id, NULL); if (!md->queue) goto bad; md->queue->queuedata = md; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 10041ac4032c..cfb15ac50925 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -344,7 +344,7 @@ static int pmem_attach_disk(struct device *dev, return -EBUSY; } - q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev)); + q = blk_alloc_queue_node(GFP_KERNEL, dev_to_node(dev), NULL); if (!q) return -ENOMEM; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index b7e5c6db4d92..88440562a197 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -162,7 +162,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) if (!(ctrl->subsys->cmic & (1 << 1)) || !multipath) return 0; - q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); + q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL); if (!q) goto out; q->queuedata = head; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index a86df9ca7d1c..71d1135f94d0 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2223,7 +2223,7 @@ struct request_queue *scsi_old_alloc_queue(struct scsi_device *sdev) struct Scsi_Host *shost = sdev->host; struct request_queue *q; - q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE); + q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, NULL); if (!q) return NULL; q->cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index ed63f3b69c12..667a9b0053d9 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1321,7 +1321,8 @@ extern long nr_blockdev_pages(void); bool __must_check blk_get_queue(struct request_queue *); struct request_queue *blk_alloc_queue(gfp_t); -struct request_queue *blk_alloc_queue_node(gfp_t, int); +struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id, + spinlock_t *lock); extern void blk_put_queue(struct request_queue *); extern void 
blk_set_queue_dying(struct request_queue *); -- cgit v1.2.3 From 44e1ebe2a33b6cf70d6bee6beb1d5a198a841380 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:07 -0800 Subject: bcache: Use the blk_queue_flag_{set,clear}() functions Use the blk_queue_flag_{set,clear}() functions instead of open-coding these. Cc: Kent Overstreet Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Reviewed-by: Michael Lyle Reviewed-by: Johannes Thumshirn Reviewed-by: Martin K. Petersen Signed-off-by: Bart Van Assche Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 4d1d8dfb2d2a..e8dfa804bd98 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -833,9 +833,9 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, q->limits.io_min = block_size; q->limits.logical_block_size = block_size; q->limits.physical_block_size = block_size; - set_bit(QUEUE_FLAG_NONROT, &d->disk->queue->queue_flags); - clear_bit(QUEUE_FLAG_ADD_RANDOM, &d->disk->queue->queue_flags); - set_bit(QUEUE_FLAG_DISCARD, &d->disk->queue->queue_flags); + blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue); blk_queue_write_cache(q, true, true); -- cgit v1.2.3 From 8b904b5b6b58b9a29dcf3f82d936d9e7fd69fda6 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Wed, 7 Mar 2018 17:10:10 -0800 Subject: block: Use blk_queue_flag_*() in drivers instead of queue_flag_*() This patch has been generated as follows: for verb in set_unlocked clear_unlocked set clear; do replace-in-files queue_flag_${verb} blk_queue_flag_${verb%_unlocked} \ $(git grep -lw queue_flag_${verb} drivers block/bsg*) done Except for protecting all queue flag changes with the queue lock this patch does not change any functionality. Cc: Mike Snitzer Cc: Shaohua Li Cc: Christoph Hellwig Cc: Hannes Reinecke Cc: Ming Lei Signed-off-by: Bart Van Assche Reviewed-by: Martin K. Petersen Reviewed-by: Johannes Thumshirn Acked-by: Martin K. 
Petersen Signed-off-by: Jens Axboe --- block/bsg-lib.c | 4 ++-- drivers/block/drbd/drbd_nl.c | 4 ++-- drivers/block/loop.c | 10 +++++----- drivers/block/nbd.c | 8 ++++---- drivers/block/null_blk.c | 6 +++--- drivers/block/rbd.c | 4 ++-- drivers/block/rsxx/dev.c | 6 +++--- drivers/block/skd_main.c | 4 ++-- drivers/block/xen-blkfront.c | 10 +++++----- drivers/block/zram/zram_drv.c | 6 +++--- drivers/ide/ide-disk.c | 4 ++-- drivers/ide/ide-probe.c | 2 +- drivers/lightnvm/pblk-init.c | 2 +- drivers/md/dm-table.c | 16 ++++++++-------- drivers/md/md-linear.c | 4 ++-- drivers/md/md.c | 4 ++-- drivers/md/raid0.c | 4 ++-- drivers/md/raid1.c | 6 +++--- drivers/md/raid10.c | 6 +++--- drivers/md/raid5.c | 4 ++-- drivers/mmc/core/queue.c | 8 ++++---- drivers/mtd/mtd_blkdevs.c | 6 +++--- drivers/nvdimm/blk.c | 2 +- drivers/nvdimm/btt.c | 2 +- drivers/nvdimm/pmem.c | 4 ++-- drivers/nvme/host/core.c | 4 ++-- drivers/nvme/host/multipath.c | 2 +- drivers/s390/block/dasd.c | 4 ++-- drivers/s390/block/dcssblk.c | 2 +- drivers/s390/block/scm_blk.c | 4 ++-- drivers/s390/block/xpram.c | 4 ++-- drivers/scsi/megaraid/megaraid_sas_base.c | 2 +- drivers/scsi/megaraid/megaraid_sas_fusion.c | 2 +- drivers/scsi/mpt3sas/mpt3sas_scsih.c | 2 +- drivers/scsi/scsi_debug.c | 2 +- drivers/scsi/scsi_lib.c | 2 +- drivers/scsi/scsi_transport_sas.c | 4 ++-- drivers/scsi/sd.c | 8 ++++---- 38 files changed, 89 insertions(+), 89 deletions(-) (limited to 'drivers/md') diff --git a/block/bsg-lib.c b/block/bsg-lib.c index 1474153f73e3..b4fe1a48f111 100644 --- a/block/bsg-lib.c +++ b/block/bsg-lib.c @@ -275,8 +275,8 @@ struct request_queue *bsg_setup_queue(struct device *dev, const char *name, q->queuedata = dev; q->bsg_job_fn = job_fn; - queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); + blk_queue_flag_set(QUEUE_FLAG_BIDI, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); blk_queue_softirq_done(q, bsg_softirq_done); blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT); diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index a12f77e6891e..b4f02768ba47 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c @@ -1212,10 +1212,10 @@ static void decide_on_discard_support(struct drbd_device *device, * topology on all peers. 
*/ blk_queue_discard_granularity(q, 512); q->limits.max_discard_sectors = drbd_max_discard_sectors(connection); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection); } else { - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); blk_queue_discard_granularity(q, 0); q->limits.max_discard_sectors = 0; q->limits.max_write_zeroes_sectors = 0; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 9d29aa6413e5..7952ed5c607b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -214,10 +214,10 @@ static void __loop_update_dio(struct loop_device *lo, bool dio) blk_mq_freeze_queue(lo->lo_queue); lo->use_dio = use_dio; if (use_dio) { - queue_flag_clear_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); + blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue); lo->lo_flags |= LO_FLAGS_DIRECT_IO; } else { - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; } blk_mq_unfreeze_queue(lo->lo_queue); @@ -817,7 +817,7 @@ static void loop_config_discard(struct loop_device *lo) q->limits.discard_alignment = 0; blk_queue_max_discard_sectors(q, 0); blk_queue_max_write_zeroes_sectors(q, 0); - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); return; } @@ -826,7 +826,7 @@ static void loop_config_discard(struct loop_device *lo) blk_queue_max_discard_sectors(q, UINT_MAX >> 9); blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } static void loop_unprepare_queue(struct loop_device *lo) @@ -1808,7 +1808,7 @@ static int loop_add(struct loop_device **l, int i) * page. For directio mode, merge does help to dispatch bigger request * to underlayer disk. We will enable merge once directio is enabled. 
*/ - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, lo->lo_queue); + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); err = -ENOMEM; disk = lo->lo_disk = alloc_disk(1 << part_shift); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 86258b00a1d4..afbc202ca6fd 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -964,7 +964,7 @@ static void nbd_parse_flags(struct nbd_device *nbd) else set_disk_ro(nbd->disk, false); if (config->flags & NBD_FLAG_SEND_TRIM) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, nbd->disk->queue); if (config->flags & NBD_FLAG_SEND_FLUSH) { if (config->flags & NBD_FLAG_SEND_FUA) blk_queue_write_cache(nbd->disk->queue, true, true); @@ -1040,7 +1040,7 @@ static void nbd_config_put(struct nbd_device *nbd) nbd->config = NULL; nbd->tag_set.timeout = 0; - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, nbd->disk->queue); mutex_unlock(&nbd->config_lock); nbd_put(nbd); @@ -1488,8 +1488,8 @@ static int nbd_dev_add(int index) /* * Tell the block layer that we are not a rotational device */ - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); disk->queue->limits.discard_granularity = 512; blk_queue_max_discard_sectors(disk->queue, UINT_MAX); blk_queue_max_segment_size(disk->queue, UINT_MAX); diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index d6be7a6d8ca6..0517613afccb 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -1525,7 +1525,7 @@ static void null_config_discard(struct nullb *nullb) nullb->q->limits.discard_granularity = nullb->dev->blocksize; nullb->q->limits.discard_alignment = nullb->dev->blocksize; blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, nullb->q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q); } static int null_open(struct block_device *bdev, fmode_t mode) @@ -1810,8 +1810,8 @@ static int null_add_dev(struct nullb_device *dev) } nullb->q->queuedata = nullb; - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, nullb->q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, nullb->q); mutex_lock(&lock); nullb->index = ida_simple_get(&nullb_indexes, 0, 0, GFP_KERNEL); diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8e40da093766..0016170cde0a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4370,7 +4370,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) goto out_tag_set; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */ /* set io sizes to object size */ @@ -4383,7 +4383,7 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) blk_queue_io_opt(q, segment_size); /* enable the discard support */ - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); q->limits.discard_granularity = segment_size; blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE); blk_queue_max_write_zeroes_sectors(q, segment_size / SECTOR_SIZE); diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c index e397d3ee7308..dddb3f2490b6 100644 --- a/drivers/block/rsxx/dev.c +++ 
b/drivers/block/rsxx/dev.c @@ -287,10 +287,10 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card) blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors); blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, card->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue); if (rsxx_discard_supported(card)) { - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue); blk_queue_max_discard_sectors(card->queue, RSXX_HW_BLK_SIZE >> 9); card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE; diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index e41935ab41ef..bc7aea6d7b7c 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c @@ -2858,8 +2858,8 @@ static int skd_cons_disk(struct skd_device *skdev) /* set optimal I/O size to 8KB */ blk_queue_io_opt(q, 8192); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); blk_queue_rq_timeout(q, 8 * HZ); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index e126e4cac2ca..3fcdc0d8eed3 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -931,15 +931,15 @@ static void blkif_set_queue_limits(struct blkfront_info *info) unsigned int segments = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST; - queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); + blk_queue_flag_set(QUEUE_FLAG_VIRT, rq); if (info->feature_discard) { - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); rq->limits.discard_granularity = info->discard_granularity; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) - queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq); + blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq); } /* Hard sector size and max sectors impersonate the equiv. hardware. 
*/ @@ -1610,8 +1610,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) blkif_req(req)->error = BLK_STS_NOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; - queue_flag_clear(QUEUE_FLAG_DISCARD, rq); - queue_flag_clear(QUEUE_FLAG_SECERASE, rq); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq); + blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq); } break; case BLKIF_OP_FLUSH_DISKCACHE: diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 85110e7931e5..71b449613cfa 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1530,8 +1530,8 @@ static int zram_add(void) /* Actual capacity set using syfs (/sys/block/zram/disksize */ set_capacity(zram->disk, 0); /* zram devices sort of resembles non-rotational disks */ - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, zram->disk->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue); /* * To ensure that we always get PAGE_SIZE aligned @@ -1544,7 +1544,7 @@ static int zram_add(void) blk_queue_io_opt(zram->disk->queue, PAGE_SIZE); zram->disk->queue->limits.discard_granularity = PAGE_SIZE; blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, zram->disk->queue); /* * zram_bio_discard() will clear all logical blocks if logical block diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 188d1b03715d..9c47f975567f 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c @@ -687,8 +687,8 @@ static void ide_disk_setup(ide_drive_t *drive) queue_max_sectors(q) / 2); if (ata_id_is_ssd(id)) { - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); } /* calculate drive capacity, and select LBA if possible */ diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index d6b8c7e1545d..2019e66eada7 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -773,7 +773,7 @@ static int ide_init_queue(ide_drive_t *drive) q->request_fn = do_ide_request; q->initialize_rq_fn = ide_initialize_rq; q->cmd_size = sizeof(struct ide_request); - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); if (blk_init_allocated_queue(q) < 0) { blk_cleanup_queue(q); return 1; diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c index 93d671ca518e..5b46924ac66c 100644 --- a/drivers/lightnvm/pblk-init.c +++ b/drivers/lightnvm/pblk-init.c @@ -1067,7 +1067,7 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk, tqueue->limits.discard_granularity = geo->sec_per_chk * geo->sec_size; tqueue->limits.discard_alignment = 0; blk_queue_max_discard_sectors(tqueue, UINT_MAX >> 9); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, tqueue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, tqueue); pr_info("pblk(%s): luns:%u, lines:%d, secs:%llu, buf entries:%u\n", tdisk->disk_name, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 5fe7ec356c33..54c39ad4ef01 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1861,7 +1861,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, q->limits = *limits; if (!dm_table_supports_discards(t)) { - 
queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); /* Must also clear discard limits... */ q->limits.max_discard_sectors = 0; q->limits.max_hw_discard_sectors = 0; @@ -1869,7 +1869,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, q->limits.discard_alignment = 0; q->limits.discard_misaligned = 0; } else - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) { wc = true; @@ -1879,15 +1879,15 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, blk_queue_write_cache(q, wc, fua); if (dm_table_supports_dax(t)) - queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); + blk_queue_flag_set(QUEUE_FLAG_DAX, q); if (dm_table_supports_dax_write_cache(t)) dax_write_cache(t->md->dax_dev, true); /* Ensure that all underlying devices are non-rotational. */ if (dm_table_all_devices_attribute(t, device_is_nonrot)) - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); else - queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); if (!dm_table_supports_write_same(t)) q->limits.max_write_same_sectors = 0; @@ -1895,9 +1895,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, q->limits.max_write_zeroes_sectors = 0; if (dm_table_all_devices_attribute(t, queue_supports_sg_merge)) - queue_flag_clear_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + blk_queue_flag_clear(QUEUE_FLAG_NO_SG_MERGE, q); else - queue_flag_set_unlocked(QUEUE_FLAG_NO_SG_MERGE, q); + blk_queue_flag_set(QUEUE_FLAG_NO_SG_MERGE, q); dm_table_verify_integrity(t); @@ -1908,7 +1908,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, * have it set. */ if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); } unsigned int dm_table_get_num_targets(struct dm_table *t) diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 773fc70dced7..4964323d936b 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -138,9 +138,9 @@ static struct linear_conf *linear_conf(struct mddev *mddev, int raid_disks) } if (!discard_supported) - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); /* * Here we calculate the device offsets. 
diff --git a/drivers/md/md.c b/drivers/md/md.c index eba7fa2f0abb..de2b26fba5d8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5608,9 +5608,9 @@ int md_run(struct mddev *mddev) if (mddev->degraded) nonrot = false; if (nonrot) - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue); else - queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, mddev->queue); + blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue); mddev->queue->backing_dev_info->congested_data = mddev; mddev->queue->backing_dev_info->congested_fn = md_congested; } diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 5ecba9eef441..584c10347267 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c @@ -399,9 +399,9 @@ static int raid0_run(struct mddev *mddev) discard_supported = true; } if (!discard_supported) - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); } /* calculate array device size */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index b2eae332e1a2..f1635eb9e95a 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1760,7 +1760,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) } } if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); print_conf(conf); return err; } @@ -3099,10 +3099,10 @@ static int raid1_run(struct mddev *mddev) if (mddev->queue) { if (discard_supported) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); } diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 99c9207899a7..e9c409c5f344 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1845,7 +1845,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) break; } if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev))) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); print_conf(conf); return err; @@ -3844,10 +3844,10 @@ static int raid10_run(struct mddev *mddev) if (mddev->queue) { if (discard_supported) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); } /* need to check that every block has at least one working mirror */ diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 50d01144b805..14714b23a2fa 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7444,10 +7444,10 @@ static int raid5_run(struct mddev *mddev) if (devices_handle_discard_safely && mddev->queue->limits.max_discard_sectors >= (stripe >> 9) && mddev->queue->limits.discard_granularity >= stripe) - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue); else - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, mddev->queue); blk_queue_max_hw_sectors(mddev->queue, UINT_MAX); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 421fab7250ac..56e9a803db21 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ 
-185,14 +185,14 @@ static void mmc_queue_setup_discard(struct request_queue *q, if (!max_discard) return; - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); blk_queue_max_discard_sectors(q, max_discard); q->limits.discard_granularity = card->pref_erase << 9; /* granularity must not be greater than max. discard */ if (card->pref_erase > max_discard) q->limits.discard_granularity = 0; if (mmc_can_secure_erase_trim(card)) - queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, q); + blk_queue_flag_set(QUEUE_FLAG_SECERASE, q); } /** @@ -356,8 +356,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, mq->queue); if (mmc_can_erase(card)) mmc_queue_setup_discard(mq->queue, card); diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 9ec8f033ac5f..16ae4ae8e8f9 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c @@ -419,11 +419,11 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new) blk_queue_logical_block_size(new->rq, tr->blksize); blk_queue_bounce_limit(new->rq, BLK_BOUNCE_HIGH); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, new->rq); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, new->rq); + blk_queue_flag_set(QUEUE_FLAG_NONROT, new->rq); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, new->rq); if (tr->discard) { - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, new->rq); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, new->rq); blk_queue_max_discard_sectors(new->rq, UINT_MAX); } diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index 345acca576b3..7bde764f939a 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -266,7 +266,7 @@ static int nsblk_attach_disk(struct nd_namespace_blk *nsblk) blk_queue_make_request(q, nd_blk_make_request); blk_queue_max_hw_sectors(q, UINT_MAX); blk_queue_logical_block_size(q, nsblk_sector_size(nsblk)); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q->queuedata = nsblk; disk = alloc_disk(0); diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 2ef544f10ec8..6f311f88a8e8 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -1542,7 +1542,7 @@ static int btt_blk_init(struct btt *btt) blk_queue_make_request(btt->btt_queue, btt_make_request); blk_queue_logical_block_size(btt->btt_queue, btt->sector_size); blk_queue_max_hw_sectors(btt->btt_queue, UINT_MAX); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, btt->btt_queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, btt->btt_queue); btt->btt_queue->queuedata = btt; set_capacity(btt->btt_disk, 0); diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index cfb15ac50925..145db2ad712f 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -388,8 +388,8 @@ static int pmem_attach_disk(struct device *dev, blk_queue_physical_block_size(q, PAGE_SIZE); blk_queue_logical_block_size(q, pmem_sector_size(ndns)); blk_queue_max_hw_sectors(q, UINT_MAX); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); - queue_flag_set_unlocked(QUEUE_FLAG_DAX, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_DAX, q); q->queuedata = pmem; disk = alloc_disk_node(0, nid); diff --git a/drivers/nvme/host/core.c 
b/drivers/nvme/host/core.c index 817e5e2766da..72e241923e7d 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1358,7 +1358,7 @@ static void nvme_config_discard(struct nvme_ctrl *ctrl, blk_queue_max_discard_sectors(queue, UINT_MAX); blk_queue_max_discard_segments(queue, NVME_DSM_MAX_RANGES); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, queue); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, queue); if (ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES) blk_queue_max_write_zeroes_sectors(queue, UINT_MAX); @@ -2949,7 +2949,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) ns->queue = blk_mq_init_queue(ctrl->tagset); if (IS_ERR(ns->queue)) goto out_free_ns; - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); + blk_queue_flag_set(QUEUE_FLAG_NONROT, ns->queue); ns->queue->queuedata = ns; ns->ctrl = ctrl; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 88440562a197..7283d7149baf 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -168,7 +168,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) q->queuedata = head; blk_queue_make_request(q, nvme_ns_head_make_request); q->poll_fn = nvme_ns_head_poll; - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); /* set to a default value for 512 until disk is validated */ blk_queue_logical_block_size(q, 512); diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index a7c15f0085e2..7be803afcb43 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c @@ -3210,7 +3210,7 @@ static void dasd_setup_queue(struct dasd_block *block) } else { max = block->base->discipline->max_blocks << block->s2b_shift; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); q->limits.max_dev_sectors = max; blk_queue_logical_block_size(q, logical_block_size); blk_queue_max_hw_sectors(q, max); @@ -3233,7 +3233,7 @@ static void dasd_setup_queue(struct dasd_block *block) blk_queue_max_discard_sectors(q, max_discard_sectors); blk_queue_max_write_zeroes_sectors(q, max_discard_sectors); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 9cae08b36b80..0a312e450207 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -633,7 +633,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->private_data = dev_info; blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); - queue_flag_set_unlocked(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); + blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->dcssblk_queue); seg_byte_size = (dev_info->end - dev_info->start + 1); set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index b4130c7880d8..b1fcb76dd272 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c @@ -472,8 +472,8 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) blk_queue_logical_block_size(rq, 1 << 12); blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */ blk_queue_max_segments(rq, nr_max_blk); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq); + blk_queue_flag_set(QUEUE_FLAG_NONROT, rq); + 
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq); bdev->gendisk = alloc_disk(SCM_NR_PARTS); if (!bdev->gendisk) { diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 2a6334ca750e..3df5d68d09f0 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c @@ -348,8 +348,8 @@ static int __init xpram_setup_blkdev(void) put_disk(xpram_disks[i]); goto out; } - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]); + blk_queue_flag_set(QUEUE_FLAG_NONROT, xpram_queues[i]); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, xpram_queues[i]); blk_queue_make_request(xpram_queues[i], xpram_make_request); blk_queue_logical_block_size(xpram_queues[i], 4096); } diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index a71ee67df084..dc234650014c 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -1864,7 +1864,7 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) blk_queue_max_hw_sectors(sdev->request_queue, (max_io_size / 512)); - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue); + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 073ced07e662..298019cf08a2 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -1908,7 +1908,7 @@ megasas_is_prp_possible(struct megasas_instance *instance, * then sending IOs with holes. * * Though driver can request block layer to disable IO merging by calling- - * queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, sdev->request_queue) but + * blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue) but * user may tune sysfs parameter- nomerges again to 0 or 1. * * If in future IO scheduling is enabled with SCSI BLK MQ, diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 74fca184dba9..e3843828e59a 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -2352,7 +2352,7 @@ scsih_slave_configure(struct scsi_device *sdev) ** merged and can eliminate holes created during merging ** operation. 
**/ - queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, + blk_queue_flag_set(QUEUE_FLAG_NOMERGES, sdev->request_queue); blk_queue_virt_boundary(sdev->request_queue, ioc->page_size - 1); diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index a5986dae9020..1cb353f18d08 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -3897,7 +3897,7 @@ static int scsi_debug_slave_alloc(struct scsi_device *sdp) if (sdebug_verbose) pr_info("slave_alloc <%u %u %u %llu>\n", sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); - queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); + blk_queue_flag_set(QUEUE_FLAG_BIDI, sdp->request_queue); return 0; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 71d1135f94d0..538152f3528e 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2140,7 +2140,7 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) { struct device *dev = shost->dma_dev; - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); /* * this limit is imposed by hardware restrictions diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 736a1f4f9676..7c0987616684 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -227,8 +227,8 @@ static int sas_bsg_initialize(struct Scsi_Host *shost, struct sas_rphy *rphy) * by default assume old behaviour and bounce for any highmem page */ blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); - queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); - queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q); + blk_queue_flag_set(QUEUE_FLAG_BIDI, q); + blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q); return 0; } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index bff21e636ddd..98de3207ac5d 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -714,7 +714,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) case SD_LBP_FULL: case SD_LBP_DISABLE: blk_queue_max_discard_sectors(q, 0); - queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q); return; case SD_LBP_UNMAP: @@ -747,7 +747,7 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode) } blk_queue_max_discard_sectors(q, max_blocks * (logical_block_size >> 9)); - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); + blk_queue_flag_set(QUEUE_FLAG_DISCARD, q); } static int sd_setup_unmap_cmnd(struct scsi_cmnd *cmd) @@ -2952,8 +2952,8 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) rot = get_unaligned_be16(&buffer[4]); if (rot == 1) { - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); + blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); } if (sdkp->device->type == TYPE_ZBC) { -- cgit v1.2.3 From 804f3c6981f5e4a506a8f14dc284cb218d0659ae Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:14 -0700 Subject: bcache: fix cached_dev->count usage for bch_cache_set_error() When bcache metadata I/O fails, bcache will call bch_cache_set_error() to retire the whole cache set. The expected behavior to retire a cache set is to unregister the cache set, and unregister all backing devices attached to this cache set, then remove sysfs entries of the cache set and all attached backing devices, finally release memory of structs cache_set, cache, cached_dev and bcache_device.
In my testing, when a journal I/O failure is triggered by a disconnected cache device, sometimes the cache set cannot be retired, and its sysfs entry /sys/fs/bcache/ still exists and the backing device also references it. This is not expected behavior. When metadata I/O fails, the call sequence to retire the whole cache set is,

bch_cache_set_error()
  bch_cache_set_unregister()
    bch_cache_set_stop()
      __cache_set_unregister()     <- called as callback by calling closure_queue(&c->caching)
        cache_set_flush()          <- called as a callback when refcount of cache_set->caching is 0
          cache_set_free()         <- called as a callback when refcount of cache_set->cl is 0
            bch_cache_set_release() <- called as a callback when refcount of cache_set->kobj is 0

I find that if the kernel thread bch_writeback_thread() quits its while-loop when kthread_should_stop() is true and searched_full_index is false, the closure callback cache_set_flush() set by continue_at() will never be called. The result is that bcache fails to retire the whole cache set.

cache_set_flush() will be called when the refcount of closure c->caching is 0, and in function bcache_device_detach() the refcount of closure c->caching is released to 0 by closure_put(). In the metadata error code path, function bcache_device_detach() is called by cached_dev_detach_finish(). This is a callback routine being called when cached_dev->count is 0. This refcount is decreased by cached_dev_put(). The above dependence indicates that cache_set_flush() will be called when the refcount of cache_set->cl is 0, and the refcount of cache_set->cl reaches 0 when the refcount of cached_dev->count is 0.

The reason why sometimes cached_dev->count is not 0 (when metadata I/O fails and bch_cache_set_error() is called) is that in bch_writeback_thread() the refcount of cached_dev is not decreased properly. In bch_writeback_thread(), cached_dev_put() is called only when searched_full_index is true and cached_dev->writeback_keys is empty, i.e. there is no dirty data on the cache. Most of the run time this is correct, but when bch_writeback_thread() quits the while-loop while the cache is still dirty, the current code forgets to call cached_dev_put() before this kernel thread exits. This is why sometimes cache_set_flush() is not executed and the cache set fails to be retired.

The reason to call cached_dev_put() in bch_writeback_rate() is that when the cache device changes from clean to dirty, cached_dev_get() is called, to make sure that during writeback operations neither the backing device nor the cache device is released. Adding the following code in bch_writeback_thread() does not work,

static int bch_writeback_thread(void *arg)
	}

+	if (atomic_read(&dc->has_dirty))
+		cached_dev_put()
+
	return 0;
}

because the writeback kernel thread can be woken up and started via the sysfs entry:
echo 1 > /sys/block/bcache/bcache/writeback_running
It is difficult to check whether the backing device is dirty without races and extra locks, so the above modification would introduce a potential refcount underflow in some conditions.

The correct fix is to take the cached dev refcount when creating the kernel thread, and to put it before the kernel thread exits. Then bcache does not need to take a cached dev refcount when the cache turns from clean to dirty, or to put a cached dev refcount when the cache turns from dirty to clean. The writeback kernel thread is always safe to reference data structures from the cache set, cache and cached device (because a refcount of the cache device is already taken for it), and no matter whether the kernel thread is stopped by I/O errors or a system reboot, cached_dev->count can always be used correctly.
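To make the resulting lifetime rule concrete, here is a condensed, illustrative C sketch of the pattern this patch adopts; the helper names writeback_thread_fn() and start_writeback_thread() are invented for illustration, while cached_dev_get(), cached_dev_put() and kthread_create() come from the actual code:

static int writeback_thread_fn(void *arg)
{
	struct cached_dev *dc = arg;

	while (!kthread_should_stop()) {
		/* ... perform writeback ... */
	}

	/* thread is exiting: drop the refcount taken at creation time */
	dc->writeback_thread = NULL;
	cached_dev_put(dc);
	return 0;
}

static int start_writeback_thread(struct cached_dev *dc)
{
	/* pin dc for the whole lifetime of the kernel thread */
	cached_dev_get(dc);
	dc->writeback_thread = kthread_create(writeback_thread_fn, dc,
					      "bcache_writeback");
	if (IS_ERR(dc->writeback_thread)) {
		/* thread never ran, so it cannot drop the refcount itself */
		cached_dev_put(dc);
		return PTR_ERR(dc->writeback_thread);
	}
	return 0;
}

With this pairing the put happens exactly once no matter how the thread terminates, so the clean/dirty transitions no longer need to touch dc->count at all.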
The patch is simple, but understanding how it works is quite complicated. Changelog: v2: set dc->writeback_thread to NULL in this patch, as suggested by Hannes. v1: initial version for review. Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Cc: Michael Lyle Cc: Junhui Tang Signed-off-by: Jens Axboe --- drivers/md/bcache/super.c | 1 - drivers/md/bcache/writeback.c | 11 ++++++++--- drivers/md/bcache/writeback.h | 2 -- 3 files changed, 8 insertions(+), 6 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e8dfa804bd98..020be4f1cd8b 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1054,7 +1054,6 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c, if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) { bch_sectors_dirty_init(&dc->disk); atomic_set(&dc->has_dirty, 1); - refcount_inc(&dc->count); bch_writeback_queue(dc); } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index f1d2fc15abcc..b280c134dd4d 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -572,7 +572,7 @@ static int bch_writeback_thread(void *arg) if (kthread_should_stop()) { set_current_state(TASK_RUNNING); - return 0; + break; } schedule(); @@ -585,7 +585,6 @@ static int bch_writeback_thread(void *arg) if (searched_full_index && RB_EMPTY_ROOT(&dc->writeback_keys.keys)) { atomic_set(&dc->has_dirty, 0); - cached_dev_put(dc); SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); bch_write_bdev_super(dc, NULL); } @@ -606,6 +605,9 @@ static int bch_writeback_thread(void *arg) } } + dc->writeback_thread = NULL; + cached_dev_put(dc); + return 0; } @@ -669,10 +671,13 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc) if (!dc->writeback_write_wq) return -ENOMEM; + cached_dev_get(dc); dc->writeback_thread = kthread_create(bch_writeback_thread, dc, "bcache_writeback"); - if (IS_ERR(dc->writeback_thread)) + if (IS_ERR(dc->writeback_thread)) { + cached_dev_put(dc); return PTR_ERR(dc->writeback_thread); + } schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 587b25599856..0bba8f1c6cdf 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -105,8 +105,6 @@ static inline void bch_writeback_add(struct cached_dev *dc) { if (!atomic_read(&dc->has_dirty) && !atomic_xchg(&dc->has_dirty, 1)) { - refcount_inc(&dc->count); - if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) { SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY); /* XXX: should do this synchronously */ -- cgit v1.2.3 From fadd94e05c02afec7b70b0b14915624f1782f578 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:15 -0700 Subject: bcache: quit dc->writeback_thread when BCACHE_DEV_DETACHING is set

In patch "bcache: fix cached_dev->count usage for bch_cache_set_error()", cached_dev_get() is called when creating dc->writeback_thread, and cached_dev_put() is called when exiting dc->writeback_thread. This modification works well unless people detach the bcache device manually via 'echo 1 > /sys/block/bcache/bcache/detach', because this sysfs interface only calls bch_cached_dev_detach(), which wakes up dc->writeback_thread but does not stop it. The reason is that before patch "bcache: fix cached_dev->count usage for bch_cache_set_error()", inside bch_writeback_thread(), if the cache was not dirty after writeback, cached_dev_put() would be called there.
And in cached_dev_make_request(), when a new write request turned the cache from clean to dirty, cached_dev_get() would be called there. Since we don't operate dc->count in these locations any more, the refcount dc->count cannot be dropped after the cache becomes clean, and cached_dev_detach_finish() won't be called to detach the bcache device.

This patch fixes the issue by checking whether BCACHE_DEV_DETACHING is set inside bch_writeback_thread(). If this bit is set and the cache is clean (no existing writeback_keys), break the while-loop, call cached_dev_put() and quit the writeback thread. Please note that if the cache is still dirty, the writeback thread should continue to perform writeback even when BCACHE_DEV_DETACHING is set; this is the original design of manual detach.

It is safe to do the following check without locking; let me explain why,

+	if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
+	    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {

If the kernel thread does not sleep and continues to run because the conditions are not updated in time on the running CPU core, it just consumes more CPU cycles and does no harm. This should-sleep-but-run case is safe here. We just focus on the should-run-but-sleep condition, which means the writeback thread goes to sleep by mistake while it should continue to run.

1. First of all, no matter whether the writeback thread is hung or not, kthread_stop() from cached_dev_detach_finish() will wake it up and terminate it by making kthread_should_stop() return true. And in normal run time, the bit at index BCACHE_DEV_DETACHING is always cleared, so the condition !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) is always true and can be treated as a constant value.

2. If one of the following conditions is true, the writeback thread should go to sleep: "!atomic_read(&dc->has_dirty)" or "!dc->writeback_running)". Each of them independently controls whether the writeback thread should sleep or not; let's analyse them one by one.

2.1 condition "!atomic_read(&dc->has_dirty)"
If dc->has_dirty is set from 0 to 1 on another CPU core, bcache will call bch_writeback_queue() immediately, or call bch_writeback_add(), which indirectly calls bch_writeback_queue() too. In bch_writeback_queue(), wake_up_process(dc->writeback_thread) is called. It sets the writeback thread's task state to TASK_RUNNING, following an implicit memory barrier, and then tries to wake up the writeback thread. In the writeback thread, its task state is set to TASK_INTERRUPTIBLE before doing the condition check. If the other CPU core sets the TASK_RUNNING state after the writeback thread sets TASK_INTERRUPTIBLE, the writeback thread will be scheduled to run very soon because its state is not TASK_INTERRUPTIBLE. If the other CPU core sets the TASK_RUNNING state before the writeback thread sets TASK_INTERRUPTIBLE, the implicit memory barrier of wake_up_process() will make sure the modification of dc->has_dirty on the other CPU core is updated and observed on the CPU core of the writeback thread. Therefore the condition check will correctly be false, and the writeback code continues without sleeping.

2.2 condition "!dc->writeback_running)"
dc->writeback_running can be changed via a sysfs file; every time it is modified, a bch_writeback_queue() call always follows, so the change is always observed on the CPU core of the writeback thread. If dc->writeback_running is changed from 0 to 1 on another CPU core, this condition check will observe the modification and allow the writeback thread to continue to run without sleeping.
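A minimal sketch of the lockless sleep/wake protocol analysed in 2.1 and 2.2 above, condensed from the logic described there (not a verbatim copy of bch_writeback_thread()/bch_writeback_queue(); locking and the writeback work itself are omitted):

	/* sleeper side: the writeback thread */
	set_current_state(TASK_INTERRUPTIBLE);	/* publish "about to sleep" first */
	if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) &&
	    (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) {
		schedule();			/* really sleep */
	} else {
		set_current_state(TASK_RUNNING);	/* condition already true, keep running */
	}

	/* waker side, e.g. bch_writeback_queue() on another CPU */
	atomic_set(&dc->has_dirty, 1);		/* update the condition first */
	wake_up_process(dc->writeback_thread);	/* implies a memory barrier and
						   sets the task to TASK_RUNNING */

Because the sleeper sets TASK_INTERRUPTIBLE before testing the condition, and wake_up_process() implies a memory barrier, either the sleeper observes the updated condition and skips the sleep, or the waker observes the sleeping task and wakes it; the should-run-but-sleep case cannot occur.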
Now we can see that even without locking protection, the multiple-condition check is safe here; no deadlock or process hang-up will happen.

I composed a separate patch because patch "bcache: fix cached_dev->count usage for bch_cache_set_error()" already got a "Reviewed-by:" from Hannes Reinecke. Also this fix is not trivial, so it is better as a separate patch.

Signed-off-by: Coly Li Reviewed-by: Michael Lyle Cc: Hannes Reinecke Cc: Huijun Tang Signed-off-by: Jens Axboe --- drivers/md/bcache/writeback.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index b280c134dd4d..4dbeaaa575bf 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -565,9 +565,15 @@ static int bch_writeback_thread(void *arg) while (!kthread_should_stop()) { down_write(&dc->writeback_lock); set_current_state(TASK_INTERRUPTIBLE); - if (!atomic_read(&dc->has_dirty) || - (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && - !dc->writeback_running)) { + /* + * If the bcache device is detaching, skip here and continue + * to perform writeback. Otherwise, if no dirty data on cache, + * or there is dirty data on cache but writeback is disabled, + * the writeback thread should sleep here and wait for others + * to wake it up. + */ + if (!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) && + (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { up_write(&dc->writeback_lock); if (kthread_should_stop()) { @@ -587,6 +593,14 @@ static int bch_writeback_thread(void *arg) atomic_set(&dc->has_dirty, 0); SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN); bch_write_bdev_super(dc, NULL); + /* + * If bcache device is detaching via sysfs interface, + * writeback thread should stop after there is no dirty + * data on cache. BCACHE_DEV_DETACHING flag is set in + * bch_cached_dev_detach(). + */ + if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) + break; } up_write(&dc->writeback_lock); -- cgit v1.2.3 From 3fd47bfe55b00d5ac7b0a44c9301c07be39b1082 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:16 -0700 Subject: bcache: stop dc->writeback_rate_update properly

struct delayed_work writeback_rate_update in struct cached_dev is a delayed work that calls update_writeback_rate() periodically (the interval is defined by dc->writeback_rate_update_seconds). When a metadata I/O error happens on the cache device, the bcache error handling routine bch_cache_set_error() will call bch_cache_set_unregister() to retire the whole cache set. On the unregister code path, this delayed work is stopped by calling cancel_delayed_work_sync(&dc->writeback_rate_update).

dc->writeback_rate_update is a delayed work unlike the others in bcache: in its routine update_writeback_rate(), this delayed work re-arms itself. That means that when cancel_delayed_work_sync() returns, this delayed work can still be executed after the several seconds defined by dc->writeback_rate_update_seconds. The problem is, after cancel_delayed_work_sync() returns, the cache set unregister code path will continue and release the memory of struct cache_set. Then, when the delayed work is scheduled to run, __update_writeback_rate() will reference the already released cache_set memory and trigger a NULL pointer dereference fault.

This patch introduces two more bcache device flags,
- BCACHE_DEV_WB_RUNNING
  bit set: the bcache device is in writeback mode and running; it is OK for dc->writeback_rate_update to re-arm itself.
  bit clear: the bcache device is trying to stop dc->writeback_rate_update; this delayed work should not re-arm itself, and should quit.
- BCACHE_DEV_RATE_DW_RUNNING
  bit set: routine update_writeback_rate() is executing.
  bit clear: routine update_writeback_rate() has quit.

This patch also adds a function cancel_writeback_rate_update_dwork() to wait for dc->writeback_rate_update to quit before cancelling it by calling cancel_delayed_work_sync(). In order to avoid a deadlock caused by dc->writeback_rate_update quitting unexpectedly, after time_out seconds this function gives up waiting and continues to call cancel_delayed_work_sync() anyway.

And here I explain how this patch stops the self-re-arming delayed work properly with the above flags. update_writeback_rate() sets BCACHE_DEV_RATE_DW_RUNNING at its beginning and clears BCACHE_DEV_RATE_DW_RUNNING at its end. Before calling cancel_writeback_rate_update_dwork(), clear flag BCACHE_DEV_WB_RUNNING. Before calling cancel_delayed_work_sync(), wait until flag BCACHE_DEV_RATE_DW_RUNNING is clear. So when cancel_delayed_work_sync() is called, dc->writeback_rate_update must either have already re-armed itself, or have quit after seeing BCACHE_DEV_WB_RUNNING cleared. In both cases the delayed work routine update_writeback_rate() won't be executed after cancel_delayed_work_sync() returns.

Inside update_writeback_rate(), flag BCACHE_DEV_WB_RUNNING is checked before schedule_delayed_work() is called. If this flag is cleared, it means someone is about to stop the delayed work. Because flag BCACHE_DEV_RATE_DW_RUNNING is already set and cancel_delayed_work_sync() has to wait for this flag to be cleared, we don't need to worry about a race condition here. If update_writeback_rate() is scheduled to run after BCACHE_DEV_RATE_DW_RUNNING is checked and before cancel_delayed_work_sync() is called in cancel_writeback_rate_update_dwork(), it is also safe, because at this moment BCACHE_DEV_WB_RUNNING has been cleared with a memory barrier; as mentioned previously, update_writeback_rate() will see that BCACHE_DEV_WB_RUNNING is clear and quit immediately.

Because update_writeback_rate() has more dependencies on struct cache_set memory, dc->writeback_rate_update is not a simple self-re-arming delayed work. After trying many different methods (e.g. holding dc->count, or using locks), this is the only way I can find which works to properly stop the dc->writeback_rate_update delayed work.

Changelog:
v3: change values of BCACHE_DEV_WB_RUNNING and BCACHE_DEV_RATE_DW_RUNNING to bit indexes, for test_bit().
v2: Try to fix the race issue which is pointed out by Junhui.
v1: The initial version for review Signed-off-by: Coly Li Reviewed-by: Junhui Tang Reviewed-by: Michael Lyle Cc: Michael Lyle Cc: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 9 +++++---- drivers/md/bcache/super.c | 38 ++++++++++++++++++++++++++++++++++---- drivers/md/bcache/sysfs.c | 3 ++- drivers/md/bcache/writeback.c | 29 ++++++++++++++++++++++++++++- 4 files changed, 69 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 12e5197f186c..b5ddb848cd31 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -258,10 +258,11 @@ struct bcache_device { struct gendisk *disk; unsigned long flags; -#define BCACHE_DEV_CLOSING 0 -#define BCACHE_DEV_DETACHING 1 -#define BCACHE_DEV_UNLINK_DONE 2 - +#define BCACHE_DEV_CLOSING 0 +#define BCACHE_DEV_DETACHING 1 +#define BCACHE_DEV_UNLINK_DONE 2 +#define BCACHE_DEV_WB_RUNNING 3 +#define BCACHE_DEV_RATE_DW_RUNNING 4 unsigned nr_stripes; unsigned stripe_size; atomic_t *stripe_sectors_dirty; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 020be4f1cd8b..e5be599338c5 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -899,6 +899,31 @@ void bch_cached_dev_run(struct cached_dev *dc) pr_debug("error creating sysfs link"); } +/* + * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed + * work dc->writeback_rate_update is running. Wait until the routine + * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to + * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out + * seconds, give up waiting here and continue to cancel it too. + */ +static void cancel_writeback_rate_update_dwork(struct cached_dev *dc) +{ + int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ; + + do { + if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING, + &dc->disk.flags)) + break; + time_out--; + schedule_timeout_interruptible(1); + } while (time_out > 0); + + if (time_out == 0) + pr_warn("give up waiting for dc->writeback_write_update to quit"); + + cancel_delayed_work_sync(&dc->writeback_rate_update); +} + static void cached_dev_detach_finish(struct work_struct *w) { struct cached_dev *dc = container_of(w, struct cached_dev, detach); @@ -911,7 +936,9 @@ static void cached_dev_detach_finish(struct work_struct *w) mutex_lock(&bch_register_lock); - cancel_delayed_work_sync(&dc->writeback_rate_update); + if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + cancel_writeback_rate_update_dwork(dc); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) { kthread_stop(dc->writeback_thread); dc->writeback_thread = NULL; @@ -954,6 +981,7 @@ void bch_cached_dev_detach(struct cached_dev *dc) closure_get(&dc->disk.cl); bch_writeback_queue(dc); + cached_dev_put(dc); } @@ -1081,14 +1109,16 @@ static void cached_dev_free(struct closure *cl) { struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl); - cancel_delayed_work_sync(&dc->writeback_rate_update); + mutex_lock(&bch_register_lock); + + if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + cancel_writeback_rate_update_dwork(dc); + if (!IS_ERR_OR_NULL(dc->writeback_thread)) kthread_stop(dc->writeback_thread); if (dc->writeback_write_wq) destroy_workqueue(dc->writeback_write_wq); - mutex_lock(&bch_register_lock); - if (atomic_read(&dc->running)) bd_unlink_disk_holder(dc->bdev, dc->disk.disk); bcache_device_free(&dc->disk); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 78cd7bd50fdd..55673508628f 
100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -309,7 +309,8 @@ STORE(bch_cached_dev) bch_writeback_queue(dc); if (attr == &sysfs_writeback_percent) - schedule_delayed_work(&dc->writeback_rate_update, + if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) + schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); mutex_unlock(&bch_register_lock); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 4dbeaaa575bf..8f98ef1038d3 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -115,6 +115,21 @@ static void update_writeback_rate(struct work_struct *work) struct cached_dev, writeback_rate_update); + /* + * should check BCACHE_DEV_RATE_DW_RUNNING before calling + * cancel_delayed_work_sync(). + */ + set_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); + /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ + smp_mb(); + + if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) { + clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); + /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ + smp_mb(); + return; + } + down_read(&dc->writeback_lock); if (atomic_read(&dc->has_dirty) && @@ -123,8 +138,18 @@ static void update_writeback_rate(struct work_struct *work) up_read(&dc->writeback_lock); - schedule_delayed_work(&dc->writeback_rate_update, + if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) { + schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); + } + + /* + * should check BCACHE_DEV_RATE_DW_RUNNING before calling + * cancel_delayed_work_sync(). + */ + clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); + /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ + smp_mb(); } static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors) @@ -675,6 +700,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_rate_p_term_inverse = 40; dc->writeback_rate_i_term_inverse = 10000; + WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate); } @@ -693,6 +719,7 @@ int bch_cached_dev_writeback_start(struct cached_dev *dc) return PTR_ERR(dc->writeback_thread); } + WARN_ON(test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); -- cgit v1.2.3 From 771f393e8ffc9b3066e4830ee5f7391b8e8874f1 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:17 -0700 Subject: bcache: add CACHE_SET_IO_DISABLE to struct cache_set flags When too many I/Os have failed on the cache device, bch_cache_set_error() is called in the error handling code path to retire the whole problematic cache set. If new I/O requests continue to come in and take the refcount dc->count, the cache set won't be retired immediately; this is a problem. Furthermore, several kernel threads and self-armed kernel workers may still be running after bch_cache_set_error() is called. It may take quite a while for them to stop, or they won't stop at all. They also prevent the cache set from being retired. The solution in this patch is to add a per-cache-set flag to disable I/O requests on this cache and all attached backing devices. Then newly arriving I/O requests can be rejected in *_make_request() before taking a refcount, and kernel threads and self-armed kernel workers can stop very quickly when the flag bit CACHE_SET_IO_DISABLE is set.
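For illustration, the early rejection amounts to the following shape in each *_make_request() entry point; this is a condensed sketch of the request.c hunks later in this patch, with an illustrative function name, not the verbatim change:

	static blk_qc_t example_make_request(struct request_queue *q, struct bio *bio)
	{
		struct bcache_device *d = bio->bi_disk->private_data;

		/*
		 * Reject before any refcount is taken, so new I/O cannot pin
		 * a retiring cache set.
		 */
		if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
			bio->bi_status = BLK_STS_IOERR;
			bio_endio(bio);
			return BLK_QC_T_NONE;
		}

		/* ... normal request processing continues here ... */
		return BLK_QC_T_NONE;
	}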
Because bcache also does internal I/Os for writeback, garbage collection, bucket allocation, and journaling, this kind of I/O should be disabled after bch_cache_set_error() is called. So closure_bio_submit() is modified to check whether CACHE_SET_IO_DISABLE is set in cache_set->flags. If it is set, closure_bio_submit() will set bio->bi_status to BLK_STS_IOERR and return; generic_make_request() won't be called. A sysfs interface is also added to set or clear the CACHE_SET_IO_DISABLE bit in cache_set->flags, to disable or enable cache set I/O for debugging. It is helpful for triggering more corner-case issues on a failed cache device. Changelog v4, add wait_for_kthread_stop(), and call it before the writeback and gc kernel threads exit. v3, change CACHE_SET_IO_DISABLE from 4 to 3, since it is a bit index; remove the "bcache: " prefix when printing out kernel messages. v2, more changes per previous review, - Use CACHE_SET_IO_DISABLE of cache_set->flags, suggested by Junhui. - Check CACHE_SET_IO_DISABLE in bch_btree_gc() to stop a while-loop; this is reported and inspired from the original patch of Pavel Vazharov. v1, initial version. Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Cc: Junhui Tang Cc: Michael Lyle Cc: Pavel Vazharov Signed-off-by: Jens Axboe --- drivers/md/bcache/alloc.c | 3 ++- drivers/md/bcache/bcache.h | 33 +++++++++++++++++++++++++++++++++ drivers/md/bcache/btree.c | 11 ++++++++--- drivers/md/bcache/io.c | 2 +- drivers/md/bcache/journal.c | 4 ++-- drivers/md/bcache/request.c | 26 +++++++++++++++++++------- drivers/md/bcache/super.c | 6 +++++- drivers/md/bcache/sysfs.c | 18 ++++++++++++++++++ drivers/md/bcache/util.h | 6 ------ drivers/md/bcache/writeback.c | 37 ++++++++++++++++++++++++++++--------- 10 files changed, 116 insertions(+), 30 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c index 458e1d38577d..004cc3cc6123 100644 --- a/drivers/md/bcache/alloc.c +++ b/drivers/md/bcache/alloc.c @@ -287,7 +287,8 @@ do { \ break; \ \ mutex_unlock(&(ca)->set->bucket_lock); \ - if (kthread_should_stop()) { \ + if (kthread_should_stop() || \ + test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) { \ set_current_state(TASK_RUNNING); \ return 0; \ } \ diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index b5ddb848cd31..8a0327581d62 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -188,6 +188,7 @@ #include #include #include +#include #include "bset.h" #include "util.h" @@ -475,10 +476,15 @@ struct gc_stat { * * CACHE_SET_RUNNING means all cache devices have been registered and journal * replay is complete. + * + * CACHE_SET_IO_DISABLE is set when bcache is stopping the whold cache set, all + * external and internal I/O should be denied when this flag is set. + * */ #define CACHE_SET_UNREGISTERING 0 #define CACHE_SET_STOPPING 1 #define CACHE_SET_RUNNING 2 +#define CACHE_SET_IO_DISABLE 3 struct cache_set { struct closure cl; @@ -868,6 +874,33 @@ static inline void wake_up_allocators(struct cache_set *c) wake_up_process(ca->alloc_thread); } +static inline void closure_bio_submit(struct cache_set *c, + struct bio *bio, + struct closure *cl) +{ + closure_get(cl); + if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &c->flags))) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + return; + } + generic_make_request(bio); +} + +/* + * Prevent the kthread exits directly, and make sure when kthread_stop() + * is called to stop a kthread, it is still alive.
If a kthread might be + * stopped by CACHE_SET_IO_DISABLE bit set, wait_for_kthread_stop() is + * necessary before the kthread returns. + */ +static inline void wait_for_kthread_stop(void) +{ + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + } +} + /* Forward declarations */ void bch_count_io_errors(struct cache *, blk_status_t, int, const char *); diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index fad9fe8817eb..39cc8a549091 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -1744,6 +1744,7 @@ static void bch_btree_gc(struct cache_set *c) btree_gc_start(c); + /* if CACHE_SET_IO_DISABLE set, gc thread should stop too */ do { ret = btree_root(gc_root, c, &op, &writes, &stats); closure_sync(&writes); @@ -1751,7 +1752,7 @@ static void bch_btree_gc(struct cache_set *c) if (ret && ret != -EAGAIN) pr_warn("gc failed!"); - } while (ret); + } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags)); bch_btree_gc_finish(c); wake_up_allocators(c); @@ -1789,15 +1790,19 @@ static int bch_gc_thread(void *arg) while (1) { wait_event_interruptible(c->gc_wait, - kthread_should_stop() || gc_should_run(c)); + kthread_should_stop() || + test_bit(CACHE_SET_IO_DISABLE, &c->flags) || + gc_should_run(c)); - if (kthread_should_stop()) + if (kthread_should_stop() || + test_bit(CACHE_SET_IO_DISABLE, &c->flags)) break; set_gc_sectors(c); bch_btree_gc(c); } + wait_for_kthread_stop(); return 0; } diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index a783c5a41ff1..8013ecbcdbda 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -38,7 +38,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev); b->submit_time_us = local_clock_us(); - closure_bio_submit(bio, bio->bi_private); + closure_bio_submit(c, bio, bio->bi_private); } void bch_submit_bbio(struct bio *bio, struct cache_set *c, diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index 1b736b860739..c94085f400a4 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -62,7 +62,7 @@ reread: left = ca->sb.bucket_size - offset; bio_set_op_attrs(bio, REQ_OP_READ, 0); bch_bio_map(bio, data); - closure_bio_submit(bio, &cl); + closure_bio_submit(ca->set, bio, &cl); closure_sync(&cl); /* This function could be simpler now since we no longer write @@ -674,7 +674,7 @@ static void journal_write_unlocked(struct closure *cl) spin_unlock(&c->journal.lock); while ((bio = bio_list_pop(&list))) - closure_bio_submit(bio, cl); + closure_bio_submit(c, bio, cl); continue_at(cl, journal_write_done, NULL); } diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 6422846b546e..7aca308bee5b 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -747,7 +747,7 @@ static void cached_dev_read_error(struct closure *cl) /* XXX: invalidate cache */ - closure_bio_submit(bio, cl); + closure_bio_submit(s->iop.c, bio, cl); } continue_at(cl, cached_dev_cache_miss_done, NULL); @@ -872,7 +872,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->cache_miss = miss; s->iop.bio = cache_bio; bio_get(cache_bio); - closure_bio_submit(cache_bio, &s->cl); + closure_bio_submit(s->iop.c, cache_bio, &s->cl); return ret; out_put: @@ -880,7 +880,7 @@ out_put: out_submit: miss->bi_end_io = request_endio; miss->bi_private = &s->cl; - closure_bio_submit(miss, &s->cl); + closure_bio_submit(s->iop.c, miss, &s->cl); return ret; } @@ -945,7 +945,7 
@@ static void cached_dev_write(struct cached_dev *dc, struct search *s) if ((bio_op(bio) != REQ_OP_DISCARD) || blk_queue_discard(bdev_get_queue(dc->bdev))) - closure_bio_submit(bio, cl); + closure_bio_submit(s->iop.c, bio, cl); } else if (s->iop.writeback) { bch_writeback_add(dc); s->iop.bio = bio; @@ -960,12 +960,12 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) flush->bi_private = cl; flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - closure_bio_submit(flush, cl); + closure_bio_submit(s->iop.c, flush, cl); } } else { s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); - closure_bio_submit(bio, cl); + closure_bio_submit(s->iop.c, bio, cl); } closure_call(&s->iop.cl, bch_data_insert, NULL, cl); @@ -981,7 +981,7 @@ static void cached_dev_nodata(struct closure *cl) bch_journal_meta(s->iop.c, cl); /* If it's a flush, we send the flush to the backing device too */ - closure_bio_submit(bio, cl); + closure_bio_submit(s->iop.c, bio, cl); continue_at(cl, cached_dev_bio_complete, NULL); } @@ -996,6 +996,12 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, struct cached_dev *dc = container_of(d, struct cached_dev, disk); int rw = bio_data_dir(bio); + if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + return BLK_QC_T_NONE; + } + atomic_set(&dc->backing_idle, 0); generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); @@ -1112,6 +1118,12 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q, struct bcache_device *d = bio->bi_disk->private_data; int rw = bio_data_dir(bio); + if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { + bio->bi_status = BLK_STS_IOERR; + bio_endio(bio); + return BLK_QC_T_NONE; + } + generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); s = search_alloc(bio, d); diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index e5be599338c5..dda4ccdd1360 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -521,7 +521,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op, bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags); bch_bio_map(bio, ca->disk_buckets); - closure_bio_submit(bio, &ca->prio); + closure_bio_submit(ca->set, bio, &ca->prio); closure_sync(cl); } @@ -1350,6 +1350,9 @@ bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...) 
test_bit(CACHE_SET_STOPPING, &c->flags)) return false; + if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags)) + pr_warn("CACHE_SET_IO_DISABLE already set"); + /* XXX: we can be called from atomic context acquire_console_sem(); */ @@ -1585,6 +1588,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) c->congested_read_threshold_us = 2000; c->congested_write_threshold_us = 20000; c->error_limit = DEFAULT_IO_ERROR_LIMIT; + WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags)); return c; err: diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 55673508628f..a3a45de5626d 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -95,6 +95,7 @@ read_attribute(partial_stripes_expensive); rw_attribute(synchronous); rw_attribute(journal_delay_ms); +rw_attribute(io_disable); rw_attribute(discard); rw_attribute(running); rw_attribute(label); @@ -591,6 +592,8 @@ SHOW(__bch_cache_set) sysfs_printf(gc_always_rewrite, "%i", c->gc_always_rewrite); sysfs_printf(btree_shrinker_disabled, "%i", c->shrinker_disabled); sysfs_printf(copy_gc_enabled, "%i", c->copy_gc_enabled); + sysfs_printf(io_disable, "%i", + test_bit(CACHE_SET_IO_DISABLE, &c->flags)); if (attr == &sysfs_bset_tree_stats) return bch_bset_print_stats(c, buf); @@ -680,6 +683,20 @@ STORE(__bch_cache_set) if (attr == &sysfs_io_error_halflife) c->error_decay = strtoul_or_return(buf) / 88; + if (attr == &sysfs_io_disable) { + int v = strtoul_or_return(buf); + + if (v) { + if (test_and_set_bit(CACHE_SET_IO_DISABLE, + &c->flags)) + pr_warn("CACHE_SET_IO_DISABLE already set"); + } else { + if (!test_and_clear_bit(CACHE_SET_IO_DISABLE, + &c->flags)) + pr_warn("CACHE_SET_IO_DISABLE already cleared"); + } + } + sysfs_strtoul(journal_delay_ms, c->journal_delay_ms); sysfs_strtoul(verify, c->verify); sysfs_strtoul(key_merging_disabled, c->key_merging_disabled); @@ -765,6 +782,7 @@ static struct attribute *bch_cache_set_internal_files[] = { &sysfs_gc_always_rewrite, &sysfs_btree_shrinker_disabled, &sysfs_copy_gc_enabled, + &sysfs_io_disable, NULL }; KTYPE(bch_cache_set_internal); diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h index a6763db7f061..268024529edd 100644 --- a/drivers/md/bcache/util.h +++ b/drivers/md/bcache/util.h @@ -567,12 +567,6 @@ static inline sector_t bdev_sectors(struct block_device *bdev) return bdev->bd_inode->i_size >> 9; } -#define closure_bio_submit(bio, cl) \ -do { \ - closure_get(cl); \ - generic_make_request(bio); \ -} while (0) - uint64_t bch_crc64_update(uint64_t, const void *, size_t); uint64_t bch_crc64(const void *, size_t); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 8f98ef1038d3..70092ada68e6 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -114,6 +114,7 @@ static void update_writeback_rate(struct work_struct *work) struct cached_dev *dc = container_of(to_delayed_work(work), struct cached_dev, writeback_rate_update); + struct cache_set *c = dc->disk.c; /* * should check BCACHE_DEV_RATE_DW_RUNNING before calling @@ -123,7 +124,12 @@ static void update_writeback_rate(struct work_struct *work) /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ smp_mb(); - if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) { + /* + * CACHE_SET_IO_DISABLE might be set via sysfs interface, + * check it here too. 
+ */ + if (!test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) || + test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { clear_bit(BCACHE_DEV_RATE_DW_RUNNING, &dc->disk.flags); /* paired with where BCACHE_DEV_RATE_DW_RUNNING is tested */ smp_mb(); @@ -138,7 +144,12 @@ static void update_writeback_rate(struct work_struct *work) up_read(&dc->writeback_lock); - if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)) { + /* + * CACHE_SET_IO_DISABLE might be set via sysfs interface, + * check it here too. + */ + if (test_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags) && + !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { schedule_delayed_work(&dc->writeback_rate_update, dc->writeback_rate_update_seconds * HZ); } @@ -278,7 +289,7 @@ static void write_dirty(struct closure *cl) bio_set_dev(&io->bio, io->dc->bdev); io->bio.bi_end_io = dirty_endio; - closure_bio_submit(&io->bio, cl); + closure_bio_submit(io->dc->disk.c, &io->bio, cl); } atomic_set(&dc->writeback_sequence_next, next_sequence); @@ -304,7 +315,7 @@ static void read_dirty_submit(struct closure *cl) { struct dirty_io *io = container_of(cl, struct dirty_io, cl); - closure_bio_submit(&io->bio, cl); + closure_bio_submit(io->dc->disk.c, &io->bio, cl); continue_at(cl, write_dirty, io->dc->writeback_write_wq); } @@ -330,7 +341,9 @@ static void read_dirty(struct cached_dev *dc) next = bch_keybuf_next(&dc->writeback_keys); - while (!kthread_should_stop() && next) { + while (!kthread_should_stop() && + !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && + next) { size = 0; nk = 0; @@ -427,7 +440,9 @@ static void read_dirty(struct cached_dev *dc) } } - while (!kthread_should_stop() && delay) { + while (!kthread_should_stop() && + !test_bit(CACHE_SET_IO_DISABLE, &dc->disk.c->flags) && + delay) { schedule_timeout_interruptible(delay); delay = writeback_delay(dc, 0); } @@ -583,11 +598,13 @@ static bool refill_dirty(struct cached_dev *dc) static int bch_writeback_thread(void *arg) { struct cached_dev *dc = arg; + struct cache_set *c = dc->disk.c; bool searched_full_index; bch_ratelimit_reset(&dc->writeback_rate); - while (!kthread_should_stop()) { + while (!kthread_should_stop() && + !test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { down_write(&dc->writeback_lock); set_current_state(TASK_INTERRUPTIBLE); /* @@ -601,7 +618,8 @@ static int bch_writeback_thread(void *arg) (!atomic_read(&dc->has_dirty) || !dc->writeback_running)) { up_write(&dc->writeback_lock); - if (kthread_should_stop()) { + if (kthread_should_stop() || + test_bit(CACHE_SET_IO_DISABLE, &c->flags)) { set_current_state(TASK_RUNNING); break; } @@ -637,6 +655,7 @@ static int bch_writeback_thread(void *arg) while (delay && !kthread_should_stop() && + !test_bit(CACHE_SET_IO_DISABLE, &c->flags) && !test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags)) delay = schedule_timeout_interruptible(delay); @@ -644,8 +663,8 @@ static int bch_writeback_thread(void *arg) } } - dc->writeback_thread = NULL; cached_dev_put(dc); + wait_for_kthread_stop(); return 0; } -- cgit v1.2.3 From 7e027ca4b534b6b99a7c0471e13ba075ffa3f482 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:18 -0700 Subject: bcache: add stop_when_cache_set_failed option to backing device When there are too many I/O errors on cache device, current bcache code will retire the whole cache set, and detach all bcache devices. But the detached bcache devices are not stopped, which is problematic when bcache is in writeback mode. 
If the retired cache set has dirty data for its backing devices, continued writes to the bcache device will go to the backing device directly. If the LBA of a write request has a dirty version cached on the cache device, then the next time the cache device is re-registered and the backing device is re-attached to it, the stale dirty data on the cache device will be written to the backing device, overwriting the latest directly written data. This situation causes quite serious data corruption. But we cannot simply stop all attached bcache devices when the cache set is broken or disconnected. For example, consider using bcache to accelerate the performance of an email service. In such a workload, if the cache device is broken but no dirty data is lost, keeping the bcache device alive and permitting the email service to continue accessing user data might be a better response to the cache device failure. Nix points out the issue and provides the above example to explain why it might be necessary not to stop the bcache device for a broken cache device. Pavel Goran provides a brilliant suggestion: provide "always" and "auto" options for the per-cached-device sysfs file stop_when_cache_set_failed. If the cache set is retiring and the backing device has no dirty data on the cache, it should be safe to keep the bcache device alive. In this case, if stop_when_cache_set_failed is set to "auto", the device failure handling code will not stop this bcache device, and will permit applications to access the backing device through an unattached bcache device. Changelog: [mlyle: edited to not break string constants across lines] v3: fix typos pointed out by Nix. v2: change option values of stop_when_cache_set_failed from 1/0 to "auto"/"always". v1: initial version, stop_when_cache_set_failed can be 0 (not stop) or 1 (always stop). Signed-off-by: Coly Li Reviewed-by: Michael Lyle Signed-off-by: Michael Lyle Cc: Nix Cc: Pavel Goran Cc: Junhui Tang Cc: Hannes Reinecke Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 9 ++++++ drivers/md/bcache/super.c | 78 ++++++++++++++++++++++++++++++++++++++++------ drivers/md/bcache/sysfs.c | 17 ++++++++++ 3 files changed, 94 insertions(+), 10 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 8a0327581d62..5e9f3610c6fd 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -288,6 +288,12 @@ struct io { sector_t last; }; +enum stop_on_failure { + BCH_CACHED_DEV_STOP_AUTO = 0, + BCH_CACHED_DEV_STOP_ALWAYS, + BCH_CACHED_DEV_STOP_MODE_MAX, +}; + struct cached_dev { struct list_head list; struct bcache_device disk; @@ -380,6 +386,8 @@ struct cached_dev { unsigned writeback_rate_i_term_inverse; unsigned writeback_rate_p_term_inverse; unsigned writeback_rate_minimum; + + enum stop_on_failure stop_when_cache_set_failed; }; enum alloc_reserve { @@ -939,6 +947,7 @@ void bch_write_bdev_super(struct cached_dev *, struct closure *); extern struct workqueue_struct *bcache_wq; extern const char * const bch_cache_modes[]; +extern const char * const bch_stop_on_failure_modes[]; extern struct mutex bch_register_lock; extern struct list_head bch_cache_sets; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index dda4ccdd1360..7b45160e9a22 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -47,6 +47,14 @@ const char * const bch_cache_modes[] = { NULL }; +/* Default is -1; we skip past it for stop_when_cache_set_failed */ +const char * const bch_stop_on_failure_modes[] = { + "default", + "auto", + "always", + NULL +}; + static struct kobject *bcache_kobj; struct
mutex bch_register_lock; LIST_HEAD(bch_cache_sets); @@ -1188,6 +1196,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) max(dc->disk.disk->queue->backing_dev_info->ra_pages, q->backing_dev_info->ra_pages); + /* default to auto */ + dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; + bch_cached_dev_request_init(dc); bch_cached_dev_writeback_init(dc); return 0; @@ -1464,25 +1475,72 @@ static void cache_set_flush(struct closure *cl) closure_return(cl); } +/* + * This function is only called when CACHE_SET_IO_DISABLE is set, which means + * cache set is unregistering due to too many I/O errors. In this condition, + * the bcache device might be stopped, it depends on stop_when_cache_set_failed + * value and whether the broken cache has dirty data: + * + * dc->stop_when_cache_set_failed dc->has_dirty stop bcache device + * BCH_CACHED_STOP_AUTO 0 NO + * BCH_CACHED_STOP_AUTO 1 YES + * BCH_CACHED_DEV_STOP_ALWAYS 0 YES + * BCH_CACHED_DEV_STOP_ALWAYS 1 YES + * + * The expected behavior is, if stop_when_cache_set_failed is configured to + * "auto" via sysfs interface, the bcache device will not be stopped if the + * backing device is clean on the broken cache device. + */ +static void conditional_stop_bcache_device(struct cache_set *c, + struct bcache_device *d, + struct cached_dev *dc) +{ + if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) { + pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.", + d->disk->disk_name, c->sb.set_uuid); + bcache_device_stop(d); + } else if (atomic_read(&dc->has_dirty)) { + /* + * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO + * and dc->has_dirty == 1 + */ + pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.", + d->disk->disk_name); + bcache_device_stop(d); + } else { + /* + * dc->stop_when_cache_set_failed == BCH_CACHED_STOP_AUTO + * and dc->has_dirty == 0 + */ + pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.", + d->disk->disk_name); + } +} + static void __cache_set_unregister(struct closure *cl) { struct cache_set *c = container_of(cl, struct cache_set, caching); struct cached_dev *dc; + struct bcache_device *d; size_t i; mutex_lock(&bch_register_lock); - for (i = 0; i < c->devices_max_used; i++) - if (c->devices[i]) { - if (!UUID_FLASH_ONLY(&c->uuids[i]) && - test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { - dc = container_of(c->devices[i], - struct cached_dev, disk); - bch_cached_dev_detach(dc); - } else { - bcache_device_stop(c->devices[i]); - } + for (i = 0; i < c->devices_max_used; i++) { + d = c->devices[i]; + if (!d) + continue; + + if (!UUID_FLASH_ONLY(&c->uuids[i]) && + test_bit(CACHE_SET_UNREGISTERING, &c->flags)) { + dc = container_of(d, struct cached_dev, disk); + bch_cached_dev_detach(dc); + if (test_bit(CACHE_SET_IO_DISABLE, &c->flags)) + conditional_stop_bcache_device(c, d, dc); + } else { + bcache_device_stop(d); } + } mutex_unlock(&bch_register_lock); diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index a3a45de5626d..414129f7c49f 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -78,6 +78,7 @@ rw_attribute(congested_write_threshold_us); rw_attribute(sequential_cutoff); rw_attribute(data_csum); rw_attribute(cache_mode); +rw_attribute(stop_when_cache_set_failed); rw_attribute(writeback_metadata); rw_attribute(writeback_running); rw_attribute(writeback_percent); @@ -126,6 +127,12 @@ SHOW(__bch_cached_dev) 
bch_cache_modes + 1, BDEV_CACHE_MODE(&dc->sb)); + if (attr == &sysfs_stop_when_cache_set_failed) + return bch_snprint_string_list(buf, PAGE_SIZE, + bch_stop_on_failure_modes + 1, + dc->stop_when_cache_set_failed); + + sysfs_printf(data_csum, "%i", dc->disk.data_csum); var_printf(verify, "%i"); var_printf(bypass_torture_test, "%i"); @@ -247,6 +254,15 @@ STORE(__cached_dev) } } + if (attr == &sysfs_stop_when_cache_set_failed) { + v = bch_read_string_list(buf, bch_stop_on_failure_modes + 1); + + if (v < 0) + return v; + + dc->stop_when_cache_set_failed = v; + } + if (attr == &sysfs_label) { if (size > SB_LABEL_SIZE) return -EINVAL; @@ -326,6 +342,7 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_data_csum, #endif &sysfs_cache_mode, + &sysfs_stop_when_cache_set_failed, &sysfs_writeback_metadata, &sysfs_writeback_running, &sysfs_writeback_delay, -- cgit v1.2.3 From bc082a55d25c837341709accaf11311c3a9af727 Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Sun, 18 Mar 2018 17:36:19 -0700 Subject: bcache: fix inaccurate io state for detached bcache devices When we run IO on a detached device and run iostat to show the IO status, normally it will show something like below (some fields omitted): Device: ... avgrq-sz avgqu-sz await r_await w_await svctm %util sdd ... 15.89 0.53 1.82 0.20 2.23 1.81 52.30 bcache0 ... 15.89 115.42 0.00 0.00 0.00 2.40 69.60 but after IO has stopped, there are still very big avgqu-sz and %util values, as below: Device: ... avgrq-sz avgqu-sz await r_await w_await svctm %util bcache0 ... 0 5326.32 0.00 0.00 0.00 0.00 100.10 The reason for this issue is that only generic_start_io_acct() is called, and no generic_end_io_acct() is called, for a detached device in cached_dev_make_request(). See the code: //start generic_start_io_acct() generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); if (cached_dev_get(dc)) { //will callback generic_end_io_acct() } else { //will not call generic_end_io_acct() } This patch calls generic_end_io_acct() at the end of IO for detached devices, so we can show the IO state correctly. (Modified to use GFP_NOIO in kzalloc() by Coly Li) Changelog: v2: fix typo. v1: the initial version.
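The shape of the fix is to stash the original completion in a small per-bio context and run the end-side accounting from a private bi_end_io; condensed from the patch below:

	static void detached_dev_end_io(struct bio *bio)
	{
		struct detached_dev_io_private *ddip = bio->bi_private;

		/*
		 * Restore the original completion, account the I/O, then
		 * hand the bio back to its owner.
		 */
		bio->bi_end_io = ddip->bi_end_io;
		bio->bi_private = ddip->bi_private;
		generic_end_io_acct(ddip->d->disk->queue, bio_data_dir(bio),
				    &ddip->d->disk->part0, ddip->start_time);
		kfree(ddip);
		bio->bi_end_io(bio);
	}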
Signed-off-by: Tang Junhui Reviewed-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/request.c | 58 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 51 insertions(+), 7 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 7aca308bee5b..5c8ae69c8502 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -986,6 +986,55 @@ static void cached_dev_nodata(struct closure *cl) continue_at(cl, cached_dev_bio_complete, NULL); } +struct detached_dev_io_private { + struct bcache_device *d; + unsigned long start_time; + bio_end_io_t *bi_end_io; + void *bi_private; +}; + +static void detached_dev_end_io(struct bio *bio) +{ + struct detached_dev_io_private *ddip; + + ddip = bio->bi_private; + bio->bi_end_io = ddip->bi_end_io; + bio->bi_private = ddip->bi_private; + + generic_end_io_acct(ddip->d->disk->queue, + bio_data_dir(bio), + &ddip->d->disk->part0, ddip->start_time); + + kfree(ddip); + + bio->bi_end_io(bio); +} + +static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) +{ + struct detached_dev_io_private *ddip; + struct cached_dev *dc = container_of(d, struct cached_dev, disk); + + /* + * no need to call closure_get(&dc->disk.cl), + * because upper layer had already opened bcache device, + * which would call closure_get(&dc->disk.cl) + */ + ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO); + ddip->d = d; + ddip->start_time = jiffies; + ddip->bi_end_io = bio->bi_end_io; + ddip->bi_private = bio->bi_private; + bio->bi_end_io = detached_dev_end_io; + bio->bi_private = ddip; + + if ((bio_op(bio) == REQ_OP_DISCARD) && + !blk_queue_discard(bdev_get_queue(dc->bdev))) + bio->bi_end_io(bio); + else + generic_make_request(bio); +} + /* Cached devices - read & write stuff */ static blk_qc_t cached_dev_make_request(struct request_queue *q, @@ -1028,13 +1077,8 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, else cached_dev_read(dc, s); } - } else { - if ((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bdev_get_queue(dc->bdev))) - bio_endio(bio); - else - generic_make_request(bio); - } + } else + detached_dev_do_request(d, bio); return BLK_QC_T_NONE; } -- cgit v1.2.3 From 688892b3bc05e25da94866e32210e5f503f16f69 Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Sun, 18 Mar 2018 17:36:20 -0700 Subject: bcache: fix incorrect sysfs output value of strip size Stripe size is shown as zero when there is no stripe in the backing device: [root@ceph132 ~]# cat /sys/block/sdd/bcache/stripe_size 0.0k Actually it should be 1T bytes (1 << 31 sectors), but in the sysfs interface stripe_size is converted from sectors to bytes by shifting 9 bits left, so the 32-bit variable overflows. This patch casts the variable to a 64-bit type before shifting.
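The overflow is easy to reproduce in plain C outside the kernel; with stripe_size = 1 << 31 sectors, the 32-bit shift wraps to zero while the 64-bit shift yields the expected 1 TiB:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t stripe_size = 1U << 31;	/* sectors */

		/* 32-bit shift: 2^31 * 2^9 == 2^40 wraps to 0 in 32 bits */
		printf("32-bit: %u\n", stripe_size << 9);
		/* cast first, as the fix does: 1099511627776 bytes (1 TiB) */
		printf("64-bit: %llu\n",
		       (unsigned long long)((uint64_t)stripe_size << 9));
		return 0;
	}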
Signed-off-by: Tang Junhui Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/sysfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 414129f7c49f..8c3fd05db87a 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -181,7 +181,7 @@ SHOW(__bch_cached_dev) sysfs_hprint(dirty_data, bcache_dev_sectors_dirty(&dc->disk) << 9); - sysfs_hprint(stripe_size, dc->disk.stripe_size << 9); + sysfs_hprint(stripe_size, ((uint64_t)dc->disk.stripe_size) << 9); var_printf(partial_stripes_expensive, "%u"); var_hprint(sequential_cutoff); -- cgit v1.2.3 From f3641c3abd1da978ee969b0203b71b86ec1bfa93 Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Sun, 18 Mar 2018 17:36:21 -0700 Subject: bcache: fix error return value in memory shrink In bch_mca_scan(), the return value should not be the number of freed btree nodes, but the number of pages of freed btree nodes. Signed-off-by: Tang Junhui Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 39cc8a549091..b2d4899f48d5 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -719,7 +719,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, } out: mutex_unlock(&c->bucket_lock); - return freed; + return freed * c->btree_pages; } static unsigned long bch_mca_count(struct shrinker *shrink, -- cgit v1.2.3 From ca71df31661a0518ed58a1a59cf1993962153ebb Mon Sep 17 00:00:00 2001 From: Tang Junhui Date: Sun, 18 Mar 2018 17:36:22 -0700 Subject: bcache: fix using of loop variable in memory shrink In bch_mca_scan(), there is some confusion and a logical error in the use of the loop variables. In this patch, we clarify them as: 1) nr: the number of btree nodes still to scan, which decreases after we scan a btree node and should not go below 0; 2) i: the number of btree nodes scanned, covering both btree_cache_freeable and btree_cache, which should not exceed btree_cache_used; 3) freed: the number of btree nodes freed.
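Schematically, with bcache's locking and mca_reap() details omitted and the two helpers invented as placeholders, the corrected accounting works like this (note the return value is in pages, per the previous patch):

	/* schematic of the bch_mca_scan() bookkeeping; helpers are placeholders */
	static unsigned long example_scan(unsigned long nr, unsigned long pages_per_node)
	{
		unsigned long freed = 0;

		while (nr > 0 && example_have_more_nodes()) {
			if (example_try_free_one_node())
				freed++;	/* freed: btree nodes actually freed */
			nr--;			/* nr: budget consumed per node scanned */
		}

		return freed * pages_per_node;	/* report freed pages, not nodes */
	}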
Signed-off-by: Tang Junhui Reviewed-by: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/btree.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index b2d4899f48d5..d64aff0b8abc 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -665,6 +665,7 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, struct btree *b, *t; unsigned long i, nr = sc->nr_to_scan; unsigned long freed = 0; + unsigned int btree_cache_used; if (c->shrinker_disabled) return SHRINK_STOP; @@ -689,9 +690,10 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, nr = min_t(unsigned long, nr, mca_can_free(c)); i = 0; + btree_cache_used = c->btree_cache_used; list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) { - if (freed >= nr) - break; + if (nr <= 0) + goto out; if (++i > 3 && !mca_reap(b, 0, false)) { @@ -699,9 +701,10 @@ static unsigned long bch_mca_scan(struct shrinker *shrink, rw_unlock(true, b); freed++; } + nr--; } - for (i = 0; (nr--) && i < c->btree_cache_used; i++) { + for (; (nr--) && i < btree_cache_used; i++) { if (list_empty(&c->btree_cache)) goto out; -- cgit v1.2.3 From df2b94313ae5b4f60d49e01d4dff5acb4c2757cf Mon Sep 17 00:00:00 2001 From: Chengguang Xu Date: Sun, 18 Mar 2018 17:36:23 -0700 Subject: bcache: move closure debug file into debug directory In the current code, the closure debug file is outside of the debug directory, and when unloading the module there is no removal operation for the closure debug file, so creating it fails when trying to reload the module. This patch moves the closure debug file into the "bcache" debug directory so that the file can get deleted properly. Signed-off-by: Chengguang Xu Reviewed-by: Michael Lyle Reviewed-by: Tang Junhui Signed-off-by: Jens Axboe --- drivers/md/bcache/closure.c | 9 +++++---- drivers/md/bcache/closure.h | 5 +++-- drivers/md/bcache/debug.c | 14 +++++++------- drivers/md/bcache/super.c | 3 +-- 4 files changed, 16 insertions(+), 15 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c index 7f12920c14f7..c0949c9f843b 100644 --- a/drivers/md/bcache/closure.c +++ b/drivers/md/bcache/closure.c @@ -157,7 +157,7 @@ void closure_debug_destroy(struct closure *cl) } EXPORT_SYMBOL(closure_debug_destroy); -static struct dentry *debug; +static struct dentry *closure_debug; static int debug_seq_show(struct seq_file *f, void *data) { @@ -199,11 +199,12 @@ static const struct file_operations debug_ops = { .release = single_release }; -void __init closure_debug_init(void) +int __init closure_debug_init(void) { - debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops); + closure_debug = debugfs_create_file("closures", + 0400, bcache_debug, NULL, &debug_ops); + return IS_ERR_OR_NULL(closure_debug); } - #endif MODULE_AUTHOR("Kent Overstreet "); diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h index 3b9dfc9962ad..71427eb5fdae 100644 --- a/drivers/md/bcache/closure.h +++ b/drivers/md/bcache/closure.h @@ -105,6 +105,7 @@ struct closure; struct closure_syncer; typedef void (closure_fn) (struct closure *); +extern struct dentry *bcache_debug; struct closure_waitlist { struct llist_head list; @@ -185,13 +186,13 @@ static inline void closure_sync(struct closure *cl) #ifdef CONFIG_BCACHE_CLOSURES_DEBUG -void closure_debug_init(void); +int closure_debug_init(void); void closure_debug_create(struct closure *cl); void
closure_debug_destroy(struct closure *cl); #else -static inline void closure_debug_init(void) {} +static inline int closure_debug_init(void) { return 0; } static inline void closure_debug_create(struct closure *cl) {} static inline void closure_debug_destroy(struct closure *cl) {} diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index af89408befe8..028f7b386e01 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -17,7 +17,7 @@ #include #include -static struct dentry *debug; +struct dentry *bcache_debug; #ifdef CONFIG_BCACHE_DEBUG @@ -232,11 +232,11 @@ static const struct file_operations cache_set_debug_ops = { void bch_debug_init_cache_set(struct cache_set *c) { - if (!IS_ERR_OR_NULL(debug)) { + if (!IS_ERR_OR_NULL(bcache_debug)) { char name[50]; snprintf(name, 50, "bcache-%pU", c->sb.set_uuid); - c->debug = debugfs_create_file(name, 0400, debug, c, + c->debug = debugfs_create_file(name, 0400, bcache_debug, c, &cache_set_debug_ops); } } @@ -245,13 +245,13 @@ void bch_debug_init_cache_set(struct cache_set *c) void bch_debug_exit(void) { - if (!IS_ERR_OR_NULL(debug)) - debugfs_remove_recursive(debug); + if (!IS_ERR_OR_NULL(bcache_debug)) + debugfs_remove_recursive(bcache_debug); } int __init bch_debug_init(struct kobject *kobj) { - debug = debugfs_create_dir("bcache", NULL); + bcache_debug = debugfs_create_dir("bcache", NULL); - return IS_ERR_OR_NULL(debug); + return IS_ERR_OR_NULL(bcache_debug); } diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 7b45160e9a22..f1f64853114b 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -2224,7 +2224,6 @@ static int __init bcache_init(void) mutex_init(&bch_register_lock); init_waitqueue_head(&unregister_wait); register_reboot_notifier(&reboot); - closure_debug_init(); bcache_major = register_blkdev(0, "bcache"); if (bcache_major < 0) { @@ -2236,7 +2235,7 @@ static int __init bcache_init(void) if (!(bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0)) || !(bcache_kobj = kobject_create_and_add("bcache", fs_kobj)) || bch_request_init() || - bch_debug_init(bcache_kobj) || + bch_debug_init(bcache_kobj) || closure_debug_init() || sysfs_create_files(bcache_kobj, files)) goto err; -- cgit v1.2.3 From 27a40ab9269e79b55672312b324f8f29d94463d4 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:24 -0700 Subject: bcache: add backing_request_endio() for bi_end_io In order to catch I/O errors on the backing device, a separate bi_end_io callback is required. Then a per-backing-device counter can record the number of I/O errors and retire the backing device if the counter reaches a per-backing-device I/O error limit. This patch adds backing_request_endio() to the bcache backing device I/O code path; this is a preparation for more complicated backing device failure handling. So far there is no real code logic change. I make this change a separate patch to make sure it is stable and reliable for further work. Changelog: v2: fix a code comment typo, remove a redundant bch_writeback_add() line added in the v4 patch set. v1: newly added in this patch set.
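The core of the change is a dedicated completion for bios aimed at the backing device, so that later patches can attribute failures to the right device; a trimmed sketch (the full version below also special-cases REQ_PREFLUSH writeback flushes):

	static void backing_request_endio(struct bio *bio)
	{
		struct closure *cl = bio->bi_private;

		if (bio->bi_status) {
			struct search *s = container_of(cl, struct search, cl);

			/* record the failure; error accounting is added later */
			s->iop.status = bio->bi_status;
			s->recoverable = false;
		}

		bio_put(bio);
		closure_put(cl);
	}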
[mlyle: truncated commit subject] Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Cc: Junhui Tang Cc: Michael Lyle Signed-off-by: Jens Axboe --- drivers/md/bcache/request.c | 93 +++++++++++++++++++++++++++++++++++-------- drivers/md/bcache/super.c | 1 + drivers/md/bcache/writeback.c | 1 + 3 files changed, 79 insertions(+), 16 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 5c8ae69c8502..b4a5768afbe9 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -139,6 +139,7 @@ static void bch_data_invalidate(struct closure *cl) } op->insert_data_done = true; + /* get in bch_data_insert() */ bio_put(bio); out: continue_at(cl, bch_data_insert_keys, op->wq); @@ -630,6 +631,38 @@ static void request_endio(struct bio *bio) closure_put(cl); } +static void backing_request_endio(struct bio *bio) +{ + struct closure *cl = bio->bi_private; + + if (bio->bi_status) { + struct search *s = container_of(cl, struct search, cl); + /* + * If a bio has REQ_PREFLUSH for writeback mode, it is + * speically assembled in cached_dev_write() for a non-zero + * write request which has REQ_PREFLUSH. we don't set + * s->iop.status by this failure, the status will be decided + * by result of bch_data_insert() operation. + */ + if (unlikely(s->iop.writeback && + bio->bi_opf & REQ_PREFLUSH)) { + char buf[BDEVNAME_SIZE]; + + bio_devname(bio, buf); + pr_err("Can't flush %s: returned bi_status %i", + buf, bio->bi_status); + } else { + /* set to orig_bio->bi_status in bio_complete() */ + s->iop.status = bio->bi_status; + } + s->recoverable = false; + /* should count I/O error for backing device here */ + } + + bio_put(bio); + closure_put(cl); +} + static void bio_complete(struct search *s) { if (s->orig_bio) { @@ -644,13 +677,21 @@ static void bio_complete(struct search *s) } } -static void do_bio_hook(struct search *s, struct bio *orig_bio) +static void do_bio_hook(struct search *s, + struct bio *orig_bio, + bio_end_io_t *end_io_fn) { struct bio *bio = &s->bio.bio; bio_init(bio, NULL, 0); __bio_clone_fast(bio, orig_bio); - bio->bi_end_io = request_endio; + /* + * bi_end_io can be set separately somewhere else, e.g. 
the + * variants in, + * - cache_bio->bi_end_io from cached_dev_cache_miss() + * - n->bi_end_io from cache_lookup_fn() + */ + bio->bi_end_io = end_io_fn; bio->bi_private = &s->cl; bio_cnt_set(bio, 3); @@ -676,7 +717,7 @@ static inline struct search *search_alloc(struct bio *bio, s = mempool_alloc(d->c->search, GFP_NOIO); closure_init(&s->cl, NULL); - do_bio_hook(s, bio); + do_bio_hook(s, bio, request_endio); s->orig_bio = bio; s->cache_miss = NULL; @@ -743,10 +784,11 @@ static void cached_dev_read_error(struct closure *cl) trace_bcache_read_retry(s->orig_bio); s->iop.status = 0; - do_bio_hook(s, s->orig_bio); + do_bio_hook(s, s->orig_bio, backing_request_endio); /* XXX: invalidate cache */ + /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, bio, cl); } @@ -859,7 +901,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, bio_copy_dev(cache_bio, miss); cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; - cache_bio->bi_end_io = request_endio; + cache_bio->bi_end_io = backing_request_endio; cache_bio->bi_private = &s->cl; bch_bio_map(cache_bio, NULL); @@ -872,14 +914,16 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, s->cache_miss = miss; s->iop.bio = cache_bio; bio_get(cache_bio); + /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, cache_bio, &s->cl); return ret; out_put: bio_put(cache_bio); out_submit: - miss->bi_end_io = request_endio; + miss->bi_end_io = backing_request_endio; miss->bi_private = &s->cl; + /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, miss, &s->cl); return ret; } @@ -943,31 +987,46 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) s->iop.bio = s->orig_bio; bio_get(s->iop.bio); - if ((bio_op(bio) != REQ_OP_DISCARD) || - blk_queue_discard(bdev_get_queue(dc->bdev))) - closure_bio_submit(s->iop.c, bio, cl); + if (bio_op(bio) == REQ_OP_DISCARD && + !blk_queue_discard(bdev_get_queue(dc->bdev))) + goto insert_data; + + /* I/O request sent to backing device */ + bio->bi_end_io = backing_request_endio; + closure_bio_submit(s->iop.c, bio, cl); + } else if (s->iop.writeback) { bch_writeback_add(dc); s->iop.bio = bio; if (bio->bi_opf & REQ_PREFLUSH) { - /* Also need to send a flush to the backing device */ - struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, - dc->disk.bio_split); - + /* + * Also need to send a flush to the backing + * device. 
+ */ + struct bio *flush; + + flush = bio_alloc_bioset(GFP_NOIO, 0, + dc->disk.bio_split); + if (!flush) { + s->iop.status = BLK_STS_RESOURCE; + goto insert_data; + } bio_copy_dev(flush, bio); - flush->bi_end_io = request_endio; + flush->bi_end_io = backing_request_endio; flush->bi_private = cl; flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - + /* I/O request sent to backing device */ closure_bio_submit(s->iop.c, flush, cl); } } else { s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); - + /* I/O request sent to backing device */ + bio->bi_end_io = backing_request_endio; closure_bio_submit(s->iop.c, bio, cl); } +insert_data: closure_call(&s->iop.cl, bch_data_insert, NULL, cl); continue_at(cl, cached_dev_write_complete, NULL); } @@ -981,6 +1040,7 @@ static void cached_dev_nodata(struct closure *cl) bch_journal_meta(s->iop.c, cl); /* If it's a flush, we send the flush to the backing device too */ + bio->bi_end_io = backing_request_endio; closure_bio_submit(s->iop.c, bio, cl); continue_at(cl, cached_dev_bio_complete, NULL); @@ -1078,6 +1138,7 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q, cached_dev_read(dc, s); } } else + /* I/O request sent to backing device */ detached_dev_do_request(d, bio); return BLK_QC_T_NONE; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index f1f64853114b..2f8e70aefc90 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -273,6 +273,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent) bio->bi_private = dc; closure_get(cl); + /* I/O request sent to backing device */ __write_super(&dc->sb, bio); closure_return_with_destructor(cl, bch_write_bdev_super_unlock); diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index 70092ada68e6..4a9547cdcdc5 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -289,6 +289,7 @@ static void write_dirty(struct closure *cl) bio_set_dev(&io->bio, io->dc->bdev); io->bio.bi_end_io = dirty_endio; + /* I/O request sent to backing device */ closure_bio_submit(io->dc->disk.c, &io->bio, cl); } -- cgit v1.2.3 From c7b7bd07404c52d8b9c6fd2fe794052ac367a818 Mon Sep 17 00:00:00 2001 From: Coly Li Date: Sun, 18 Mar 2018 17:36:25 -0700 Subject: bcache: add io_disable to struct cached_dev If a bcache device is configured in writeback mode, the current code does not handle write I/O errors on the backing device properly. In writeback mode, a write request is written to the cache device and later flushed to the backing device. If the I/O fails when writing from the cache device to the backing device, the bcache code just ignores the error, and the upper-layer code is NOT notified that the backing device is broken. This patch tries to handle backing device failure the same way cache device failure is handled, - Add an error counter 'io_errors' and an error limit 'error_limit' to struct cached_dev. Add another field, io_disable, to struct cached_dev to disable I/Os on the problematic backing device. - When an I/O error happens on the backing device, increase the io_errors counter. And if io_errors reaches error_limit, set cached_dev->io_disable to true and stop the bcache device. The result is, if the backing device is broken or disconnected and I/O errors reach its error limit, the backing device will be disabled and the associated bcache device will be removed from the system. Changelog: v2: remove "bcache: " prefix in pr_error(), and use the correct name string to print out the bcache device gendisk name. v1: newly added in the v2 patch set.
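The counting scheme described above reduces to an atomic increment checked against the limit; a condensed view of the io.c hunk in this patch:

	void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
	{
		unsigned errors = atomic_add_return(1, &dc->io_errors);

		if (errors < dc->error_limit)
			pr_err("IO error on backing device, unrecoverable");
		else
			bch_cached_dev_error(dc);	/* sets io_disable, stops the device */
	}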
Signed-off-by: Coly Li Reviewed-by: Hannes Reinecke Reviewed-by: Michael Lyle Cc: Michael Lyle Cc: Junhui Tang Signed-off-by: Jens Axboe --- drivers/md/bcache/bcache.h | 6 ++++++ drivers/md/bcache/io.c | 14 ++++++++++++++ drivers/md/bcache/request.c | 14 ++++++++++++-- drivers/md/bcache/super.c | 21 +++++++++++++++++++++ drivers/md/bcache/sysfs.c | 15 ++++++++++++++- 5 files changed, 67 insertions(+), 3 deletions(-) (limited to 'drivers/md') diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 5e9f3610c6fd..d338b7086013 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -367,6 +367,7 @@ struct cached_dev { unsigned sequential_cutoff; unsigned readahead; + unsigned io_disable:1; unsigned verify:1; unsigned bypass_torture_test:1; @@ -388,6 +389,9 @@ struct cached_dev { unsigned writeback_rate_minimum; enum stop_on_failure stop_when_cache_set_failed; +#define DEFAULT_CACHED_DEV_ERROR_LIMIT 64 + atomic_t io_errors; + unsigned error_limit; }; enum alloc_reserve { @@ -911,6 +915,7 @@ static inline void wait_for_kthread_stop(void) /* Forward declarations */ +void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio); void bch_count_io_errors(struct cache *, blk_status_t, int, const char *); void bch_bbio_count_io_errors(struct cache_set *, struct bio *, blk_status_t, const char *); @@ -938,6 +943,7 @@ int bch_bucket_alloc_set(struct cache_set *, unsigned, struct bkey *, int, bool); bool bch_alloc_sectors(struct cache_set *, struct bkey *, unsigned, unsigned, unsigned, bool); +bool bch_cached_dev_error(struct cached_dev *dc); __printf(2, 3) bool bch_cache_set_error(struct cache_set *, const char *, ...); diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index 8013ecbcdbda..7fac97ae036e 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c @@ -50,6 +50,20 @@ void bch_submit_bbio(struct bio *bio, struct cache_set *c, } /* IO errors */ +void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio) +{ + char buf[BDEVNAME_SIZE]; + unsigned errors; + + WARN_ONCE(!dc, "NULL pointer of struct cached_dev"); + + errors = atomic_add_return(1, &dc->io_errors); + if (errors < dc->error_limit) + pr_err("%s: IO error on backing device, unrecoverable", + bio_devname(bio, buf)); + else + bch_cached_dev_error(dc); +} void bch_count_io_errors(struct cache *ca, blk_status_t error, diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index b4a5768afbe9..5a82237c7025 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -637,6 +637,8 @@ static void backing_request_endio(struct bio *bio) if (bio->bi_status) { struct search *s = container_of(cl, struct search, cl); + struct cached_dev *dc = container_of(s->d, + struct cached_dev, disk); /* * If a bio has REQ_PREFLUSH for writeback mode, it is * speically assembled in cached_dev_write() for a non-zero @@ -657,6 +659,7 @@ static void backing_request_endio(struct bio *bio) } s->recoverable = false; /* should count I/O error for backing device here */ + bch_count_backing_io_errors(dc, bio); } bio_put(bio); @@ -1065,8 +1068,14 @@ static void detached_dev_end_io(struct bio *bio) bio_data_dir(bio), &ddip->d->disk->part0, ddip->start_time); - kfree(ddip); + if (bio->bi_status) { + struct cached_dev *dc = container_of(ddip->d, + struct cached_dev, disk); + /* should count I/O error for backing device here */ + bch_count_backing_io_errors(dc, bio); + } + kfree(ddip); bio->bi_end_io(bio); } @@ -1105,7 +1114,8 @@ static blk_qc_t 
cached_dev_make_request(struct request_queue *q, struct cached_dev *dc = container_of(d, struct cached_dev, disk); int rw = bio_data_dir(bio); - if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { + if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) || + dc->io_disable)) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); return BLK_QC_T_NONE; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2f8e70aefc90..06f4b4833755 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -1197,6 +1197,9 @@ static int cached_dev_init(struct cached_dev *dc, unsigned block_size) max(dc->disk.disk->queue->backing_dev_info->ra_pages, q->backing_dev_info->ra_pages); + atomic_set(&dc->io_errors, 0); + dc->io_disable = false; + dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT; /* default to auto */ dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO; @@ -1351,6 +1354,24 @@ int bch_flash_dev_create(struct cache_set *c, uint64_t size) return flash_dev_run(c, u); } +bool bch_cached_dev_error(struct cached_dev *dc) +{ + char name[BDEVNAME_SIZE]; + + if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags)) + return false; + + dc->io_disable = true; + /* make others know io_disable is true earlier */ + smp_mb(); + + pr_err("stop %s: too many IO errors on backing device %s\n", + dc->disk.disk->disk_name, bdevname(dc->bdev, name)); + + bcache_device_stop(&dc->disk); + return true; +} + /* Cache set */ __printf(2, 3) diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 8c3fd05db87a..dfeef583ee50 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -141,7 +141,9 @@ SHOW(__bch_cached_dev) var_print(writeback_delay); var_print(writeback_percent); sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9); - + sysfs_hprint(io_errors, atomic_read(&dc->io_errors)); + sysfs_printf(io_error_limit, "%i", dc->error_limit); + sysfs_printf(io_disable, "%i", dc->io_disable); var_print(writeback_rate_update_seconds); var_print(writeback_rate_i_term_inverse); var_print(writeback_rate_p_term_inverse); @@ -232,6 +234,14 @@ STORE(__cached_dev) d_strtoul(writeback_rate_i_term_inverse); d_strtoul_nonzero(writeback_rate_p_term_inverse); + sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX); + + if (attr == &sysfs_io_disable) { + int v = strtoul_or_return(buf); + + dc->io_disable = v ? 1 : 0; + } + d_strtoi_h(sequential_cutoff); d_strtoi_h(readahead); @@ -352,6 +362,9 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_writeback_rate_i_term_inverse, &sysfs_writeback_rate_p_term_inverse, &sysfs_writeback_rate_debug, + &sysfs_errors, + &sysfs_io_error_limit, + &sysfs_io_disable, &sysfs_dirty_data, &sysfs_stripe_size, &sysfs_partial_stripes_expensive, -- cgit v1.2.3 From fd01991d5c20098c5c1ffc4dca6c821cc60a2f74 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Sun, 18 Mar 2018 17:36:26 -0700 Subject: bcache: Fix indentation This patch avoids that smatch complains about inconsistent indentation. 

From 4a4e443835a43a79113cc237c472c0d268eb1e1c Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Sun, 18 Mar 2018 17:36:27 -0700
Subject: bcache: Add __printf annotation to __bch_check_keys()

Make it possible for the compiler to verify the consistency of the
format string passed to __bch_check_keys() and the arguments that
should be formatted according to that format string.

Signed-off-by: Bart Van Assche
Reviewed-by: Michael Lyle
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/bset.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index fa506c1aa524..0c24280f3b98 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -531,14 +531,15 @@ int __bch_keylist_realloc(struct keylist *, unsigned);
 #ifdef CONFIG_BCACHE_DEBUG
 
 int __bch_count_data(struct btree_keys *);
-void __bch_check_keys(struct btree_keys *, const char *, ...);
+void __printf(2, 3) __bch_check_keys(struct btree_keys *, const char *, ...);
 void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
 void bch_dump_bucket(struct btree_keys *);
 
 #else
 
 static inline int __bch_count_data(struct btree_keys *b) { return -1; }
-static inline void __bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
+static inline void __printf(2, 3)
+	__bch_check_keys(struct btree_keys *b, const char *fmt, ...) {}
 static inline void bch_dump_bucket(struct btree_keys *b) {}
 void bch_dump_bset(struct btree_keys *, struct bset *, unsigned);
 
-- 
cgit v1.2.3
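
What the annotation above buys is easiest to see in a self-contained sketch. The program below defines __printf() the way the kernel's compiler headers do and shows the call-site checking it enables; check_keys() is a hypothetical stand-in for __bch_check_keys(), not the real function.

#include <stdarg.h>
#include <stdio.h>

/* same expansion the kernel uses for __printf(fmt_idx, first_vararg_idx) */
#define __printf(fmt, args) __attribute__((format(printf, fmt, args)))

struct btree_keys;	/* opaque here; only a pointer is passed around */

static void __printf(2, 3)
check_keys(struct btree_keys *b, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vfprintf(stderr, fmt, ap);
	va_end(ap);
}

int main(void)
{
	check_keys(NULL, "nkeys=%u\n", 16U);	/* type-checked: ok */
	/*
	 * check_keys(NULL, "nkeys=%u\n", "16"); would now be rejected at
	 * compile time: "format '%u' expects argument of type 'unsigned int'".
	 * Without the annotation, it compiles silently and misbehaves at run
	 * time.
	 */
	return 0;
}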

From 9dfbdec7b7fea1ff1b7b5d5d12980dbc7dca46c7 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Sun, 18 Mar 2018 17:36:28 -0700
Subject: bcache: Annotate switch fall-through

This patch avoids complaints about switch fall-throughs when building
with W=1.

Signed-off-by: Bart Van Assche
Reviewed-by: Michael Lyle
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/util.c | 7 +++++++
 1 file changed, 7 insertions(+)

(limited to 'drivers/md')

diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index a23cd6a14b74..6198041f0ee2 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -32,20 +32,27 @@ int bch_ ## name ## _h(const char *cp, type *res)		\
 		case 'y':					\
 		case 'z':					\
 			u++;					\
+			/* fall through */			\
 		case 'e':					\
 			u++;					\
+			/* fall through */			\
 		case 'p':					\
 			u++;					\
+			/* fall through */			\
 		case 't':					\
 			u++;					\
+			/* fall through */			\
 		case 'g':					\
 			u++;					\
+			/* fall through */			\
 		case 'm':					\
 			u++;					\
+			/* fall through */			\
 		case 'k':					\
 			u++;					\
 			if (e++ == cp)				\
 				return -EINVAL;			\
+			/* fall through */			\
 		case '\n':					\
 		case '\0':					\
 			if (*e == '\n')				\
-- 
cgit v1.2.3
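
For readers outside the kernel tree, the sketch below shows the same convention on a small cascading switch: with -Wimplicit-fallthrough enabled, gcc stays quiet only where a fall-through comment (or, in newer kernels, the fallthrough keyword) marks the missing break as intentional. The unit_scale() helper is hypothetical; it merely mimics the STRTO_H() cascade patched above, where each suffix deliberately falls into the next multiplication.

#include <stdio.h>

/* multiply by 1024 once per suffix step, like the STRTO_H cascade */
static unsigned long long unit_scale(char suffix)
{
	unsigned long long mult = 1;

	switch (suffix) {
	case 'g':
		mult *= 1024;
		/* fall through */
	case 'm':
		mult *= 1024;
		/* fall through */
	case 'k':
		mult *= 1024;
		/* fall through */
	case '\0':
		break;
	default:
		return 0;	/* unknown suffix */
	}
	return mult;
}

int main(void)
{
	printf("m -> %llu\n", unit_scale('m'));	/* prints 1048576 */
	return 0;
}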

From 47344e330eabc1515cbe6061eb337100a3ab6d37 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Sun, 18 Mar 2018 17:36:29 -0700
Subject: bcache: Fix kernel-doc warnings

Avoid warnings about the kernel-doc headers when building with W=1.

Signed-off-by: Bart Van Assche
Reviewed-by: Michael Lyle
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/btree.c   |  2 +-
 drivers/md/bcache/closure.c |  8 ++++----
 drivers/md/bcache/request.c |  1 +
 drivers/md/bcache/util.c    | 18 ++++++++----------
 4 files changed, 14 insertions(+), 15 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 143ed5a758e7..17936b2dc7d6 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -962,7 +962,7 @@ err:
 	return b;
 }
 
-/**
+/*
  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
  * in from disk if necessary.
  *
diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
index c0949c9f843b..0e14969182c6 100644
--- a/drivers/md/bcache/closure.c
+++ b/drivers/md/bcache/closure.c
@@ -46,7 +46,7 @@ void closure_sub(struct closure *cl, int v)
 }
 EXPORT_SYMBOL(closure_sub);
 
-/**
+/*
  * closure_put - decrement a closure's refcount
  */
 void closure_put(struct closure *cl)
@@ -55,7 +55,7 @@ void closure_put(struct closure *cl)
 }
 EXPORT_SYMBOL(closure_put);
 
-/**
+/*
 * closure_wake_up - wake up all closures on a wait list, without memory barrier
 */
 void __closure_wake_up(struct closure_waitlist *wait_list)
@@ -79,9 +79,9 @@ EXPORT_SYMBOL(__closure_wake_up);
 
 /**
  * closure_wait - add a closure to a waitlist
- *
- * @waitlist will own a ref on @cl, which will be released when
+ * @waitlist: will own a ref on @cl, which will be released when
  * closure_wake_up() is called on @waitlist.
+ * @cl: closure pointer.
  *
 */
 bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 5a82237c7025..a65e3365eeb9 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -296,6 +296,7 @@ err:
 
 /**
  * bch_data_insert - stick some data in the cache
+ * @cl: closure pointer.
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 6198041f0ee2..74febd5230df 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -82,10 +82,9 @@ STRTO_H(strtoll, long long)
 STRTO_H(strtoull, unsigned long long)
 
 /**
- * bch_hprint() - formats @v to human readable string for sysfs.
- *
- * @v - signed 64 bit integer
- * @buf - the (at least 8 byte) buffer to format the result into.
+ * bch_hprint - formats @v to human readable string for sysfs.
+ * @buf: the (at least 8 byte) buffer to format the result into.
+ * @v: signed 64 bit integer
  *
 * Returns the number of bytes used by format.
 */
@@ -225,13 +224,12 @@ void bch_time_stats_update(struct time_stats *stats, uint64_t start_time)
 }
 
 /**
- * bch_next_delay() - increment @d by the amount of work done, and return how
- * long to delay until the next time to do some work.
- *
- * @d - the struct bch_ratelimit to update
- * @done - the amount of work done, in arbitrary units
+ * bch_next_delay() - update ratelimiting statistics and calculate next delay
+ * @d: the struct bch_ratelimit to update
+ * @done: the amount of work done, in arbitrary units
 *
- * Returns the amount of time to delay by, in jiffies
+ * Increment @d by the amount of work done, and return how long to delay in
+ * jiffies until the next time to do some work.
 */
 uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 {
-- 
cgit v1.2.3
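
For contrast with the headers fixed above, here is the shape kernel-doc expects: the summary line is "name - description", every parameter gets a "@name:" line with a colon, and only comments that open with two asterisks are parsed as kernel-doc at all, which is why the non-conforming headers in btree.c and closure.c were demoted to plain "/*" comments. The function below is a hypothetical illustration, not part of any patch in this series.

/**
 * sample_clamp - clamp a value to an inclusive range
 * @v: value to clamp
 * @lo: lower bound
 * @hi: upper bound
 *
 * Return: @v limited to the range [@lo, @hi].
 */
static inline int sample_clamp(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}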

From f0d3814090ac77de94c42b7124c37ece23629197 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Sun, 18 Mar 2018 17:36:30 -0700
Subject: bcache: Remove an unused variable

Signed-off-by: Bart Van Assche
Reviewed-by: Michael Lyle
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/extents.c | 2 --
 1 file changed, 2 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index f9d391711595..c334e6666461 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -534,7 +534,6 @@ err:
 static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
 {
 	struct btree *b = container_of(bk, struct btree, keys);
-	struct bucket *g;
 	unsigned i, stale;
 
 	if (!KEY_PTRS(k) ||
@@ -549,7 +548,6 @@ static bool bch_extent_bad(struct btree_keys *bk, const struct bkey *k)
 		return false;
 
 	for (i = 0; i < KEY_PTRS(k); i++) {
-		g = PTR_BUCKET(b->c, k, i);
 		stale = ptr_stale(b->c, k, i);
 
 		btree_bug_on(stale > 96, b,
-- 
cgit v1.2.3

From 42361469ae84c851e40cb1f94c8c9a14cdd94039 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Sun, 18 Mar 2018 17:36:31 -0700
Subject: bcache: Suppress more warnings about set-but-not-used variables

This patch does not change any functionality.

Reviewed-by: Michael Lyle
Reviewed-by: Coly Li
Signed-off-by: Bart Van Assche
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/bset.c    | 4 ++--
 drivers/md/bcache/journal.c | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index e56d3ecdbfcb..579c696a5fe0 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -1072,7 +1072,7 @@ EXPORT_SYMBOL(bch_btree_iter_init);
 static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
 						 btree_iter_cmp_fn *cmp)
 {
-	struct btree_iter_set unused;
+	struct btree_iter_set b __maybe_unused;
 	struct bkey *ret = NULL;
 
 	if (!btree_iter_end(iter)) {
@@ -1087,7 +1087,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
 		}
 
 		if (iter->data->k == iter->data->end)
-			heap_pop(iter, unused, cmp);
+			heap_pop(iter, b, cmp);
 		else
 			heap_sift(iter, 0, cmp);
 	}
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index c94085f400a4..acd0e5c074dd 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -493,7 +493,7 @@ static void journal_reclaim(struct cache_set *c)
 	struct cache *ca;
 	uint64_t last_seq;
 	unsigned iter, n = 0;
-	atomic_t p;
+	atomic_t p __maybe_unused;
 
 	atomic_long_inc(&c->reclaim);
 
-- 
cgit v1.2.3

From 20d3a518713e394efa5a899c84574b4b79ec5098 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Sun, 18 Mar 2018 17:36:32 -0700
Subject: bcache: Reduce the number of sparse complaints about lock imbalances

Add more annotations for sparse to inform it about which functions do
not have the same number of spin_lock() and spin_unlock() calls.

Signed-off-by: Bart Van Assche
Reviewed-by: Michael Lyle
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/journal.c | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'drivers/md')

diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index acd0e5c074dd..18f1b5239620 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -594,6 +594,7 @@ static void journal_write_done(struct closure *cl)
 }
 
 static void journal_write_unlock(struct closure *cl)
+	__releases(&c->journal.lock)
 {
 	struct cache_set *c = container_of(cl, struct cache_set, journal.io);
 
@@ -705,6 +706,7 @@ static void journal_try_write(struct cache_set *c)
 
 static struct journal_write *journal_wait_for_write(struct cache_set *c,
 						    unsigned nkeys)
+	__acquires(&c->journal.lock)
 {
 	size_t sectors;
 	struct closure cl;
-- 
cgit v1.2.3
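
The annotations added above compile away outside a sparse run. The self-contained sketch below defines __acquires()/__releases() the way the kernel's compiler headers do and applies them to a hypothetical lock-passing pair: the lock is taken in one function and released in another, which is exactly the pattern that otherwise makes sparse report an imbalance. Everything here is illustrative, not from the patch.

#ifdef __CHECKER__
#define __acquires(x)	__attribute__((context(x, 0, 1)))
#define __releases(x)	__attribute__((context(x, 1, 0)))
#else
/* under a normal compiler the annotations vanish entirely */
#define __acquires(x)
#define __releases(x)
#endif

#include <pthread.h>

static pthread_mutex_t journal_lock = PTHREAD_MUTEX_INITIALIZER;

/* returns with journal_lock held; the caller must call journal_end() */
static void journal_begin(void) __acquires(journal_lock)
{
	pthread_mutex_lock(&journal_lock);
}

/* balances the lock taken in journal_begin() */
static void journal_end(void) __releases(journal_lock)
{
	pthread_mutex_unlock(&journal_lock);
}

int main(void)
{
	journal_begin();
	/* ... critical section ... */
	journal_end();
	return 0;
}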

From 5f2b18ec8e1643410a2369f06888951cdedea0bf Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Sun, 18 Mar 2018 17:36:33 -0700
Subject: bcache: Fix a compiler warning in bcache_device_init()

Avoid the following compiler warning when building with W=1:

drivers/md/bcache/super.c:776:20: warning: comparison is always false due to limited range of data type [-Wtype-limits]
   d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
                 ^

Reviewed-by: Coly Li
Reviewed-by: Michael Lyle
Signed-off-by: Bart Van Assche
Signed-off-by: Jens Axboe
---
 drivers/md/bcache/super.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'drivers/md')

diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 06f4b4833755..a21694788619 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -778,6 +778,8 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 			      sector_t sectors)
 {
 	struct request_queue *q;
+	const size_t max_stripes = min_t(size_t, INT_MAX,
+					 SIZE_MAX / sizeof(atomic_t));
 	size_t n;
 	int idx;
 
@@ -786,9 +788,7 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 
 	d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
 
-	if (!d->nr_stripes ||
-	    d->nr_stripes > INT_MAX ||
-	    d->nr_stripes > SIZE_MAX / sizeof(atomic_t)) {
+	if (!d->nr_stripes || d->nr_stripes > max_stripes) {
 		pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
 			(unsigned)d->nr_stripes);
 		return -ENOMEM;
-- 
cgit v1.2.3
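
The warning fixed above fires because, on targets where size_t is 64 bits, a 32-bit nr_stripes can never exceed SIZE_MAX / sizeof(atomic_t), so the comparison is statically false. The standalone sketch below reproduces the fix: fold both bounds into a single max_stripes constant with min_t() so that the one remaining comparison is meaningful on every architecture. The min_t() macro is a simplified stand-in for the kernel's, and all other names are illustrative.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* simplified version of the kernel's typed-minimum helper */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	/* on 64-bit this clamps to INT_MAX; on 32-bit, to SIZE_MAX/sizeof */
	const size_t max_stripes = min_t(size_t, INT_MAX,
					 SIZE_MAX / sizeof(int));
	unsigned nr_stripes = 100000;

	if (!nr_stripes || nr_stripes > max_stripes) {
		fprintf(stderr, "nr_stripes too large or invalid: %u\n",
			nr_stripes);
		return 1;
	}
	printf("allocating %zu bytes\n", nr_stripes * sizeof(int));
	return 0;
}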