Diffstat (limited to 'drivers/md')
41 files changed, 1219 insertions, 443 deletions
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index b7e2d9666614..f2014385d48b 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -270,6 +270,7 @@ config DM_CRYPT tristate "Crypt target support" depends on BLK_DEV_DM depends on (ENCRYPTED_KEYS || ENCRYPTED_KEYS=n) + depends on (TRUSTED_KEYS || TRUSTED_KEYS=n) select CRYPTO select CRYPTO_CBC select CRYPTO_ESSIV @@ -605,6 +606,7 @@ config DM_INTEGRITY select BLK_DEV_INTEGRITY select DM_BUFIO select CRYPTO + select CRYPTO_SKCIPHER select ASYNC_XOR help This device-mapper target emulates a block device that has @@ -622,6 +624,7 @@ config DM_ZONED tristate "Drive-managed zoned block device target support" depends on BLK_DEV_DM depends on BLK_DEV_ZONED + select CRC32 help This device-mapper target takes a host-managed or host-aware zoned block device and exposes most of its capacity as a regular block diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h index 1d57f48307e6..848dd4db1659 100644 --- a/drivers/md/bcache/bcache.h +++ b/drivers/md/bcache/bcache.h @@ -373,6 +373,7 @@ struct cached_dev { unsigned int partial_stripes_expensive:1; unsigned int writeback_metadata:1; unsigned int writeback_running:1; + unsigned int writeback_consider_fragment:1; unsigned char writeback_percent; unsigned int writeback_delay; @@ -385,6 +386,9 @@ struct cached_dev { unsigned int writeback_rate_update_seconds; unsigned int writeback_rate_i_term_inverse; unsigned int writeback_rate_p_term_inverse; + unsigned int writeback_rate_fp_term_low; + unsigned int writeback_rate_fp_term_mid; + unsigned int writeback_rate_fp_term_high; unsigned int writeback_rate_minimum; enum stop_on_failure stop_when_cache_set_failed; @@ -1001,6 +1005,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent); extern struct workqueue_struct *bcache_wq; extern struct workqueue_struct *bch_journal_wq; +extern struct workqueue_struct *bch_flush_wq; extern struct mutex bch_register_lock; extern struct list_head bch_cache_sets; @@ -1042,5 +1047,7 @@ void bch_debug_exit(void); void bch_debug_init(void); void bch_request_exit(void); int bch_request_init(void); +void bch_btree_exit(void); +int bch_btree_init(void); #endif /* _BCACHE_H */ diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c index 67a2c47f4201..94d38e8a59b3 100644 --- a/drivers/md/bcache/bset.c +++ b/drivers/md/bcache/bset.c @@ -712,8 +712,10 @@ void bch_bset_build_written_tree(struct btree_keys *b) for (j = inorder_next(0, t->size); j; j = inorder_next(j, t->size)) { - while (bkey_to_cacheline(t, k) < cacheline) - prev = k, k = bkey_next(k); + while (bkey_to_cacheline(t, k) < cacheline) { + prev = k; + k = bkey_next(k); + } t->prev[j] = bkey_u64s(prev); t->tree[j].m = bkey_to_cacheline_offset(t, cacheline++, k); @@ -901,8 +903,10 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k, status = BTREE_INSERT_STATUS_INSERT; while (m != bset_bkey_last(i) && - bkey_cmp(k, b->ops->is_extents ? &START_KEY(m) : m) > 0) - prev = m, m = bkey_next(m); + bkey_cmp(k, b->ops->is_extents ? 
&START_KEY(m) : m) > 0) { + prev = m; + m = bkey_next(m); + } /* prev is in the tree, if we merge we're done */ status = BTREE_INSERT_STATUS_BACK_MERGE; diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 910df242c83d..fe6dce125aba 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -99,6 +99,8 @@ #define PTR_HASH(c, k) \ (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0)) +static struct workqueue_struct *btree_io_wq; + #define insert_lock(s, b) ((b)->level <= (s)->lock) @@ -308,7 +310,7 @@ static void __btree_node_write_done(struct closure *cl) btree_complete_write(b, w); if (btree_node_dirty(b)) - schedule_delayed_work(&b->work, 30 * HZ); + queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); closure_return_with_destructor(cl, btree_node_write_unlock); } @@ -481,7 +483,7 @@ static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref) BUG_ON(!i->keys); if (!btree_node_dirty(b)) - schedule_delayed_work(&b->work, 30 * HZ); + queue_delayed_work(btree_io_wq, &b->work, 30 * HZ); set_btree_node_dirty(b); @@ -2764,3 +2766,18 @@ void bch_keybuf_init(struct keybuf *buf) spin_lock_init(&buf->lock); array_allocator_init(&buf->freelist); } + +void bch_btree_exit(void) +{ + if (btree_io_wq) + destroy_workqueue(btree_io_wq); +} + +int __init bch_btree_init(void) +{ + btree_io_wq = alloc_workqueue("bch_btree_io", WQ_MEM_RECLAIM, 0); + if (!btree_io_wq) + return -ENOMEM; + + return 0; +} diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index b00fd08d696b..63e809f38e3f 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -114,7 +114,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) check = bio_kmalloc(GFP_NOIO, bio_segments(bio)); if (!check) return; - check->bi_disk = bio->bi_disk; + bio_set_dev(check, bio->bi_bdev); check->bi_opf = REQ_OP_READ; check->bi_iter.bi_sector = bio->bi_iter.bi_sector; check->bi_iter.bi_size = bio->bi_iter.bi_size; diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h index 84fc2c0f0101..d1c8fd3977fc 100644 --- a/drivers/md/bcache/features.h +++ b/drivers/md/bcache/features.h @@ -33,6 +33,8 @@ #define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \ static inline int bch_has_feature_##name(struct cache_sb *sb) \ { \ + if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \ + return 0; \ return (((sb)->feature_compat & \ BCH##_FEATURE_COMPAT_##flagname) != 0); \ } \ @@ -50,6 +52,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \ #define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \ static inline int bch_has_feature_##name(struct cache_sb *sb) \ { \ + if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \ + return 0; \ return (((sb)->feature_ro_compat & \ BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \ } \ @@ -67,6 +71,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \ #define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \ static inline int bch_has_feature_##name(struct cache_sb *sb) \ { \ + if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \ + return 0; \ return (((sb)->feature_incompat & \ BCH##_FEATURE_INCOMPAT_##flagname) != 0); \ } \ diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c index aefbdb7e003b..c6613e817333 100644 --- a/drivers/md/bcache/journal.c +++ b/drivers/md/bcache/journal.c @@ -932,8 +932,8 @@ atomic_t *bch_journal(struct cache_set *c, journal_try_write(c); } else if (!w->dirty) { w->dirty = true; - schedule_delayed_work(&c->journal.work, - 
msecs_to_jiffies(c->journal_delay_ms)); + queue_delayed_work(bch_flush_wq, &c->journal.work, + msecs_to_jiffies(c->journal_delay_ms)); spin_unlock(&c->journal.lock); } else { spin_unlock(&c->journal.lock); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 85b1f2a9b72d..29c231758293 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -475,7 +475,7 @@ struct search { unsigned int read_dirty_data:1; unsigned int cache_missed:1; - struct block_device *part; + struct block_device *orig_bdev; unsigned long start_time; struct btree_op op; @@ -670,8 +670,8 @@ static void bio_complete(struct search *s) { if (s->orig_bio) { /* Count on bcache device */ - part_end_io_acct(s->part, s->orig_bio, s->start_time); - + bio_end_io_acct_remapped(s->orig_bio, s->start_time, + s->orig_bdev); trace_bcache_request_end(s->d, s->orig_bio); s->orig_bio->bi_status = s->iop.status; bio_endio(s->orig_bio); @@ -714,7 +714,8 @@ static void search_free(struct closure *cl) } static inline struct search *search_alloc(struct bio *bio, - struct bcache_device *d) + struct bcache_device *d, struct block_device *orig_bdev, + unsigned long start_time) { struct search *s; @@ -732,7 +733,8 @@ static inline struct search *search_alloc(struct bio *bio, s->write = op_is_write(bio_op(bio)); s->read_dirty_data = 0; /* Count on the bcache device */ - s->start_time = part_start_io_acct(d->disk, &s->part, bio); + s->orig_bdev = orig_bdev; + s->start_time = start_time; s->iop.c = d->c; s->iop.bio = NULL; s->iop.inode = d->id; @@ -894,7 +896,8 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, !(bio->bi_opf & (REQ_META|REQ_PRIO)) && s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) reada = min_t(sector_t, dc->readahead >> 9, - get_capacity(bio->bi_disk) - bio_end_sector(bio)); + get_capacity(bio->bi_bdev->bd_disk) - + bio_end_sector(bio)); s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); @@ -1073,7 +1076,7 @@ struct detached_dev_io_private { unsigned long start_time; bio_end_io_t *bi_end_io; void *bi_private; - struct block_device *part; + struct block_device *orig_bdev; }; static void detached_dev_end_io(struct bio *bio) @@ -1085,7 +1088,7 @@ static void detached_dev_end_io(struct bio *bio) bio->bi_private = ddip->bi_private; /* Count on the bcache device */ - part_end_io_acct(ddip->part, bio, ddip->start_time); + bio_end_io_acct_remapped(bio, ddip->start_time, ddip->orig_bdev); if (bio->bi_status) { struct cached_dev *dc = container_of(ddip->d, @@ -1098,7 +1101,8 @@ static void detached_dev_end_io(struct bio *bio) bio->bi_end_io(bio); } -static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) +static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, + struct block_device *orig_bdev, unsigned long start_time) { struct detached_dev_io_private *ddip; struct cached_dev *dc = container_of(d, struct cached_dev, disk); @@ -1111,7 +1115,8 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio) ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO); ddip->d = d; /* Count on the bcache device */ - ddip->start_time = part_start_io_acct(d->disk, &ddip->part, bio); + ddip->orig_bdev = orig_bdev; + ddip->start_time = start_time; ddip->bi_end_io = bio->bi_end_io; ddip->bi_private = bio->bi_private; bio->bi_end_io = detached_dev_end_io; @@ -1167,8 +1172,10 @@ static void quit_max_writeback_rate(struct cache_set *c, blk_qc_t cached_dev_submit_bio(struct bio *bio) { struct search *s; - struct 
bcache_device *d = bio->bi_disk->private_data; + struct block_device *orig_bdev = bio->bi_bdev; + struct bcache_device *d = orig_bdev->bd_disk->private_data; struct cached_dev *dc = container_of(d, struct cached_dev, disk); + unsigned long start_time; int rw = bio_data_dir(bio); if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) || @@ -1193,11 +1200,13 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio) } } + start_time = bio_start_io_acct(bio); + bio_set_dev(bio, dc->bdev); bio->bi_iter.bi_sector += dc->sb.data_offset; if (cached_dev_get(dc)) { - s = search_alloc(bio, d); + s = search_alloc(bio, d, orig_bdev, start_time); trace_bcache_request_start(s->d, bio); if (!bio->bi_iter.bi_size) { @@ -1218,7 +1227,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio) } } else /* I/O request sent to backing device */ - detached_dev_do_request(d, bio); + detached_dev_do_request(d, bio, orig_bdev, start_time); return BLK_QC_T_NONE; } @@ -1274,7 +1283,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) { struct search *s; struct closure *cl; - struct bcache_device *d = bio->bi_disk->private_data; + struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) { bio->bi_status = BLK_STS_IOERR; @@ -1282,7 +1291,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio) return BLK_QC_T_NONE; } - s = search_alloc(bio, d); + s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio)); cl = &s->cl; bio = &s->bio.bio; diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c index 2047a9cccdb5..71691f32959b 100644 --- a/drivers/md/bcache/super.c +++ b/drivers/md/bcache/super.c @@ -49,6 +49,7 @@ static int bcache_major; static DEFINE_IDA(bcache_device_idx); static wait_queue_head_t unregister_wait; struct workqueue_struct *bcache_wq; +struct workqueue_struct *bch_flush_wq; struct workqueue_struct *bch_journal_wq; @@ -1939,7 +1940,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) goto err; if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio), - BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER)) + BIOSET_NEED_RESCUER)) goto err; c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb); @@ -2517,7 +2518,7 @@ out: module_put(THIS_MODULE); } -static void register_device_aync(struct async_reg_args *args) +static void register_device_async(struct async_reg_args *args) { if (SB_IS_BDEV(args->sb)) INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker); @@ -2611,7 +2612,7 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr, args->sb = sb; args->sb_disk = sb_disk; args->bdev = bdev; - register_device_aync(args); + register_device_async(args); /* No wait and returns to user space */ goto async_done; } @@ -2821,6 +2822,9 @@ static void bcache_exit(void) destroy_workqueue(bcache_wq); if (bch_journal_wq) destroy_workqueue(bch_journal_wq); + if (bch_flush_wq) + destroy_workqueue(bch_flush_wq); + bch_btree_exit(); if (bcache_major) unregister_blkdev(bcache_major, "bcache"); @@ -2876,10 +2880,26 @@ static int __init bcache_init(void) return bcache_major; } + if (bch_btree_init()) + goto err; + bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0); if (!bcache_wq) goto err; + /* + * Let's not make this `WQ_MEM_RECLAIM` for the following reasons: + * + * 1. It used `system_wq` before which also does no memory reclaim. + * 2. With `WQ_MEM_RECLAIM` desktop stalls, increased boot times, and + * reduced throughput can be observed. + * + * We still want to user our own queue to not congest the `system_wq`. 
+ */ + bch_flush_wq = alloc_workqueue("bch_flush", 0, 0); + if (!bch_flush_wq) + goto err; + bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0); if (!bch_journal_wq) goto err; diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c index 00a520c03f41..cc89f3156d1a 100644 --- a/drivers/md/bcache/sysfs.c +++ b/drivers/md/bcache/sysfs.c @@ -117,10 +117,14 @@ rw_attribute(writeback_running); rw_attribute(writeback_percent); rw_attribute(writeback_delay); rw_attribute(writeback_rate); +rw_attribute(writeback_consider_fragment); rw_attribute(writeback_rate_update_seconds); rw_attribute(writeback_rate_i_term_inverse); rw_attribute(writeback_rate_p_term_inverse); +rw_attribute(writeback_rate_fp_term_low); +rw_attribute(writeback_rate_fp_term_mid); +rw_attribute(writeback_rate_fp_term_high); rw_attribute(writeback_rate_minimum); read_attribute(writeback_rate_debug); @@ -195,6 +199,7 @@ SHOW(__bch_cached_dev) var_printf(bypass_torture_test, "%i"); var_printf(writeback_metadata, "%i"); var_printf(writeback_running, "%i"); + var_printf(writeback_consider_fragment, "%i"); var_print(writeback_delay); var_print(writeback_percent); sysfs_hprint(writeback_rate, @@ -205,6 +210,9 @@ SHOW(__bch_cached_dev) var_print(writeback_rate_update_seconds); var_print(writeback_rate_i_term_inverse); var_print(writeback_rate_p_term_inverse); + var_print(writeback_rate_fp_term_low); + var_print(writeback_rate_fp_term_mid); + var_print(writeback_rate_fp_term_high); var_print(writeback_rate_minimum); if (attr == &sysfs_writeback_rate_debug) { @@ -303,6 +311,7 @@ STORE(__cached_dev) sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test); sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata); sysfs_strtoul_bool(writeback_running, dc->writeback_running); + sysfs_strtoul_bool(writeback_consider_fragment, dc->writeback_consider_fragment); sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX); sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, @@ -331,6 +340,16 @@ STORE(__cached_dev) sysfs_strtoul_clamp(writeback_rate_p_term_inverse, dc->writeback_rate_p_term_inverse, 1, UINT_MAX); + sysfs_strtoul_clamp(writeback_rate_fp_term_low, + dc->writeback_rate_fp_term_low, + 1, dc->writeback_rate_fp_term_mid - 1); + sysfs_strtoul_clamp(writeback_rate_fp_term_mid, + dc->writeback_rate_fp_term_mid, + dc->writeback_rate_fp_term_low + 1, + dc->writeback_rate_fp_term_high - 1); + sysfs_strtoul_clamp(writeback_rate_fp_term_high, + dc->writeback_rate_fp_term_high, + dc->writeback_rate_fp_term_mid + 1, UINT_MAX); sysfs_strtoul_clamp(writeback_rate_minimum, dc->writeback_rate_minimum, 1, UINT_MAX); @@ -499,9 +518,13 @@ static struct attribute *bch_cached_dev_files[] = { &sysfs_writeback_delay, &sysfs_writeback_percent, &sysfs_writeback_rate, + &sysfs_writeback_consider_fragment, &sysfs_writeback_rate_update_seconds, &sysfs_writeback_rate_i_term_inverse, &sysfs_writeback_rate_p_term_inverse, + &sysfs_writeback_rate_fp_term_low, + &sysfs_writeback_rate_fp_term_mid, + &sysfs_writeback_rate_fp_term_high, &sysfs_writeback_rate_minimum, &sysfs_writeback_rate_debug, &sysfs_io_errors, @@ -1071,8 +1094,10 @@ SHOW(__bch_cache) --n; while (cached < p + n && - *cached == BTREE_PRIO) - cached++, n--; + *cached == BTREE_PRIO) { + cached++; + n--; + } for (i = 0; i < n; i++) sum += INITIAL_PRIO - cached[i]; diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index a129e4d2707c..82d4e0880a99 100644 --- a/drivers/md/bcache/writeback.c +++ 
b/drivers/md/bcache/writeback.c @@ -88,6 +88,44 @@ static void __update_writeback_rate(struct cached_dev *dc) int64_t integral_scaled; uint32_t new_rate; + /* + * We need to consider the number of dirty buckets as well + * when calculating the proportional_scaled, Otherwise we might + * have an unreasonable small writeback rate at a highly fragmented situation + * when very few dirty sectors consumed a lot dirty buckets, the + * worst case is when dirty buckets reached cutoff_writeback_sync and + * dirty data is still not even reached to writeback percent, so the rate + * still will be at the minimum value, which will cause the write + * stuck at a non-writeback mode. + */ + struct cache_set *c = dc->disk.c; + + int64_t dirty_buckets = c->nbuckets - c->avail_nbuckets; + + if (dc->writeback_consider_fragment && + c->gc_stats.in_use > BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW && dirty > 0) { + int64_t fragment = + div_s64((dirty_buckets * c->cache->sb.bucket_size), dirty); + int64_t fp_term; + int64_t fps; + + if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID) { + fp_term = dc->writeback_rate_fp_term_low * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW); + } else if (c->gc_stats.in_use <= BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH) { + fp_term = dc->writeback_rate_fp_term_mid * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID); + } else { + fp_term = dc->writeback_rate_fp_term_high * + (c->gc_stats.in_use - BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH); + } + fps = div_s64(dirty, dirty_buckets) * fp_term; + if (fragment > 3 && fps > proportional_scaled) { + /* Only overrite the p when fragment > 3 */ + proportional_scaled = fps; + } + } + if ((error < 0 && dc->writeback_rate_integral > 0) || (error > 0 && time_before64(local_clock(), dc->writeback_rate.next + NSEC_PER_MSEC))) { @@ -977,6 +1015,7 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_metadata = true; dc->writeback_running = false; + dc->writeback_consider_fragment = true; dc->writeback_percent = 10; dc->writeback_delay = 30; atomic_long_set(&dc->writeback_rate.rate, 1024); @@ -984,6 +1023,9 @@ void bch_cached_dev_writeback_init(struct cached_dev *dc) dc->writeback_rate_update_seconds = WRITEBACK_RATE_UPDATE_SECS_DEFAULT; dc->writeback_rate_p_term_inverse = 40; + dc->writeback_rate_fp_term_low = 1; + dc->writeback_rate_fp_term_mid = 10; + dc->writeback_rate_fp_term_high = 1000; dc->writeback_rate_i_term_inverse = 10000; WARN_ON(test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)); diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h index 3f1230e22de0..02b2f9df73f6 100644 --- a/drivers/md/bcache/writeback.h +++ b/drivers/md/bcache/writeback.h @@ -16,6 +16,10 @@ #define BCH_AUTO_GC_DIRTY_THRESHOLD 50 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57 +#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64 + #define BCH_DIRTY_INIT_THRD_MAX 64 /* * 14 (16384ths) is chosen here as something that each backing device diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h index 2ea0360108e1..a3b71350eec8 100644 --- a/drivers/md/dm-bio-record.h +++ b/drivers/md/dm-bio-record.h @@ -18,8 +18,7 @@ */ struct dm_bio_details { - struct gendisk *bi_disk; - u8 bi_partno; + struct block_device *bi_bdev; int __bi_remaining; unsigned long bi_flags; struct bvec_iter bi_iter; @@ -31,8 +30,7 @@ struct dm_bio_details { static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) { - bd->bi_disk = bio->bi_disk; 
- bd->bi_partno = bio->bi_partno; + bd->bi_bdev = bio->bi_bdev; bd->bi_flags = bio->bi_flags; bd->bi_iter = bio->bi_iter; bd->__bi_remaining = atomic_read(&bio->__bi_remaining); @@ -44,8 +42,7 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) { - bio->bi_disk = bd->bi_disk; - bio->bi_partno = bd->bi_partno; + bio->bi_bdev = bd->bi_bdev; bio->bi_flags = bd->bi_flags; bio->bi_iter = bd->bi_iter; atomic_set(&bio->__bi_remaining, bd->__bi_remaining); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 9c1a86bde658..fce4cbf9529d 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1534,6 +1534,12 @@ sector_t dm_bufio_get_device_size(struct dm_bufio_client *c) } EXPORT_SYMBOL_GPL(dm_bufio_get_device_size); +struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c) +{ + return c->dm_io; +} +EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client); + sector_t dm_bufio_get_block_number(struct dm_buffer *b) { return b->block; diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c index af6d4f898e4c..89a73204dbf4 100644 --- a/drivers/md/dm-cache-metadata.c +++ b/drivers/md/dm-cache-metadata.c @@ -449,7 +449,7 @@ static int __check_incompat_features(struct cache_disk_superblock *disk_super, /* * Check for read-only metadata to skip the following RDWR checks. */ - if (get_disk_ro(cmd->bdev->bd_disk)) + if (bdev_read_only(cmd->bdev)) return 0; features = le32_to_cpu(disk_super->compat_ro_flags) & ~DM_CACHE_FEATURE_COMPAT_RO_SUPP; diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index bdb255edc200..a90bdf9b2ca6 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -85,12 +85,6 @@ struct clone { struct dm_clone_metadata *cmd; - /* - * bio used to flush the destination device, before committing the - * metadata. 
- */ - struct bio flush_bio; - /* Region hydration hash table */ struct hash_table_bucket *ht; @@ -1155,11 +1149,7 @@ static int commit_metadata(struct clone *clone, bool *dest_dev_flushed) goto out; } - bio_reset(&clone->flush_bio); - bio_set_dev(&clone->flush_bio, clone->dest_dev->bdev); - clone->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; - - r = submit_bio_wait(&clone->flush_bio); + r = blkdev_issue_flush(clone->dest_dev->bdev); if (unlikely(r)) { __metadata_operation_failed(clone, "flush destination device", r); goto out; @@ -1886,7 +1876,6 @@ static int clone_ctr(struct dm_target *ti, unsigned int argc, char **argv) bio_list_init(&clone->deferred_flush_completions); clone->hydration_offset = 0; atomic_set(&clone->hydrations_in_flight, 0); - bio_init(&clone->flush_bio, NULL, 0); clone->wq = alloc_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM, 0); if (!clone->wq) { @@ -1958,7 +1947,6 @@ static void clone_dtr(struct dm_target *ti) struct clone *clone = ti->private; mutex_destroy(&clone->commit_lock); - bio_uninit(&clone->flush_bio); for (i = 0; i < clone->nr_ctr_args; i++) kfree(clone->ctr_args[i]); diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 086d293c2b03..5953ff2bd260 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -13,6 +13,7 @@ #include <linux/ktime.h> #include <linux/genhd.h> #include <linux/blk-mq.h> +#include <linux/keyslot-manager.h> #include <trace/events/block.h> @@ -102,6 +103,10 @@ struct mapped_device { /* kobject and completion */ struct dm_kobject_holder kobj_holder; + int swap_bios; + struct semaphore swap_bios_semaphore; + struct mutex swap_bios_lock; + struct dm_stats stats; /* for blk-mq request-based DM support */ @@ -162,6 +167,10 @@ struct dm_table { void *event_context; struct dm_md_mempools *mempools; + +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + struct blk_keyslot_manager *ksm; +#endif }; static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj) diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 53791138d78b..11c105ecd165 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -37,6 +37,7 @@ #include <linux/key-type.h> #include <keys/user-type.h> #include <keys/encrypted-type.h> +#include <keys/trusted-type.h> #include <linux/device-mapper.h> @@ -133,7 +134,7 @@ enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_WRITE_INLINE }; enum cipher_flags { - CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cihper */ + CRYPT_MODE_INTEGRITY_AEAD, /* Use authenticated mode for cipher */ CRYPT_IV_LARGE_SECTORS, /* Calculate IV from sector_size, not 512B sectors */ CRYPT_ENCRYPT_PREPROCESS, /* Must preprocess data for encryption (elephant) */ }; @@ -1454,13 +1455,16 @@ static int crypt_convert_block_skcipher(struct crypt_config *cc, static void kcryptd_async_done(struct crypto_async_request *async_req, int error); -static void crypt_alloc_req_skcipher(struct crypt_config *cc, +static int crypt_alloc_req_skcipher(struct crypt_config *cc, struct convert_context *ctx) { unsigned key_index = ctx->cc_sector & (cc->tfms_count - 1); - if (!ctx->r.req) - ctx->r.req = mempool_alloc(&cc->req_pool, GFP_NOIO); + if (!ctx->r.req) { + ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? 
GFP_ATOMIC : GFP_NOIO); + if (!ctx->r.req) + return -ENOMEM; + } skcipher_request_set_tfm(ctx->r.req, cc->cipher_tfm.tfms[key_index]); @@ -1471,13 +1475,18 @@ static void crypt_alloc_req_skcipher(struct crypt_config *cc, skcipher_request_set_callback(ctx->r.req, CRYPTO_TFM_REQ_MAY_BACKLOG, kcryptd_async_done, dmreq_of_req(cc, ctx->r.req)); + + return 0; } -static void crypt_alloc_req_aead(struct crypt_config *cc, +static int crypt_alloc_req_aead(struct crypt_config *cc, struct convert_context *ctx) { - if (!ctx->r.req_aead) - ctx->r.req_aead = mempool_alloc(&cc->req_pool, GFP_NOIO); + if (!ctx->r.req_aead) { + ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); + if (!ctx->r.req_aead) + return -ENOMEM; + } aead_request_set_tfm(ctx->r.req_aead, cc->cipher_tfm.tfms_aead[0]); @@ -1488,15 +1497,17 @@ static void crypt_alloc_req_aead(struct crypt_config *cc, aead_request_set_callback(ctx->r.req_aead, CRYPTO_TFM_REQ_MAY_BACKLOG, kcryptd_async_done, dmreq_of_req(cc, ctx->r.req_aead)); + + return 0; } -static void crypt_alloc_req(struct crypt_config *cc, +static int crypt_alloc_req(struct crypt_config *cc, struct convert_context *ctx) { if (crypt_integrity_aead(cc)) - crypt_alloc_req_aead(cc, ctx); + return crypt_alloc_req_aead(cc, ctx); else - crypt_alloc_req_skcipher(cc, ctx); + return crypt_alloc_req_skcipher(cc, ctx); } static void crypt_free_req_skcipher(struct crypt_config *cc, @@ -1529,17 +1540,28 @@ static void crypt_free_req(struct crypt_config *cc, void *req, struct bio *base_ * Encrypt / decrypt data from one bio to another one (can be the same one) */ static blk_status_t crypt_convert(struct crypt_config *cc, - struct convert_context *ctx, bool atomic) + struct convert_context *ctx, bool atomic, bool reset_pending) { unsigned int tag_offset = 0; unsigned int sector_step = cc->sector_size >> SECTOR_SHIFT; int r; - atomic_set(&ctx->cc_pending, 1); + /* + * if reset_pending is set we are dealing with the bio for the first time, + * else we're continuing to work on the previous bio, so don't mess with + * the cc_pending counter + */ + if (reset_pending) + atomic_set(&ctx->cc_pending, 1); while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { - crypt_alloc_req(cc, ctx); + r = crypt_alloc_req(cc, ctx); + if (r) { + complete(&ctx->restart); + return BLK_STS_DEV_RESOURCE; + } + atomic_inc(&ctx->cc_pending); if (crypt_integrity_aead(cc)) @@ -1553,7 +1575,25 @@ static blk_status_t crypt_convert(struct crypt_config *cc, * but the driver request queue is full, let's wait. */ case -EBUSY: - wait_for_completion(&ctx->restart); + if (in_interrupt()) { + if (try_wait_for_completion(&ctx->restart)) { + /* + * we don't have to block to wait for completion, + * so proceed + */ + } else { + /* + * we can't wait for completion without blocking + * exit and continue processing in a workqueue + */ + ctx->r.req = NULL; + ctx->cc_sector += sector_step; + tag_offset++; + return BLK_STS_DEV_RESOURCE; + } + } else { + wait_for_completion(&ctx->restart); + } reinit_completion(&ctx->restart); fallthrough; /* @@ -1691,6 +1731,12 @@ static void crypt_inc_pending(struct dm_crypt_io *io) atomic_inc(&io->io_pending); } +static void kcryptd_io_bio_endio(struct work_struct *work) +{ + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); + bio_endio(io->base_bio); +} + /* * One of the bios was finished. Check for completion of * the whole request and correctly clean up the buffer. 
@@ -1713,7 +1759,23 @@ static void crypt_dec_pending(struct dm_crypt_io *io) kfree(io->integrity_metadata); base_bio->bi_status = error; - bio_endio(base_bio); + + /* + * If we are running this function from our tasklet, + * we can't call bio_endio() here, because it will call + * clone_endio() from dm.c, which in turn will + * free the current struct dm_crypt_io structure with + * our tasklet. In this case we need to delay bio_endio() + * execution to after the tasklet is done and dequeued. + */ + if (tasklet_trylock(&io->tasklet)) { + tasklet_unlock(&io->tasklet); + bio_endio(base_bio); + return; + } + + INIT_WORK(&io->work, kcryptd_io_bio_endio); + queue_work(cc->io_queue, &io->work); } /* @@ -1945,6 +2007,37 @@ static bool kcryptd_crypt_write_inline(struct crypt_config *cc, } } +static void kcryptd_crypt_write_continue(struct work_struct *work) +{ + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); + struct crypt_config *cc = io->cc; + struct convert_context *ctx = &io->ctx; + int crypt_finished; + sector_t sector = io->sector; + blk_status_t r; + + wait_for_completion(&ctx->restart); + reinit_completion(&ctx->restart); + + r = crypt_convert(cc, &io->ctx, true, false); + if (r) + io->error = r; + crypt_finished = atomic_dec_and_test(&ctx->cc_pending); + if (!crypt_finished && kcryptd_crypt_write_inline(cc, ctx)) { + /* Wait for completion signaled by kcryptd_async_done() */ + wait_for_completion(&ctx->restart); + crypt_finished = 1; + } + + /* Encryption was already finished, submit io now */ + if (crypt_finished) { + kcryptd_crypt_write_io_submit(io, 0); + io->sector = sector; + } + + crypt_dec_pending(io); +} + static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) { struct crypt_config *cc = io->cc; @@ -1973,7 +2066,17 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) crypt_inc_pending(io); r = crypt_convert(cc, ctx, - test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags)); + test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags), true); + /* + * Crypto API backlogged the request, because its queue was full + * and we're in softirq context, so continue from a workqueue + * (TODO: is it actually possible to be in softirq in the write path?) 
+ */ + if (r == BLK_STS_DEV_RESOURCE) { + INIT_WORK(&io->work, kcryptd_crypt_write_continue); + queue_work(cc->crypt_queue, &io->work); + return; + } if (r) io->error = r; crypt_finished = atomic_dec_and_test(&ctx->cc_pending); @@ -1998,6 +2101,25 @@ static void kcryptd_crypt_read_done(struct dm_crypt_io *io) crypt_dec_pending(io); } +static void kcryptd_crypt_read_continue(struct work_struct *work) +{ + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); + struct crypt_config *cc = io->cc; + blk_status_t r; + + wait_for_completion(&io->ctx.restart); + reinit_completion(&io->ctx.restart); + + r = crypt_convert(cc, &io->ctx, true, false); + if (r) + io->error = r; + + if (atomic_dec_and_test(&io->ctx.cc_pending)) + kcryptd_crypt_read_done(io); + + crypt_dec_pending(io); +} + static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) { struct crypt_config *cc = io->cc; @@ -2009,7 +2131,16 @@ static void kcryptd_crypt_read_convert(struct dm_crypt_io *io) io->sector); r = crypt_convert(cc, &io->ctx, - test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)); + test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags), true); + /* + * Crypto API backlogged the request, because its queue was full + * and we're in softirq context, so continue from a workqueue + */ + if (r == BLK_STS_DEV_RESOURCE) { + INIT_WORK(&io->work, kcryptd_crypt_read_continue); + queue_work(cc->crypt_queue, &io->work); + return; + } if (r) io->error = r; @@ -2091,8 +2222,12 @@ static void kcryptd_queue_crypt(struct dm_crypt_io *io) if ((bio_data_dir(io->base_bio) == READ && test_bit(DM_CRYPT_NO_READ_WORKQUEUE, &cc->flags)) || (bio_data_dir(io->base_bio) == WRITE && test_bit(DM_CRYPT_NO_WRITE_WORKQUEUE, &cc->flags))) { - if (in_irq()) { - /* Crypto API's "skcipher_walk_first() refuses to work in hard IRQ context */ + /* + * in_irq(): Crypto API's skcipher_walk_first() refuses to work in hard IRQ context. + * irqs_disabled(): the kernel may run some IO completion from the idle thread, but + * it is being executed with irqs disabled. 
+ */ + if (in_irq() || irqs_disabled()) { tasklet_init(&io->tasklet, kcryptd_crypt_tasklet, (unsigned long)&io->work); tasklet_schedule(&io->tasklet); return; @@ -2302,7 +2437,6 @@ static int set_key_user(struct crypt_config *cc, struct key *key) return 0; } -#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE) static int set_key_encrypted(struct crypt_config *cc, struct key *key) { const struct encrypted_key_payload *ekp; @@ -2318,7 +2452,22 @@ static int set_key_encrypted(struct crypt_config *cc, struct key *key) return 0; } -#endif /* CONFIG_ENCRYPTED_KEYS */ + +static int set_key_trusted(struct crypt_config *cc, struct key *key) +{ + const struct trusted_key_payload *tkp; + + tkp = key->payload.data[0]; + if (!tkp) + return -EKEYREVOKED; + + if (cc->key_size != tkp->key_len) + return -EINVAL; + + memcpy(cc->key, tkp->key, cc->key_size); + + return 0; +} static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string) { @@ -2348,11 +2497,14 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string } else if (!strncmp(key_string, "user:", key_desc - key_string + 1)) { type = &key_type_user; set_key = set_key_user; -#if defined(CONFIG_ENCRYPTED_KEYS) || defined(CONFIG_ENCRYPTED_KEYS_MODULE) - } else if (!strncmp(key_string, "encrypted:", key_desc - key_string + 1)) { + } else if (IS_ENABLED(CONFIG_ENCRYPTED_KEYS) && + !strncmp(key_string, "encrypted:", key_desc - key_string + 1)) { type = &key_type_encrypted; set_key = set_key_encrypted; -#endif + } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) && + !strncmp(key_string, "trusted:", key_desc - key_string + 1)) { + type = &key_type_trusted; + set_key = set_key_trusted; } else { return -EINVAL; } @@ -2982,7 +3134,6 @@ static int crypt_ctr_optional(struct dm_target *ti, unsigned int argc, char **ar } #ifdef CONFIG_BLK_DEV_ZONED - static int crypt_report_zones(struct dm_target *ti, struct dm_report_zones_args *args, unsigned int nr_zones) { @@ -2993,7 +3144,8 @@ static int crypt_report_zones(struct dm_target *ti, return blkdev_report_zones(cc->dev->bdev, sector, nr_zones, dm_report_zones_cb, args); } - +#else +#define crypt_report_zones NULL #endif /* @@ -3190,6 +3342,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) wake_up_process(cc->write_thread); ti->num_flush_bios = 1; + ti->limit_swap_bios = true; return 0; @@ -3424,14 +3577,12 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type crypt_target = { .name = "crypt", - .version = {1, 22, 0}, + .version = {1, 23, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, -#ifdef CONFIG_BLK_DEV_ZONED .features = DM_TARGET_ZONED_HM, .report_zones = crypt_report_zones, -#endif .map = crypt_map, .status = crypt_status, .postsuspend = crypt_postsuspend, diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c index 072ea913cebc..cbe1058ee589 100644 --- a/drivers/md/dm-dust.c +++ b/drivers/md/dm-dust.c @@ -130,7 +130,7 @@ static int dust_add_block(struct dust_device *dd, unsigned long long block, dd->badblock_count++; if (!dd->quiet_mode) { - DMINFO("%s: badblock added at block %llu with write fail count %hhu", + DMINFO("%s: badblock added at block %llu with write fail count %u", __func__, block, wr_fail_cnt); } spin_unlock_irqrestore(&dd->dust_lock, flags); diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index b24e3839bb3a..d9ac7372108c 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -47,6 +47,7 @@ 
struct writeset { static void writeset_free(struct writeset *ws) { vfree(ws->bits); + ws->bits = NULL; } static int setup_on_disk_bitset(struct dm_disk_bitset *info, @@ -71,8 +72,6 @@ static size_t bitset_size(unsigned nr_bits) */ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) { - ws->md.nr_bits = nr_blocks; - ws->md.root = INVALID_WRITESET_ROOT; ws->bits = vzalloc(bitset_size(nr_blocks)); if (!ws->bits) { DMERR("%s: couldn't allocate in memory bitset", __func__); @@ -85,12 +84,14 @@ static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks) /* * Wipes the in-core bitset, and creates a new on disk bitset. */ -static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws) +static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws, + dm_block_t nr_blocks) { int r; - memset(ws->bits, 0, bitset_size(ws->md.nr_bits)); + memset(ws->bits, 0, bitset_size(nr_blocks)); + ws->md.nr_bits = nr_blocks; r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root); if (r) { DMERR("%s: setup_on_disk_bitset failed", __func__); @@ -134,7 +135,7 @@ static int writeset_test_and_set(struct dm_disk_bitset *info, { int r; - if (!test_and_set_bit(block, ws->bits)) { + if (!test_bit(block, ws->bits)) { r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root); if (r) { /* FIXME: fail mode */ @@ -388,7 +389,7 @@ static void ws_dec(void *context, const void *value) static int ws_eq(void *context, const void *value1, const void *value2) { - return !memcmp(value1, value2, sizeof(struct writeset_metadata)); + return !memcmp(value1, value2, sizeof(struct writeset_disk)); } /*----------------------------------------------------------------*/ @@ -564,6 +565,15 @@ static int open_metadata(struct era_metadata *md) } disk = dm_block_data(sblock); + + /* Verify the data block size hasn't changed */ + if (le32_to_cpu(disk->data_block_size) != md->block_size) { + DMERR("changing the data block size (from %u to %llu) is not supported", + le32_to_cpu(disk->data_block_size), md->block_size); + r = -EINVAL; + goto bad; + } + r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION, disk->metadata_space_map_root, sizeof(disk->metadata_space_map_root), @@ -575,10 +585,10 @@ static int open_metadata(struct era_metadata *md) setup_infos(md); - md->block_size = le32_to_cpu(disk->data_block_size); md->nr_blocks = le32_to_cpu(disk->nr_blocks); md->current_era = le32_to_cpu(disk->current_era); + ws_unpack(&disk->current_writeset, &md->current_writeset->md); md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root); md->era_array_root = le64_to_cpu(disk->era_array_root); md->metadata_snap = le64_to_cpu(disk->metadata_snap); @@ -746,6 +756,12 @@ static int metadata_digest_lookup_writeset(struct era_metadata *md, ws_unpack(&disk, &d->writeset); d->value = cpu_to_le32(key); + /* + * We initialise another bitset info to avoid any caching side effects + * with the previous one. + */ + dm_disk_bitset_init(md->tm, &d->info); + d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks); d->current_bit = 0; d->step = metadata_digest_transcribe_writeset; @@ -759,12 +775,6 @@ static int metadata_digest_start(struct era_metadata *md, struct digest *d) return 0; memset(d, 0, sizeof(*d)); - - /* - * We initialise another bitset info to avoid any caching side - * effects with the previous one. 
- */ - dm_disk_bitset_init(md->tm, &d->info); d->step = metadata_digest_lookup_writeset; return 0; @@ -802,6 +812,8 @@ static struct era_metadata *metadata_open(struct block_device *bdev, static void metadata_close(struct era_metadata *md) { + writeset_free(&md->writesets[0]); + writeset_free(&md->writesets[1]); destroy_persistent_data_objects(md); kfree(md); } @@ -839,6 +851,7 @@ static int metadata_resize(struct era_metadata *md, void *arg) r = writeset_alloc(&md->writesets[1], *new_size); if (r) { DMERR("%s: writeset_alloc failed for writeset 1", __func__); + writeset_free(&md->writesets[0]); return r; } @@ -849,6 +862,8 @@ static int metadata_resize(struct era_metadata *md, void *arg) &value, &md->era_array_root); if (r) { DMERR("%s: dm_array_resize failed", __func__); + writeset_free(&md->writesets[0]); + writeset_free(&md->writesets[1]); return r; } @@ -870,7 +885,6 @@ static int metadata_era_archive(struct era_metadata *md) } ws_pack(&md->current_writeset->md, &value); - md->current_writeset->md.root = INVALID_WRITESET_ROOT; keys[0] = md->current_era; __dm_bless_for_disk(&value); @@ -882,6 +896,7 @@ static int metadata_era_archive(struct era_metadata *md) return r; } + md->current_writeset->md.root = INVALID_WRITESET_ROOT; md->archived_writesets = true; return 0; @@ -898,7 +913,7 @@ static int metadata_new_era(struct era_metadata *md) int r; struct writeset *new_writeset = next_writeset(md); - r = writeset_init(&md->bitset_info, new_writeset); + r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks); if (r) { DMERR("%s: writeset_init failed", __func__); return r; @@ -951,7 +966,7 @@ static int metadata_commit(struct era_metadata *md) int r; struct dm_block *sblock; - if (md->current_writeset->md.root != SUPERBLOCK_LOCATION) { + if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) { r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root, &md->current_writeset->md.root); if (r) { @@ -1225,8 +1240,10 @@ static void process_deferred_bios(struct era *era) int r; struct bio_list deferred_bios, marked_bios; struct bio *bio; + struct blk_plug plug; bool commit_needed = false; bool failed = false; + struct writeset *ws = era->md->current_writeset; bio_list_init(&deferred_bios); bio_list_init(&marked_bios); @@ -1236,9 +1253,11 @@ static void process_deferred_bios(struct era *era) bio_list_init(&era->deferred_bios); spin_unlock(&era->deferred_lock); + if (bio_list_empty(&deferred_bios)) + return; + while ((bio = bio_list_pop(&deferred_bios))) { - r = writeset_test_and_set(&era->md->bitset_info, - era->md->current_writeset, + r = writeset_test_and_set(&era->md->bitset_info, ws, get_block(era, bio)); if (r < 0) { /* @@ -1246,7 +1265,6 @@ static void process_deferred_bios(struct era *era) * FIXME: finish. */ failed = true; - } else if (r == 0) commit_needed = true; @@ -1262,9 +1280,19 @@ static void process_deferred_bios(struct era *era) if (failed) while ((bio = bio_list_pop(&marked_bios))) bio_io_error(bio); - else - while ((bio = bio_list_pop(&marked_bios))) + else { + blk_start_plug(&plug); + while ((bio = bio_list_pop(&marked_bios))) { + /* + * Only update the in-core writeset if the on-disk one + * was updated too. 
+ */ + if (commit_needed) + set_bit(get_block(era, bio), ws->bits); submit_bio_noacct(bio); + } + blk_finish_plug(&plug); + } } static void process_rpc_calls(struct era *era) @@ -1473,15 +1501,6 @@ static int era_ctr(struct dm_target *ti, unsigned argc, char **argv) } era->md = md; - era->nr_blocks = calc_nr_blocks(era); - - r = metadata_resize(era->md, &era->nr_blocks); - if (r) { - ti->error = "couldn't resize metadata"; - era_destroy(era); - return -ENOMEM; - } - era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); if (!era->wq) { ti->error = "could not create workqueue for metadata object"; @@ -1556,16 +1575,24 @@ static int era_preresume(struct dm_target *ti) dm_block_t new_size = calc_nr_blocks(era); if (era->nr_blocks != new_size) { - r = in_worker1(era, metadata_resize, &new_size); - if (r) + r = metadata_resize(era->md, &new_size); + if (r) { + DMERR("%s: metadata_resize failed", __func__); + return r; + } + + r = metadata_commit(era->md); + if (r) { + DMERR("%s: metadata_commit failed", __func__); return r; + } era->nr_blocks = new_size; } start_worker(era); - r = in_worker0(era, metadata_new_era); + r = in_worker0(era, metadata_era_rollover); if (r) { DMERR("%s: metadata_era_rollover failed", __func__); return r; diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index a2cc9e45cbba..b7fee9936f05 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c @@ -469,6 +469,8 @@ static int flakey_report_zones(struct dm_target *ti, return blkdev_report_zones(fc->dev->bdev, sector, nr_zones, dm_report_zones_cb, args); } +#else +#define flakey_report_zones NULL #endif static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) @@ -481,10 +483,8 @@ static int flakey_iterate_devices(struct dm_target *ti, iterate_devices_callout_ static struct target_type flakey_target = { .name = "flakey", .version = {1, 5, 0}, -#ifdef CONFIG_BLK_DEV_ZONED - .features = DM_TARGET_ZONED_HM, + .features = DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO, .report_zones = flakey_report_zones, -#endif .module = THIS_MODULE, .ctr = flakey_ctr, .dtr = flakey_dtr, diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 5a7a1b90e671..46b5d542b8fe 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -40,6 +40,7 @@ #define BITMAP_BLOCK_SIZE 4096 /* don't change it */ #define BITMAP_FLUSH_INTERVAL (10 * HZ) #define DISCARD_FILLER 0xf6 +#define SALT_SIZE 16 /* * Warning - DEBUG_PRINT prints security-sensitive data to the log, @@ -57,6 +58,7 @@ #define SB_VERSION_2 2 #define SB_VERSION_3 3 #define SB_VERSION_4 4 +#define SB_VERSION_5 5 #define SB_SECTORS 8 #define MAX_SECTORS_PER_BLOCK 8 @@ -72,12 +74,15 @@ struct superblock { __u8 log2_blocks_per_bitmap_bit; __u8 pad[2]; __u64 recalc_sector; + __u8 pad2[8]; + __u8 salt[SALT_SIZE]; }; #define SB_FLAG_HAVE_JOURNAL_MAC 0x1 #define SB_FLAG_RECALCULATING 0x2 #define SB_FLAG_DIRTY_BITMAP 0x4 #define SB_FLAG_FIXED_PADDING 0x8 +#define SB_FLAG_FIXED_HMAC 0x10 #define JOURNAL_ENTRY_ROUNDUP 8 @@ -257,8 +262,10 @@ struct dm_integrity_c { bool journal_uptodate; bool just_formatted; bool recalculate_flag; - bool fix_padding; bool discard; + bool fix_padding; + bool fix_hmac; + bool legacy_recalculate; struct alg_spec internal_hash_alg; struct alg_spec journal_crypt_alg; @@ -386,6 +393,17 @@ static int dm_integrity_failed(struct dm_integrity_c *ic) return READ_ONCE(ic->failed); } +static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) +{ + if 
(ic->legacy_recalculate) + return false; + if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ? + ic->internal_hash_alg.key || ic->journal_mac_alg.key : + ic->internal_hash_alg.key && !ic->journal_mac_alg.key) + return true; + return false; +} + static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, unsigned j, unsigned char seq) { @@ -468,7 +486,9 @@ static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr) static void sb_set_version(struct dm_integrity_c *ic) { - if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) + if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) + ic->sb->version = SB_VERSION_5; + else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) ic->sb->version = SB_VERSION_4; else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) ic->sb->version = SB_VERSION_3; @@ -478,10 +498,58 @@ static void sb_set_version(struct dm_integrity_c *ic) ic->sb->version = SB_VERSION_1; } +static int sb_mac(struct dm_integrity_c *ic, bool wr) +{ + SHASH_DESC_ON_STACK(desc, ic->journal_mac); + int r; + unsigned size = crypto_shash_digestsize(ic->journal_mac); + + if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) { + dm_integrity_io_error(ic, "digest is too long", -EINVAL); + return -EINVAL; + } + + desc->tfm = ic->journal_mac; + + r = crypto_shash_init(desc); + if (unlikely(r < 0)) { + dm_integrity_io_error(ic, "crypto_shash_init", r); + return r; + } + + r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size); + if (unlikely(r < 0)) { + dm_integrity_io_error(ic, "crypto_shash_update", r); + return r; + } + + if (likely(wr)) { + r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size); + if (unlikely(r < 0)) { + dm_integrity_io_error(ic, "crypto_shash_final", r); + return r; + } + } else { + __u8 result[HASH_MAX_DIGESTSIZE]; + r = crypto_shash_final(desc, result); + if (unlikely(r < 0)) { + dm_integrity_io_error(ic, "crypto_shash_final", r); + return r; + } + if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) { + dm_integrity_io_error(ic, "superblock mac", -EILSEQ); + return -EILSEQ; + } + } + + return 0; +} + static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags) { struct dm_io_request io_req; struct dm_io_region io_loc; + int r; io_req.bi_op = op; io_req.bi_op_flags = op_flags; @@ -493,10 +561,28 @@ static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags) io_loc.sector = ic->start; io_loc.count = SB_SECTORS; - if (op == REQ_OP_WRITE) + if (op == REQ_OP_WRITE) { sb_set_version(ic); + if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { + r = sb_mac(ic, true); + if (unlikely(r)) + return r; + } + } + + r = dm_io(&io_req, 1, &io_loc, NULL); + if (unlikely(r)) + return r; + + if (op == REQ_OP_READ) { + if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { + r = sb_mac(ic, false); + if (unlikely(r)) + return r; + } + } - return dm_io(&io_req, 1, &io_loc, NULL); + return 0; } #define BITMAP_OP_TEST_ALL_SET 0 @@ -713,15 +799,32 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result desc->tfm = ic->journal_mac; r = crypto_shash_init(desc); - if (unlikely(r)) { + if (unlikely(r < 0)) { dm_integrity_io_error(ic, "crypto_shash_init", r); goto err; } + if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { + uint64_t section_le; + + r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE); + if (unlikely(r < 0)) { + 
dm_integrity_io_error(ic, "crypto_shash_update", r); + goto err; + } + + section_le = cpu_to_le64(section); + r = crypto_shash_update(desc, (__u8 *)§ion_le, sizeof section_le); + if (unlikely(r < 0)) { + dm_integrity_io_error(ic, "crypto_shash_update", r); + goto err; + } + } + for (j = 0; j < ic->journal_section_entries; j++) { struct journal_entry *je = access_journal_entry(ic, section, j); r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector); - if (unlikely(r)) { + if (unlikely(r < 0)) { dm_integrity_io_error(ic, "crypto_shash_update", r); goto err; } @@ -731,7 +834,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result if (likely(size <= JOURNAL_MAC_SIZE)) { r = crypto_shash_final(desc, result); - if (unlikely(r)) { + if (unlikely(r < 0)) { dm_integrity_io_error(ic, "crypto_shash_final", r); goto err; } @@ -744,7 +847,7 @@ static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result goto err; } r = crypto_shash_final(desc, digest); - if (unlikely(r)) { + if (unlikely(r < 0)) { dm_integrity_io_error(ic, "crypto_shash_final", r); goto err; } @@ -1379,12 +1482,52 @@ thorough_test: #undef MAY_BE_HASH } -static void dm_integrity_flush_buffers(struct dm_integrity_c *ic) +struct flush_request { + struct dm_io_request io_req; + struct dm_io_region io_reg; + struct dm_integrity_c *ic; + struct completion comp; +}; + +static void flush_notify(unsigned long error, void *fr_) +{ + struct flush_request *fr = fr_; + if (unlikely(error != 0)) + dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO); + complete(&fr->comp); +} + +static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data) { int r; + + struct flush_request fr; + + if (!ic->meta_dev) + flush_data = false; + if (flush_data) { + fr.io_req.bi_op = REQ_OP_WRITE, + fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC, + fr.io_req.mem.type = DM_IO_KMEM, + fr.io_req.mem.ptr.addr = NULL, + fr.io_req.notify.fn = flush_notify, + fr.io_req.notify.context = &fr; + fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio), + fr.io_reg.bdev = ic->dev->bdev, + fr.io_reg.sector = 0, + fr.io_reg.count = 0, + fr.ic = ic; + init_completion(&fr.comp); + r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL); + BUG_ON(r); + } + r = dm_bufio_write_dirty_buffers(ic->bufio); if (unlikely(r)) dm_integrity_io_error(ic, "writing tags", r); + + if (flush_data) + wait_for_completion(&fr.comp); } static void sleep_on_endio_wait(struct dm_integrity_c *ic) @@ -1507,6 +1650,14 @@ static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector goto failed; } + if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) { + r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE); + if (unlikely(r < 0)) { + dm_integrity_io_error(ic, "crypto_shash_update", r); + goto failed; + } + } + r = crypto_shash_update(req, (const __u8 *)§or_le, sizeof sector_le); if (unlikely(r < 0)) { dm_integrity_io_error(ic, "crypto_shash_update", r); @@ -2110,7 +2261,7 @@ offload_to_thread: if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) { integrity_metadata(&dio->work); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, false); dio->in_flight = (atomic_t)ATOMIC_INIT(1); dio->completion = NULL; @@ -2195,7 +2346,7 @@ static void integrity_commit(struct work_struct *w) flushes = bio_list_get(&ic->flush_bio_list); if (unlikely(ic->mode != 'J')) { spin_unlock_irq(&ic->endio_wait.lock); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); goto 
release_flush_bios; } @@ -2409,7 +2560,7 @@ skip_io: complete_journal_op(&comp); wait_for_completion_io(&comp.comp); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); } static void integrity_writer(struct work_struct *w) @@ -2451,7 +2602,7 @@ static void recalc_write_super(struct dm_integrity_c *ic) { int r; - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, false); if (dm_integrity_failed(ic)) return; @@ -2654,7 +2805,7 @@ static void bitmap_flush_work(struct work_struct *work) unsigned long limit; struct bio *bio; - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, false); range.logical_sector = 0; range.n_sectors = ic->provided_data_sectors; @@ -2663,9 +2814,7 @@ static void bitmap_flush_work(struct work_struct *work) add_new_range_and_wait(ic, &range); spin_unlock_irq(&ic->endio_wait.lock); - dm_integrity_flush_buffers(ic); - if (ic->meta_dev) - blkdev_issue_flush(ic->dev->bdev, GFP_NOIO); + dm_integrity_flush_buffers(ic, true); limit = ic->provided_data_sectors; if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { @@ -2934,11 +3083,11 @@ static void dm_integrity_postsuspend(struct dm_target *ti) if (ic->meta_dev) queue_work(ic->writer_wq, &ic->writer_work); drain_workqueue(ic->writer_wq); - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); } if (ic->mode == 'B') { - dm_integrity_flush_buffers(ic); + dm_integrity_flush_buffers(ic, true); #if 1 /* set to 0 to test bitmap replay code */ init_journal(ic, 0, ic->journal_sections, 0); @@ -3102,6 +3251,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, arg_count += !!ic->journal_crypt_alg.alg_string; arg_count += !!ic->journal_mac_alg.alg_string; arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0; + arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0; + arg_count += ic->legacy_recalculate; DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start, ic->tag_size, ic->mode, arg_count); if (ic->meta_dev) @@ -3125,6 +3276,10 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, } if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) DMEMIT(" fix_padding"); + if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) + DMEMIT(" fix_hmac"); + if (ic->legacy_recalculate) + DMEMIT(" legacy_recalculate"); #define EMIT_ALG(a, n) \ do { \ @@ -3260,6 +3415,11 @@ static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sec if (!journal_sections) journal_sections = 1; + if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) { + ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC); + get_random_bytes(ic->sb->salt, SALT_SIZE); + } + if (!ic->meta_dev) { if (ic->fix_padding) ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING); @@ -3754,7 +3914,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) unsigned extra_args; struct dm_arg_set as; static const struct dm_arg _args[] = { - {0, 9, "Invalid number of feature args"}, + {0, 17, "Invalid number of feature args"}, }; unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; bool should_write_sb; @@ -3892,7 +4052,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) if (r) goto bad; } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) { - r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, + r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error, "Invalid 
journal_mac argument"); if (r) goto bad; @@ -3902,6 +4062,10 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) ic->discard = true; } else if (!strcmp(opt_string, "fix_padding")) { ic->fix_padding = true; + } else if (!strcmp(opt_string, "fix_hmac")) { + ic->fix_hmac = true; + } else if (!strcmp(opt_string, "legacy_recalculate")) { + ic->legacy_recalculate = true; } else { r = -EINVAL; ti->error = "Invalid argument"; @@ -4058,7 +4222,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) should_write_sb = true; } - if (!ic->sb->version || ic->sb->version > SB_VERSION_4) { + if (!ic->sb->version || ic->sb->version > SB_VERSION_5) { r = -EINVAL; ti->error = "Unknown version"; goto bad; @@ -4197,6 +4361,20 @@ try_smaller_buffer: r = -ENOMEM; goto bad; } + } else { + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { + ti->error = "Recalculate can only be specified with internal_hash"; + r = -EINVAL; + goto bad; + } + } + + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && + le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && + dm_integrity_disable_recalculate(ic)) { + ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\""; + r = -EOPNOTSUPP; + goto bad; } ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, @@ -4376,7 +4554,7 @@ static void dm_integrity_dtr(struct dm_target *ti) static struct target_type integrity_target = { .name = "integrity", - .version = {1, 6, 0}, + .version = {1, 7, 0}, .module = THIS_MODULE, .features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY, .ctr = dm_integrity_ctr, diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 00774b5d7668..92db0f5e7f28 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c @@ -146,6 +146,8 @@ static int linear_report_zones(struct dm_target *ti, return blkdev_report_zones(lc->dev->bdev, sector, nr_zones, dm_report_zones_cb, args); } +#else +#define linear_report_zones NULL #endif static int linear_iterate_devices(struct dm_target *ti, @@ -227,13 +229,9 @@ static int linear_dax_zero_page_range(struct dm_target *ti, pgoff_t pgoff, static struct target_type linear_target = { .name = "linear", .version = {1, 4, 0}, -#ifdef CONFIG_BLK_DEV_ZONED .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT | - DM_TARGET_ZONED_HM, + DM_TARGET_ZONED_HM | DM_TARGET_PASSES_CRYPTO, .report_zones = linear_report_zones, -#else - .features = DM_TARGET_PASSES_INTEGRITY | DM_TARGET_NOWAIT, -#endif .module = THIS_MODULE, .ctr = linear_ctr, .dtr = linear_dtr, diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 23c38777e8f6..cab12b2251ba 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3729,10 +3729,10 @@ static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_opt(limits, chunk_size_bytes * mddev_data_stripes(rs)); /* - * RAID1 and RAID10 personalities require bio splitting, - * RAID0/4/5/6 don't and process large discard bios properly. + * RAID0 and RAID10 personalities require bio splitting, + * RAID1/4/5/6 don't and process large discard bios properly. 
*/ - if (rs_is_raid1(rs) || rs_is_raid10(rs)) { + if (rs_is_raid0(rs) || rs_is_raid10(rs)) { limits->discard_granularity = chunk_size_bytes; limits->max_discard_sectors = rs->md.chunk_sectors; } diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index fa09bc4e4c54..b0a82f29a2e4 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list) struct dm_raid1_bio_record { struct mirror *m; - /* if details->bi_disk == NULL, details were not saved */ + /* if details->bi_bdev == NULL, details were not saved */ struct dm_bio_details details; region_t write_region; }; @@ -1190,7 +1190,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) struct dm_raid1_bio_record *bio_record = dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); - bio_record->details.bi_disk = NULL; + bio_record->details.bi_bdev = NULL; if (rw == WRITE) { /* Save region for mirror_end_io() handler */ @@ -1257,7 +1257,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, goto out; if (unlikely(*error)) { - if (!bio_record->details.bi_disk) { + if (!bio_record->details.bi_bdev) { /* * There wasn't enough memory to record necessary * information for a retry or there was no other @@ -1282,7 +1282,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, bd = &bio_record->details; dm_bio_restore(bd, bio); - bio_record->details.bi_disk = NULL; + bio_record->details.bi_bdev = NULL; bio->bi_status = 0; queue_bio(ms, bio, rw); @@ -1292,7 +1292,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, } out: - bio_record->details.bi_disk = NULL; + bio_record->details.bi_bdev = NULL; return DM_ENDIO_DONE; } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 4668b2cd98f4..11890db71f3f 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -141,6 +141,11 @@ struct dm_snapshot { * for them to be committed. */ struct bio_list bios_queued_during_merge; + + /* + * Flush data after merge. 
+ */ + struct bio flush_bio; }; /* @@ -1121,6 +1126,17 @@ shut: static void error_bios(struct bio *bio); +static int flush_data(struct dm_snapshot *s) +{ + struct bio *flush_bio = &s->flush_bio; + + bio_reset(flush_bio); + bio_set_dev(flush_bio, s->origin->bdev); + flush_bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; + + return submit_bio_wait(flush_bio); +} + static void merge_callback(int read_err, unsigned long write_err, void *context) { struct dm_snapshot *s = context; @@ -1134,6 +1150,11 @@ static void merge_callback(int read_err, unsigned long write_err, void *context) goto shut; } + if (flush_data(s) < 0) { + DMERR("Flush after merge failed: shutting down merge"); + goto shut; + } + if (s->store->type->commit_merge(s->store, s->num_merging_chunks) < 0) { DMERR("Write error in exception store: shutting down merge"); @@ -1318,6 +1339,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->first_merging_chunk = 0; s->num_merging_chunks = 0; bio_list_init(&s->bios_queued_during_merge); + bio_init(&s->flush_bio, NULL, 0); /* Allocate hash table for COW data */ if (init_hash_tables(s)) { @@ -1504,6 +1526,8 @@ static void snapshot_dtr(struct dm_target *ti) dm_exception_store_destroy(s->store); + bio_uninit(&s->flush_bio); + dm_put_device(ti, s->cow); dm_put_device(ti, s->origin); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 188f41287f18..95391f78b8d5 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -187,6 +187,8 @@ static void free_devices(struct list_head *devices, struct mapped_device *md) } } +static void dm_table_destroy_keyslot_manager(struct dm_table *t); + void dm_table_destroy(struct dm_table *t) { unsigned int i; @@ -215,6 +217,8 @@ void dm_table_destroy(struct dm_table *t) dm_free_md_mempools(t->mempools); + dm_table_destroy_keyslot_manager(t); + kfree(t); } @@ -363,14 +367,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, { int r; dev_t dev; + unsigned int major, minor; + char dummy; struct dm_dev_internal *dd; struct dm_table *t = ti->table; BUG_ON(!t); - dev = dm_get_dev_t(path); - if (!dev) - return -ENODEV; + if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { + /* Extract the major/minor numbers */ + dev = MKDEV(major, minor); + if (MAJOR(dev) != major || MINOR(dev) != minor) + return -EOVERFLOW; + } else { + dev = dm_get_dev_t(path); + if (!dev) + return -ENODEV; + } dd = find_device(&t->devices, dev); if (!dd) { @@ -811,24 +824,24 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type) EXPORT_SYMBOL_GPL(dm_table_set_type); /* validate the dax capability of the target device span */ -int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, +int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { int blocksize = *(int *) data, id; bool rc; id = dax_read_lock(); - rc = dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len); + rc = !dax_supported(dev->dax_dev, dev->bdev, blocksize, start, len); dax_read_unlock(id); return rc; } /* Check devices support synchronous DAX */ -static int device_dax_synchronous(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_dax_synchronous_capable(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { - return dev->dax_dev && dax_synchronous(dev->dax_dev); + return !dev->dax_dev || !dax_synchronous(dev->dax_dev); } bool dm_table_supports_dax(struct dm_table *t, @@ -845,7 
+858,7 @@ bool dm_table_supports_dax(struct dm_table *t, return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, iterate_fn, blocksize)) + ti->type->iterate_devices(ti, iterate_fn, blocksize)) return false; } @@ -916,7 +929,7 @@ static int dm_table_determine_type(struct dm_table *t) verify_bio_based: /* We must use this table as bio-based */ t->type = DM_TYPE_BIO_BASED; - if (dm_table_supports_dax(t, device_supports_dax, &page_size) || + if (dm_table_supports_dax(t, device_not_dax_capable, &page_size) || (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) { t->type = DM_TYPE_DAX_BIO_BASED; } @@ -1194,6 +1207,210 @@ static int dm_table_register_integrity(struct dm_table *t) return 0; } +#ifdef CONFIG_BLK_INLINE_ENCRYPTION + +struct dm_keyslot_manager { + struct blk_keyslot_manager ksm; + struct mapped_device *md; +}; + +struct dm_keyslot_evict_args { + const struct blk_crypto_key *key; + int err; +}; + +static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct dm_keyslot_evict_args *args = data; + int err; + + err = blk_crypto_evict_key(bdev_get_queue(dev->bdev), args->key); + if (!args->err) + args->err = err; + /* Always try to evict the key from all devices. */ + return 0; +} + +/* + * When an inline encryption key is evicted from a device-mapper device, evict + * it from all the underlying devices. + */ +static int dm_keyslot_evict(struct blk_keyslot_manager *ksm, + const struct blk_crypto_key *key, unsigned int slot) +{ + struct dm_keyslot_manager *dksm = container_of(ksm, + struct dm_keyslot_manager, + ksm); + struct mapped_device *md = dksm->md; + struct dm_keyslot_evict_args args = { key }; + struct dm_table *t; + int srcu_idx; + int i; + struct dm_target *ti; + + t = dm_get_live_table(md, &srcu_idx); + if (!t) + return 0; + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + if (!ti->type->iterate_devices) + continue; + ti->type->iterate_devices(ti, dm_keyslot_evict_callback, &args); + } + dm_put_live_table(md, srcu_idx); + return args.err; +} + +static struct blk_ksm_ll_ops dm_ksm_ll_ops = { + .keyslot_evict = dm_keyslot_evict, +}; + +static int device_intersect_crypto_modes(struct dm_target *ti, + struct dm_dev *dev, sector_t start, + sector_t len, void *data) +{ + struct blk_keyslot_manager *parent = data; + struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm; + + blk_ksm_intersect_modes(parent, child); + return 0; +} + +void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm) +{ + struct dm_keyslot_manager *dksm = container_of(ksm, + struct dm_keyslot_manager, + ksm); + + if (!ksm) + return; + + blk_ksm_destroy(ksm); + kfree(dksm); +} + +static void dm_table_destroy_keyslot_manager(struct dm_table *t) +{ + dm_destroy_keyslot_manager(t->ksm); + t->ksm = NULL; +} + +/* + * Constructs and initializes t->ksm with a keyslot manager that + * represents the common set of crypto capabilities of the devices + * described by the dm_table. However, if the constructed keyslot + * manager does not support a superset of the crypto capabilities + * supported by the current keyslot manager of the mapped_device, + * it returns an error instead, since we don't support restricting + * crypto capabilities on table changes. Finally, if the constructed + * keyslot manager doesn't actually support any crypto modes at all, + * it just returns NULL. 
+ */ +static int dm_table_construct_keyslot_manager(struct dm_table *t) +{ + struct dm_keyslot_manager *dksm; + struct blk_keyslot_manager *ksm; + struct dm_target *ti; + unsigned int i; + bool ksm_is_empty = true; + + dksm = kmalloc(sizeof(*dksm), GFP_KERNEL); + if (!dksm) + return -ENOMEM; + dksm->md = t->md; + + ksm = &dksm->ksm; + blk_ksm_init_passthrough(ksm); + ksm->ksm_ll_ops = dm_ksm_ll_ops; + ksm->max_dun_bytes_supported = UINT_MAX; + memset(ksm->crypto_modes_supported, 0xFF, + sizeof(ksm->crypto_modes_supported)); + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (!dm_target_passes_crypto(ti->type)) { + blk_ksm_intersect_modes(ksm, NULL); + break; + } + if (!ti->type->iterate_devices) + continue; + ti->type->iterate_devices(ti, device_intersect_crypto_modes, + ksm); + } + + if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) { + DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); + dm_destroy_keyslot_manager(ksm); + return -EINVAL; + } + + /* + * If the new KSM doesn't actually support any crypto modes, we may as + * well represent it with a NULL ksm. + */ + ksm_is_empty = true; + for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) { + if (ksm->crypto_modes_supported[i]) { + ksm_is_empty = false; + break; + } + } + + if (ksm_is_empty) { + dm_destroy_keyslot_manager(ksm); + ksm = NULL; + } + + /* + * t->ksm is only set temporarily while the table is being set + * up, and it gets set to NULL after the capabilities have + * been transferred to the request_queue. + */ + t->ksm = ksm; + + return 0; +} + +static void dm_update_keyslot_manager(struct request_queue *q, + struct dm_table *t) +{ + if (!t->ksm) + return; + + /* Make the ksm less restrictive */ + if (!q->ksm) { + blk_ksm_register(t->ksm, q); + } else { + blk_ksm_update_capabilities(q->ksm, t->ksm); + dm_destroy_keyslot_manager(t->ksm); + } + t->ksm = NULL; +} + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static int dm_table_construct_keyslot_manager(struct dm_table *t) +{ + return 0; +} + +void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm) +{ +} + +static void dm_table_destroy_keyslot_manager(struct dm_table *t) +{ +} + +static void dm_update_keyslot_manager(struct request_queue *q, + struct dm_table *t) +{ +} + +#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ + /* * Prepares the table for use by building the indices, * setting the type, and allocating mempools. @@ -1220,6 +1437,12 @@ int dm_table_complete(struct dm_table *t) return r; } + r = dm_table_construct_keyslot_manager(t); + if (r) { + DMERR("could not construct keyslot manager."); + return r; + } + r = dm_table_alloc_md_mempools(t, t->md); if (r) DMERR("unable to allocate mempools"); @@ -1286,6 +1509,46 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } +/* + * type->iterate_devices() should be called when the sanity check needs to + * iterate and check all underlying data devices. iterate_devices() will + * iterate all underlying data devices until it encounters a non-zero return + * code, returned by whether the input iterate_devices_callout_fn, or + * iterate_devices() itself internally. + * + * For some target type (e.g. dm-stripe), one call of iterate_devices() may + * iterate multiple underlying devices internally, in which case a non-zero + * return code returned by iterate_devices_callout_fn will stop the iteration + * in advance. 
+ * + * Cases requiring _any_ underlying device supporting some kind of attribute, + * should use the iteration structure like dm_table_any_dev_attr(), or call + * it directly. @func should handle semantics of positive examples, e.g. + * capable of something. + * + * Cases requiring _all_ underlying devices supporting some kind of attribute, + * should use the iteration structure like dm_table_supports_nowait() or + * dm_table_supports_discards(). Or introduce dm_table_all_devs_attr() that + * uses an @anti_func that handle semantics of counter examples, e.g. not + * capable of something. So: return !dm_table_any_dev_attr(t, anti_func, data); + */ +static bool dm_table_any_dev_attr(struct dm_table *t, + iterate_devices_callout_fn func, void *data) +{ + struct dm_target *ti; + unsigned int i; + + for (i = 0; i < dm_table_get_num_targets(t); i++) { + ti = dm_table_get_target(t, i); + + if (ti->type->iterate_devices && + ti->type->iterate_devices(ti, func, data)) + return true; + } + + return false; +} + static int count_device(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data) { @@ -1322,13 +1585,13 @@ bool dm_table_has_no_data_devices(struct dm_table *table) return true; } -static int device_is_zoned_model(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_zoned_model(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); enum blk_zoned_model *zoned_model = data; - return q && blk_queue_zoned_model(q) == *zoned_model; + return blk_queue_zoned_model(q) != *zoned_model; } static bool dm_table_supports_zoned_model(struct dm_table *t, @@ -1345,37 +1608,20 @@ static bool dm_table_supports_zoned_model(struct dm_table *t, return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_is_zoned_model, &zoned_model)) + ti->type->iterate_devices(ti, device_not_zoned_model, &zoned_model)) return false; } return true; } -static int device_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_not_matches_zone_sectors(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); unsigned int *zone_sectors = data; - return q && blk_queue_zone_sectors(q) == *zone_sectors; -} - -static bool dm_table_matches_zone_sectors(struct dm_table *t, - unsigned int zone_sectors) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_matches_zone_sectors, &zone_sectors)) - return false; - } - - return true; + return blk_queue_zone_sectors(q) != *zone_sectors; } static int validate_hardware_zoned_model(struct dm_table *table, @@ -1395,7 +1641,7 @@ static int validate_hardware_zoned_model(struct dm_table *table, if (!zone_sectors || !is_power_of_2(zone_sectors)) return -EINVAL; - if (!dm_table_matches_zone_sectors(table, zone_sectors)) { + if (dm_table_any_dev_attr(table, device_not_matches_zone_sectors, &zone_sectors)) { DMERR("%s: zone sectors is not consistent across all devices", dm_device_name(table->md)); return -EINVAL; @@ -1524,7 +1770,7 @@ static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, unsigned long flush = (unsigned long) data; struct request_queue *q = bdev_get_queue(dev->bdev); - return 
q && (q->queue_flags & flush); + return (q->queue_flags & flush); } static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush) @@ -1569,29 +1815,12 @@ static int device_dax_write_cache_enabled(struct dm_target *ti, return false; } -static int dm_table_supports_dax_write_cache(struct dm_table *t) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, - device_dax_write_cache_enabled, NULL)) - return true; - } - - return false; -} - -static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, - sector_t start, sector_t len, void *data) +static int device_is_rotational(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && blk_queue_nonrot(q); + return !blk_queue_nonrot(q); } static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, @@ -1599,24 +1828,7 @@ static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && !blk_queue_add_random(q); -} - -static bool dm_table_all_devices_attribute(struct dm_table *t, - iterate_devices_callout_fn func) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, func, NULL)) - return false; - } - - return true; + return !blk_queue_add_random(q); } static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev, @@ -1624,7 +1836,7 @@ static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *de { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && !q->limits.max_write_same_sectors; + return !q->limits.max_write_same_sectors; } static bool dm_table_supports_write_same(struct dm_table *t) @@ -1651,7 +1863,7 @@ static int device_not_write_zeroes_capable(struct dm_target *ti, struct dm_dev * { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && !q->limits.max_write_zeroes_sectors; + return !q->limits.max_write_zeroes_sectors; } static bool dm_table_supports_write_zeroes(struct dm_table *t) @@ -1678,7 +1890,7 @@ static int device_not_nowait_capable(struct dm_target *ti, struct dm_dev *dev, { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && !blk_queue_nowait(q); + return !blk_queue_nowait(q); } static bool dm_table_supports_nowait(struct dm_table *t) @@ -1705,7 +1917,7 @@ static int device_not_discard_capable(struct dm_target *ti, struct dm_dev *dev, { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && !blk_queue_discard(q); + return !blk_queue_discard(q); } static bool dm_table_supports_discards(struct dm_table *t) @@ -1739,7 +1951,7 @@ static int device_not_secure_erase_capable(struct dm_target *ti, { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && !blk_queue_secure_erase(q); + return !blk_queue_secure_erase(q); } static bool dm_table_supports_secure_erase(struct dm_table *t) @@ -1767,28 +1979,7 @@ static int device_requires_stable_pages(struct dm_target *ti, { struct request_queue *q = bdev_get_queue(dev->bdev); - return q && blk_queue_stable_writes(q); -} - -/* - * If any underlying device requires stable pages, a table must require - * them as well. 
Only targets that support iterate_devices are considered: - * don't want error, zero, etc to require stable pages. - */ -static bool dm_table_requires_stable_pages(struct dm_table *t) -{ - struct dm_target *ti; - unsigned i; - - for (i = 0; i < dm_table_get_num_targets(t); i++) { - ti = dm_table_get_target(t, i); - - if (ti->type->iterate_devices && - ti->type->iterate_devices(ti, device_requires_stable_pages, NULL)) - return true; - } - - return false; + return blk_queue_stable_writes(q); } void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, @@ -1828,22 +2019,22 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } blk_queue_write_cache(q, wc, fua); - if (dm_table_supports_dax(t, device_supports_dax, &page_size)) { + if (dm_table_supports_dax(t, device_not_dax_capable, &page_size)) { blk_queue_flag_set(QUEUE_FLAG_DAX, q); - if (dm_table_supports_dax(t, device_dax_synchronous, NULL)) + if (dm_table_supports_dax(t, device_not_dax_synchronous_capable, NULL)) set_dax_synchronous(t->md->dax_dev); } else blk_queue_flag_clear(QUEUE_FLAG_DAX, q); - if (dm_table_supports_dax_write_cache(t)) + if (dm_table_any_dev_attr(t, device_dax_write_cache_enabled, NULL)) dax_write_cache(t->md->dax_dev, true); /* Ensure that all underlying devices are non-rotational. */ - if (dm_table_all_devices_attribute(t, device_is_nonrot)) - blk_queue_flag_set(QUEUE_FLAG_NONROT, q); - else + if (dm_table_any_dev_attr(t, device_is_rotational, NULL)) blk_queue_flag_clear(QUEUE_FLAG_NONROT, q); + else + blk_queue_flag_set(QUEUE_FLAG_NONROT, q); if (!dm_table_supports_write_same(t)) q->limits.max_write_same_sectors = 0; @@ -1855,8 +2046,11 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, /* * Some devices don't use blk_integrity but still want stable pages * because they do their own checksumming. + * If any underlying device requires stable pages, a table must require + * them as well. Only targets that support iterate_devices are considered: + * don't want error, zero, etc to require stable pages. */ - if (dm_table_requires_stable_pages(t)) + if (dm_table_any_dev_attr(t, device_requires_stable_pages, NULL)) blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q); else blk_queue_flag_clear(QUEUE_FLAG_STABLE_WRITES, q); @@ -1867,7 +2061,8 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not * have it set. */ - if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) + if (blk_queue_add_random(q) && + dm_table_any_dev_attr(t, device_is_not_random, NULL)) blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, q); /* @@ -1882,6 +2077,7 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, } #endif + dm_update_keyslot_manager(q, t); blk_queue_update_readahead(q); } diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index 6ebb2127f3e2..e75b20480e46 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -636,7 +636,7 @@ static int __check_incompat_features(struct thin_disk_superblock *disk_super, /* * Check for read-only metadata to skip the following RDWR checks. 
*/ - if (get_disk_ro(pmd->bdev->bd_disk)) + if (bdev_read_only(pmd->bdev)) return 0; features = le32_to_cpu(disk_super->compat_ro_flags) & ~THIN_FEATURE_COMPAT_RO_SUPP; diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c index d5223a0e5cc5..844c4be11768 100644 --- a/drivers/md/dm-writecache.c +++ b/drivers/md/dm-writecache.c @@ -148,6 +148,7 @@ struct dm_writecache { size_t metadata_sectors; size_t n_blocks; uint64_t seq_count; + sector_t data_device_sectors; void *block_start; struct wc_entry *entries; unsigned block_size; @@ -159,14 +160,22 @@ struct dm_writecache { bool overwrote_committed:1; bool memory_vmapped:1; + bool start_sector_set:1; bool high_wm_percent_set:1; bool low_wm_percent_set:1; bool max_writeback_jobs_set:1; bool autocommit_blocks_set:1; bool autocommit_time_set:1; + bool max_age_set:1; bool writeback_fua_set:1; bool flush_on_suspend:1; bool cleaner:1; + bool cleaner_set:1; + + unsigned high_wm_percent_value; + unsigned low_wm_percent_value; + unsigned autocommit_time_value; + unsigned max_age_value; unsigned writeback_all; struct workqueue_struct *writeback_wq; @@ -523,7 +532,7 @@ static void ssd_commit_superblock(struct dm_writecache *wc) region.bdev = wc->ssd_dev->bdev; region.sector = 0; - region.count = PAGE_SIZE; + region.count = PAGE_SIZE >> SECTOR_SHIFT; if (unlikely(region.sector + region.count > wc->metadata_sectors)) region.count = wc->metadata_sectors - region.sector; @@ -969,6 +978,8 @@ static void writecache_resume(struct dm_target *ti) wc_lock(wc); + wc->data_device_sectors = bdev_nr_sectors(wc->dev->bdev); + if (WC_MODE_PMEM(wc)) { persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size); } else { @@ -1638,6 +1649,10 @@ static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t void *address = memory_data(wc, e); persistent_memory_flush_cache(address, block_size); + + if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors)) + return true; + return bio_add_page(&wb->bio, persistent_memory_page(address), block_size, persistent_memory_page_offset(address)) != 0; } @@ -1709,6 +1724,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba if (writecache_has_error(wc)) { bio->bi_status = BLK_STS_IOERR; bio_endio(bio); + } else if (unlikely(!bio_sectors(bio))) { + bio->bi_status = BLK_STS_OK; + bio_endio(bio); } else { submit_bio(bio); } @@ -1752,6 +1770,14 @@ static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writebac e = f; } + if (unlikely(to.sector + to.count > wc->data_device_sectors)) { + if (to.sector >= wc->data_device_sectors) { + writecache_copy_endio(0, 0, c); + continue; + } + from.count = to.count = wc->data_device_sectors - to.sector; + } + dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c); __writeback_throttle(wc, wbl); @@ -2004,8 +2030,7 @@ static void writecache_dtr(struct dm_target *ti) if (wc->ssd_dev) dm_put_device(ti, wc->ssd_dev); - if (wc->entries) - vfree(wc->entries); + vfree(wc->entries); if (wc->memory_map) { if (WC_MODE_PMEM(wc)) @@ -2020,8 +2045,7 @@ static void writecache_dtr(struct dm_target *ti) if (wc->dm_io) dm_io_client_destroy(wc->dm_io); - if (wc->dirty_bitmap) - vfree(wc->dirty_bitmap); + vfree(wc->dirty_bitmap); kfree(wc); } @@ -2205,6 +2229,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1) goto invalid_optional; wc->start_sector = start_sector; + wc->start_sector_set = true; if (wc->start_sector != 
start_sector || wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT) goto invalid_optional; @@ -2214,6 +2239,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto invalid_optional; if (high_wm_percent < 0 || high_wm_percent > 100) goto invalid_optional; + wc->high_wm_percent_value = high_wm_percent; wc->high_wm_percent_set = true; } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) { string = dm_shift_arg(&as), opt_params--; @@ -2221,6 +2247,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) goto invalid_optional; if (low_wm_percent < 0 || low_wm_percent > 100) goto invalid_optional; + wc->low_wm_percent_value = low_wm_percent; wc->low_wm_percent_set = true; } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) { string = dm_shift_arg(&as), opt_params--; @@ -2240,6 +2267,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) if (autocommit_msecs > 3600000) goto invalid_optional; wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs); + wc->autocommit_time_value = autocommit_msecs; wc->autocommit_time_set = true; } else if (!strcasecmp(string, "max_age") && opt_params >= 1) { unsigned max_age_msecs; @@ -2249,7 +2277,10 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv) if (max_age_msecs > 86400000) goto invalid_optional; wc->max_age = msecs_to_jiffies(max_age_msecs); + wc->max_age_set = true; + wc->max_age_value = max_age_msecs; } else if (!strcasecmp(string, "cleaner")) { + wc->cleaner_set = true; wc->cleaner = true; } else if (!strcasecmp(string, "fua")) { if (WC_MODE_PMEM(wc)) { @@ -2455,7 +2486,6 @@ static void writecache_status(struct dm_target *ti, status_type_t type, struct dm_writecache *wc = ti->private; unsigned extra_args; unsigned sz = 0; - uint64_t x; switch (type) { case STATUSTYPE_INFO: @@ -2467,11 +2497,11 @@ static void writecache_status(struct dm_target *ti, status_type_t type, DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 
'p' : 's', wc->dev->name, wc->ssd_dev->name, wc->block_size); extra_args = 0; - if (wc->start_sector) + if (wc->start_sector_set) extra_args += 2; - if (wc->high_wm_percent_set && !wc->cleaner) + if (wc->high_wm_percent_set) extra_args += 2; - if (wc->low_wm_percent_set && !wc->cleaner) + if (wc->low_wm_percent_set) extra_args += 2; if (wc->max_writeback_jobs_set) extra_args += 2; @@ -2479,37 +2509,29 @@ static void writecache_status(struct dm_target *ti, status_type_t type, extra_args += 2; if (wc->autocommit_time_set) extra_args += 2; - if (wc->max_age != MAX_AGE_UNSPECIFIED) + if (wc->max_age_set) extra_args += 2; - if (wc->cleaner) + if (wc->cleaner_set) extra_args++; if (wc->writeback_fua_set) extra_args++; DMEMIT("%u", extra_args); - if (wc->start_sector) + if (wc->start_sector_set) DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector); - if (wc->high_wm_percent_set && !wc->cleaner) { - x = (uint64_t)wc->freelist_high_watermark * 100; - x += wc->n_blocks / 2; - do_div(x, (size_t)wc->n_blocks); - DMEMIT(" high_watermark %u", 100 - (unsigned)x); - } - if (wc->low_wm_percent_set && !wc->cleaner) { - x = (uint64_t)wc->freelist_low_watermark * 100; - x += wc->n_blocks / 2; - do_div(x, (size_t)wc->n_blocks); - DMEMIT(" low_watermark %u", 100 - (unsigned)x); - } + if (wc->high_wm_percent_set) + DMEMIT(" high_watermark %u", wc->high_wm_percent_value); + if (wc->low_wm_percent_set) + DMEMIT(" low_watermark %u", wc->low_wm_percent_value); if (wc->max_writeback_jobs_set) DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs); if (wc->autocommit_blocks_set) DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks); if (wc->autocommit_time_set) - DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies)); - if (wc->max_age != MAX_AGE_UNSPECIFIED) - DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age)); - if (wc->cleaner) + DMEMIT(" autocommit_time %u", wc->autocommit_time_value); + if (wc->max_age_set) + DMEMIT(" max_age %u", wc->max_age_value); + if (wc->cleaner_set) DMEMIT(" cleaner"); if (wc->writeback_fua_set) DMEMIT(" %sfua", wc->writeback_fua ? 
"" : "no"); @@ -2519,7 +2541,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type, static struct target_type writecache_target = { .name = "writecache", - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = writecache_ctr, .dtr = writecache_dtr, diff --git a/drivers/md/dm-zoned-metadata.c b/drivers/md/dm-zoned-metadata.c index b298fefb022e..039d17b28938 100644 --- a/drivers/md/dm-zoned-metadata.c +++ b/drivers/md/dm-zoned-metadata.c @@ -819,7 +819,7 @@ static int dmz_write_sb(struct dmz_metadata *zmd, unsigned int set) ret = dmz_rdwr_block(dev, REQ_OP_WRITE, zmd->sb[set].block, mblk->page); if (ret == 0) - ret = blkdev_issue_flush(dev->bdev, GFP_NOIO); + ret = blkdev_issue_flush(dev->bdev); return ret; } @@ -862,7 +862,7 @@ static int dmz_write_dirty_mblocks(struct dmz_metadata *zmd, /* Flush drive cache (this will also sync data) */ if (ret == 0) - ret = blkdev_issue_flush(dev->bdev, GFP_NOIO); + ret = blkdev_issue_flush(dev->bdev); return ret; } @@ -933,7 +933,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) /* If there are no dirty metadata blocks, just flush the device cache */ if (list_empty(&write_list)) { - ret = blkdev_issue_flush(dev->bdev, GFP_NOIO); + ret = blkdev_issue_flush(dev->bdev); goto err; } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b3c3c8b4cb42..50b693d776d6 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -28,6 +28,7 @@ #include <linux/refcount.h> #include <linux/part_stat.h> #include <linux/blk-crypto.h> +#include <linux/keyslot-manager.h> #define DM_MSG_PREFIX "core" @@ -105,12 +106,16 @@ struct dm_io { struct dm_target_io tio; }; +#define DM_TARGET_IO_BIO_OFFSET (offsetof(struct dm_target_io, clone)) +#define DM_IO_BIO_OFFSET \ + (offsetof(struct dm_target_io, clone) + offsetof(struct dm_io, tio)) + void *dm_per_bio_data(struct bio *bio, size_t data_size) { struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone); if (!tio->inside_dm_io) - return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; - return (char *)bio - offsetof(struct dm_target_io, clone) - offsetof(struct dm_io, tio) - data_size; + return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size; + return (char *)bio - DM_IO_BIO_OFFSET - data_size; } EXPORT_SYMBOL_GPL(dm_per_bio_data); @@ -118,9 +123,9 @@ struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) { struct dm_io *io = (struct dm_io *)((char *)data + data_size); if (io->magic == DM_IO_MAGIC) - return (struct bio *)((char *)io + offsetof(struct dm_io, tio) + offsetof(struct dm_target_io, clone)); + return (struct bio *)((char *)io + DM_IO_BIO_OFFSET); BUG_ON(io->magic != DM_TIO_MAGIC); - return (struct bio *)((char *)io + offsetof(struct dm_target_io, clone)); + return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET); } EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data); @@ -148,6 +153,16 @@ EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr); #define DM_NUMA_NODE NUMA_NO_NODE static int dm_numa_node = DM_NUMA_NODE; +#define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE) +static int swap_bios = DEFAULT_SWAP_BIOS; +static int get_swap_bios(void) +{ + int latch = READ_ONCE(swap_bios); + if (unlikely(latch <= 0)) + latch = DEFAULT_SWAP_BIOS; + return latch; +} + /* * For mempools pre-allocation at the table loading time. */ @@ -562,7 +577,7 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode, * subset of the parent bdev; require extra privileges. 
*/ if (!capable(CAP_SYS_RAWIO)) { - DMWARN_LIMIT( + DMDEBUG_LIMIT( "%s: sending ioctl %x to DM device without required privilege.", current->comm, cmd); r = -ENOIOCTLCMD; @@ -969,6 +984,11 @@ void disable_write_zeroes(struct mapped_device *md) limits->max_write_zeroes_sectors = 0; } +static bool swap_bios_limit(struct dm_target *ti, struct bio *bio) +{ + return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios); +} + static void clone_endio(struct bio *bio) { blk_status_t error = bio->bi_status; @@ -977,16 +997,17 @@ static void clone_endio(struct bio *bio) struct mapped_device *md = tio->io->md; dm_endio_fn endio = tio->ti->type->end_io; struct bio *orig_bio = io->orig_bio; + struct request_queue *q = bio->bi_bdev->bd_disk->queue; if (unlikely(error == BLK_STS_TARGET)) { if (bio_op(bio) == REQ_OP_DISCARD && - !bio->bi_disk->queue->limits.max_discard_sectors) + !q->limits.max_discard_sectors) disable_discard(md); else if (bio_op(bio) == REQ_OP_WRITE_SAME && - !bio->bi_disk->queue->limits.max_write_same_sectors) + !q->limits.max_write_same_sectors) disable_write_same(md); else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !bio->bi_disk->queue->limits.max_write_zeroes_sectors) + !q->limits.max_write_zeroes_sectors) disable_write_zeroes(md); } @@ -996,7 +1017,7 @@ static void clone_endio(struct bio *bio) */ if (bio_op(orig_bio) == REQ_OP_ZONE_APPEND) { sector_t written_sector = bio->bi_iter.bi_sector; - struct request_queue *q = orig_bio->bi_disk->queue; + struct request_queue *q = orig_bio->bi_bdev->bd_disk->queue; u64 mask = (u64)blk_queue_zone_sectors(q) - 1; orig_bio->bi_iter.bi_sector += written_sector & mask; @@ -1019,6 +1040,11 @@ static void clone_endio(struct bio *bio) } } + if (unlikely(swap_bios_limit(tio->ti, bio))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } + free_tio(tio); dec_pending(io, error); } @@ -1128,7 +1154,7 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd if (!map) goto out; - ret = dm_table_supports_dax(map, device_supports_dax, &blocksize); + ret = dm_table_supports_dax(map, device_not_dax_capable, &blocksize); out: dm_put_live_table(md, srcu_idx); @@ -1252,6 +1278,22 @@ void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors) } EXPORT_SYMBOL_GPL(dm_accept_partial_bio); +static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) +{ + mutex_lock(&md->swap_bios_lock); + while (latch < md->swap_bios) { + cond_resched(); + down(&md->swap_bios_semaphore); + md->swap_bios--; + } + while (latch > md->swap_bios) { + cond_resched(); + up(&md->swap_bios_semaphore); + md->swap_bios++; + } + mutex_unlock(&md->swap_bios_lock); +} + static blk_qc_t __map_bio(struct dm_target_io *tio) { int r; @@ -1271,6 +1313,14 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) atomic_inc(&io->io_count); sector = clone->bi_iter.bi_sector; + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + int latch = get_swap_bios(); + if (unlikely(latch != md->swap_bios)) + __set_swap_bios_limit(md, latch); + down(&md->swap_bios_semaphore); + } + r = ti->type->map(ti, clone); switch (r) { case DM_MAPIO_SUBMITTED: @@ -1281,10 +1331,18 @@ static blk_qc_t __map_bio(struct dm_target_io *tio) ret = submit_bio_noacct(clone); break; case DM_MAPIO_KILL: + if (unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } free_tio(tio); dec_pending(io, BLK_STS_IOERR); break; case DM_MAPIO_REQUEUE: + if 
(unlikely(swap_bios_limit(ti, clone))) { + struct mapped_device *md = io->md; + up(&md->swap_bios_semaphore); + } free_tio(tio); dec_pending(io, BLK_STS_DM_REQUEUE); break; @@ -1422,8 +1480,7 @@ static int __send_empty_flush(struct clone_info *ci) */ bio_init(&flush_bio, NULL, 0); flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; - flush_bio.bi_disk = ci->io->md->disk; - bio_associate_blkg(&flush_bio); + bio_set_dev(&flush_bio, ci->io->md->disk->part0); ci->bio = &flush_bio; ci->sector_count = 0; @@ -1626,7 +1683,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md, static blk_qc_t dm_submit_bio(struct bio *bio) { - struct mapped_device *md = bio->bi_disk->private_data; + struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; blk_qc_t ret = BLK_QC_T_NONE; int srcu_idx; struct dm_table *map; @@ -1718,6 +1775,19 @@ static const struct dax_operations dm_dax_ops; static void dm_wq_work(struct work_struct *work); +#ifdef CONFIG_BLK_INLINE_ENCRYPTION +static void dm_queue_destroy_keyslot_manager(struct request_queue *q) +{ + dm_destroy_keyslot_manager(q->ksm); +} + +#else /* CONFIG_BLK_INLINE_ENCRYPTION */ + +static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q) +{ +} +#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */ + static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) @@ -1739,14 +1809,17 @@ static void cleanup_mapped_device(struct mapped_device *md) put_disk(md->disk); } - if (md->queue) + if (md->queue) { + dm_queue_destroy_keyslot_manager(md->queue); blk_cleanup_queue(md->queue); + } cleanup_srcu_struct(&md->io_barrier); mutex_destroy(&md->suspend_lock); mutex_destroy(&md->type_lock); mutex_destroy(&md->table_devices_lock); + mutex_destroy(&md->swap_bios_lock); dm_mq_cleanup_mapped_device(md); } @@ -1814,6 +1887,10 @@ static struct mapped_device *alloc_dev(int minor) init_waitqueue_head(&md->eventq); init_completion(&md->kobj_holder.completion); + md->swap_bios = get_swap_bios(); + sema_init(&md->swap_bios_semaphore, md->swap_bios); + mutex_init(&md->swap_bios_lock); + md->disk->major = _major; md->disk->first_minor = minor; md->disk->fops = &dm_blk_dops; @@ -2849,8 +2926,8 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu case DM_TYPE_BIO_BASED: case DM_TYPE_DAX_BIO_BASED: pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size); - front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); - io_front_pad = roundup(front_pad, __alignof__(struct dm_io)) + offsetof(struct dm_io, tio); + front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + DM_TARGET_IO_BIO_OFFSET; + io_front_pad = roundup(per_io_data_size, __alignof__(struct dm_io)) + DM_IO_BIO_OFFSET; ret = bioset_init(&pools->io_bs, pool_size, io_front_pad, 0); if (ret) goto out; @@ -3097,6 +3174,9 @@ MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools"); module_param(dm_numa_node, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations"); +module_param(swap_bios, int, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs"); + MODULE_DESCRIPTION(DM_NAME " driver"); MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index fffe1e289c53..b441ad772c18 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -73,7 +73,7 @@ void dm_table_free_md_mempools(struct dm_table *t); struct dm_md_mempools 
*dm_table_get_md_mempools(struct dm_table *t); bool dm_table_supports_dax(struct dm_table *t, iterate_devices_callout_fn fn, int *blocksize); -int device_supports_dax(struct dm_target *ti, struct dm_dev *dev, +int device_not_dax_capable(struct dm_target *ti, struct dm_dev *dev, sector_t start, sector_t len, void *data); void dm_lock_md_type(struct mapped_device *md); diff --git a/drivers/md/md-linear.c b/drivers/md/md-linear.c index 68cac7d19278..63ed8329a98d 100644 --- a/drivers/md/md-linear.c +++ b/drivers/md/md-linear.c @@ -252,7 +252,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio) start_sector + data_offset; if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bio->bi_disk->queue))) { + !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) { /* Just ignore it */ bio_endio(bio); } else { diff --git a/drivers/md/md.c b/drivers/md/md.c index ca409428b4fc..21da0c48f6c2 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -340,24 +340,6 @@ static int start_readonly; */ static bool create_on_open = true; -struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, - struct mddev *mddev) -{ - if (!mddev || !bioset_initialized(&mddev->bio_set)) - return bio_alloc(gfp_mask, nr_iovecs); - - return bio_alloc_bioset(gfp_mask, nr_iovecs, &mddev->bio_set); -} -EXPORT_SYMBOL_GPL(bio_alloc_mddev); - -static struct bio *md_bio_alloc_sync(struct mddev *mddev) -{ - if (!mddev || !bioset_initialized(&mddev->sync_set)) - return bio_alloc(GFP_NOIO, 1); - - return bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); -} - /* * We have a system wide 'event count' that is incremented * on any 'interesting' event, and readers of /proc/mdstat @@ -463,8 +445,8 @@ struct md_io { struct mddev *mddev; bio_end_io_t *orig_bi_end_io; void *orig_bi_private; + struct block_device *orig_bi_bdev; unsigned long start_time; - struct block_device *part; }; static void md_end_io(struct bio *bio) @@ -472,7 +454,7 @@ static void md_end_io(struct bio *bio) struct md_io *md_io = bio->bi_private; struct mddev *mddev = md_io->mddev; - part_end_io_acct(md_io->part, bio, md_io->start_time); + bio_end_io_acct_remapped(bio, md_io->start_time, md_io->orig_bi_bdev); bio->bi_end_io = md_io->orig_bi_end_io; bio->bi_private = md_io->orig_bi_private; @@ -486,7 +468,7 @@ static void md_end_io(struct bio *bio) static blk_qc_t md_submit_bio(struct bio *bio) { const int rw = bio_data_dir(bio); - struct mddev *mddev = bio->bi_disk->private_data; + struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; if (mddev == NULL || mddev->pers == NULL) { bio_io_error(bio); @@ -514,12 +496,12 @@ static blk_qc_t md_submit_bio(struct bio *bio) md_io->mddev = mddev; md_io->orig_bi_end_io = bio->bi_end_io; md_io->orig_bi_private = bio->bi_private; + md_io->orig_bi_bdev = bio->bi_bdev; bio->bi_end_io = md_end_io; bio->bi_private = md_io; - md_io->start_time = part_start_io_acct(mddev->gendisk, - &md_io->part, bio); + md_io->start_time = bio_start_io_acct(bio); } /* bio could be mergeable after passing to underlayer */ @@ -613,7 +595,7 @@ static void submit_flushes(struct work_struct *ws) atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending); rcu_read_unlock(); - bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); + bi = bio_alloc_bioset(GFP_NOIO, 0, &mddev->bio_set); bi->bi_end_io = md_end_flush; bi->bi_private = rdev; bio_set_dev(bi, rdev->bdev); @@ -639,8 +621,10 @@ static void md_submit_flush_data(struct work_struct *ws) * could wait for this and below md_handle_request could wait for those * bios because of suspend check */ 
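/*
 * Editorial sketch, not part of the patch: the spin_lock_irq(&mddev->lock) /
 * spin_unlock_irq(&mddev->lock) pair added just below publishes
 * prev_flush_start and flush_bio together, so a flush submitter that samples
 * the two fields under the same lock cannot observe one updated without the
 * other.  A minimal user-space analogue of that publish-under-a-lock pattern,
 * using POSIX threads; struct flush_state, need_new_flush() and the exact
 * condition checked are illustrative stand-ins, not md_flush_request().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct flush_state {
	pthread_mutex_t lock;
	unsigned long prev_flush_start;	/* when the last finished flush started */
	unsigned long start_flush;	/* when the current flush started */
	void *flush_bio;		/* non-NULL while a flush is in progress */
};

static struct flush_state st = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Completion side: update both fields under one lock, as the hunk does. */
static void flush_done(struct flush_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->prev_flush_start = s->start_flush;
	s->flush_bio = NULL;
	pthread_mutex_unlock(&s->lock);
}

/* Submission side: start a new flush only if none is pending and the last
 * completed flush started before this request was issued. */
static bool need_new_flush(struct flush_state *s, unsigned long req_start, void *bio)
{
	bool start = false;

	pthread_mutex_lock(&s->lock);
	if (!s->flush_bio && s->prev_flush_start < req_start) {
		s->flush_bio = bio;
		s->start_flush = req_start;
		start = true;
	}
	pthread_mutex_unlock(&s->lock);
	return start;
}

int main(void)
{
	int dummy_bio;

	if (need_new_flush(&st, 1, &dummy_bio))
		flush_done(&st);
	printf("prev_flush_start=%lu\n", st.prev_flush_start);
	return 0;
}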
+ spin_lock_irq(&mddev->lock); mddev->prev_flush_start = mddev->start_flush; mddev->flush_bio = NULL; + spin_unlock_irq(&mddev->lock); wake_up(&mddev->sb_wait); if (bio->bi_iter.bi_size == 0) { @@ -997,7 +981,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, if (test_bit(Faulty, &rdev->flags)) return; - bio = md_bio_alloc_sync(mddev); + bio = bio_alloc_bioset(GFP_NOIO, 1, &mddev->sync_set); atomic_inc(&rdev->nr_pending); @@ -1029,29 +1013,29 @@ int md_super_wait(struct mddev *mddev) int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, struct page *page, int op, int op_flags, bool metadata_op) { - struct bio *bio = md_bio_alloc_sync(rdev->mddev); - int ret; + struct bio bio; + struct bio_vec bvec; + + bio_init(&bio, &bvec, 1); if (metadata_op && rdev->meta_bdev) - bio_set_dev(bio, rdev->meta_bdev); + bio_set_dev(&bio, rdev->meta_bdev); else - bio_set_dev(bio, rdev->bdev); - bio_set_op_attrs(bio, op, op_flags); + bio_set_dev(&bio, rdev->bdev); + bio.bi_opf = op | op_flags; if (metadata_op) - bio->bi_iter.bi_sector = sector + rdev->sb_start; + bio.bi_iter.bi_sector = sector + rdev->sb_start; else if (rdev->mddev->reshape_position != MaxSector && (rdev->mddev->reshape_backwards == (sector >= rdev->mddev->reshape_position))) - bio->bi_iter.bi_sector = sector + rdev->new_data_offset; + bio.bi_iter.bi_sector = sector + rdev->new_data_offset; else - bio->bi_iter.bi_sector = sector + rdev->data_offset; - bio_add_page(bio, page, size, 0); + bio.bi_iter.bi_sector = sector + rdev->data_offset; + bio_add_page(&bio, page, size, 0); - submit_bio_wait(bio); + submit_bio_wait(&bio); - ret = !bio->bi_status; - bio_put(bio); - return ret; + return !bio.bi_status; } EXPORT_SYMBOL_GPL(sync_page_io); @@ -2415,6 +2399,12 @@ int md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) } EXPORT_SYMBOL(md_integrity_add_rdev); +static bool rdev_read_only(struct md_rdev *rdev) +{ + return bdev_read_only(rdev->bdev) || + (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev)); +} + static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) { char b[BDEVNAME_SIZE]; @@ -2424,8 +2414,7 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) if (find_rdev(mddev, rdev->bdev->bd_dev)) return -EEXIST; - if ((bdev_read_only(rdev->bdev) || bdev_read_only(rdev->meta_bdev)) && - mddev->pers) + if (rdev_read_only(rdev) && mddev->pers) return -EROFS; /* make sure rdev->sectors exceeds mddev->dev_sectors */ @@ -5859,9 +5848,7 @@ int md_run(struct mddev *mddev) continue; sync_blockdev(rdev->bdev); invalidate_bdev(rdev->bdev); - if (mddev->ro != 1 && - (bdev_read_only(rdev->bdev) || - bdev_read_only(rdev->meta_bdev))) { + if (mddev->ro != 1 && rdev_read_only(rdev)) { mddev->ro = 1; if (mddev->gendisk) set_disk_ro(mddev->gendisk, 1); @@ -6156,7 +6143,7 @@ static int restart_array(struct mddev *mddev) if (test_bit(Journal, &rdev->flags) && !test_bit(Faulty, &rdev->flags)) has_journal = true; - if (bdev_read_only(rdev->bdev)) + if (rdev_read_only(rdev)) has_readonly = true; } rcu_read_unlock(); diff --git a/drivers/md/md.h b/drivers/md/md.h index 34070ab30a8a..bcbba1b5ec4a 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -556,7 +556,7 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors) { - atomic_add(nr_sectors, &bio->bi_disk->sync_io); + md_sync_acct(bio->bi_bdev, nr_sectors); } struct md_personality @@ -742,8 +742,6 @@ extern void md_rdev_clear(struct 
md_rdev *rdev); extern void md_handle_request(struct mddev *mddev, struct bio *bio); extern void mddev_suspend(struct mddev *mddev); extern void mddev_resume(struct mddev *mddev); -extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, - struct mddev *mddev); extern void md_reload_sb(struct mddev *mddev, int raid_disk); extern void md_update_sb(struct mddev *mddev, int force); @@ -793,14 +791,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev, static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio) { if (bio_op(bio) == REQ_OP_WRITE_SAME && - !bio->bi_disk->queue->limits.max_write_same_sectors) + !bio->bi_bdev->bd_disk->queue->limits.max_write_same_sectors) mddev->queue->limits.max_write_same_sectors = 0; } static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio) { if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !bio->bi_disk->queue->limits.max_write_zeroes_sectors) + !bio->bi_bdev->bd_disk->queue->limits.max_write_zeroes_sectors) mddev->queue->limits.max_write_zeroes_sectors = 0; } diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h index 564896659dd4..fe073d92f01e 100644 --- a/drivers/md/persistent-data/dm-btree-internal.h +++ b/drivers/md/persistent-data/dm-btree-internal.h @@ -100,7 +100,7 @@ struct dm_block *shadow_parent(struct shadow_spine *s); int shadow_has_parent(struct shadow_spine *s); -int shadow_root(struct shadow_spine *s); +dm_block_t shadow_root(struct shadow_spine *s); /* * Some inlines. diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c index e03cb9e48773..8a2bfbfb218b 100644 --- a/drivers/md/persistent-data/dm-btree-spine.c +++ b/drivers/md/persistent-data/dm-btree-spine.c @@ -235,7 +235,7 @@ int shadow_has_parent(struct shadow_spine *s) return s->count >= 2; } -int shadow_root(struct shadow_spine *s) +dm_block_t shadow_root(struct shadow_spine *s) { return s->root; } diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index c0347997f6ff..d2378765dc15 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -794,13 +794,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void *)bio->bi_disk; + struct md_rdev *rdev = (void *)bio->bi_bdev; bio->bi_next = NULL; bio_set_dev(bio, rdev->bdev); if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bio->bi_disk->queue))) + !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) /* Just ignore it */ bio_endio(bio); else @@ -1104,7 +1104,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio, int i = 0; struct bio *behind_bio = NULL; - behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev); + behind_bio = bio_alloc_bioset(GFP_NOIO, vcnt, &r1_bio->mddev->bio_set); if (!behind_bio) return; @@ -1520,7 +1520,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio, trace_block_bio_remap(mbio, disk_devt(mddev->gendisk), r1_bio->sector); /* flush_pending_writes() needs access to the rdev so...*/ - mbio->bi_disk = (void *)conf->mirrors[i].rdev; + mbio->bi_bdev = (void *)conf->mirrors[i].rdev; cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); if (cb) diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c5d88ef6a45c..a9ae7d113492 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -882,13 +882,13 @@ 
static void flush_pending_writes(struct r10conf *conf) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void*)bio->bi_disk; + struct md_rdev *rdev = (void*)bio->bi_bdev; bio->bi_next = NULL; bio_set_dev(bio, rdev->bdev); if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bio->bi_disk->queue))) + !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) /* Just ignore it */ bio_endio(bio); else @@ -1075,13 +1075,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) while (bio) { /* submit pending writes */ struct bio *next = bio->bi_next; - struct md_rdev *rdev = (void*)bio->bi_disk; + struct md_rdev *rdev = (void*)bio->bi_bdev; bio->bi_next = NULL; bio_set_dev(bio, rdev->bdev); if (test_bit(Faulty, &rdev->flags)) { bio_io_error(bio); } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && - !blk_queue_discard(bio->bi_disk->queue))) + !blk_queue_discard(bio->bi_bdev->bd_disk->queue))) /* Just ignore it */ bio_endio(bio); else @@ -1253,7 +1253,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio, trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk), r10_bio->sector); /* flush_pending_writes() needs access to the rdev so...*/ - mbio->bi_disk = (void *)rdev; + mbio->bi_bdev = (void *)rdev; atomic_inc(&r10_bio->remaining); @@ -3003,7 +3003,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, /* Again, very different code for resync and recovery. * Both must result in an r10bio with a list of bios that - * have bi_end_io, bi_sector, bi_disk set, + * have bi_end_io, bi_sector, bi_bdev set, * and bi_private set to the r10bio. * For recovery, we may actually create several r10bios * with 2 bios in each, that correspond to the bios in the main one. 
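/*
 * Editorial sketch, not part of the patch: the raid1/raid10 hunks above keep
 * the existing trick of stashing the target rdev in the bio's device pointer
 * while the bio waits on a plug/pending list -- only the field changes from
 * bi_disk to bi_bdev -- and flush_pending_writes()/raid10_unplug() later cast
 * it back and call bio_set_dev() before submission.  A stand-alone user-space
 * illustration of that hand-off follows; the types and helpers are simplified
 * stand-ins, not the kernel definitions.
 */
#include <stdio.h>

struct block_device { const char *name; };
struct md_rdev { struct block_device bdev; };

struct bio {
	struct bio *bi_next;
	struct block_device *bi_bdev;	/* temporarily reused to hold an rdev pointer */
};

/* Stand-in for the kernel helper: point the bio at a real block device. */
static void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}

/* Queue time: remember the destination rdev in the (not yet needed) device slot. */
static void queue_write(struct bio *bio, struct md_rdev *rdev, struct bio **pending)
{
	bio->bi_bdev = (void *)rdev;
	bio->bi_next = *pending;
	*pending = bio;
}

/* Flush time: recover the rdev and retarget the bio before submitting it. */
static void flush_pending(struct bio *pending)
{
	while (pending) {
		struct bio *next = pending->bi_next;
		struct md_rdev *rdev = (void *)pending->bi_bdev;

		pending->bi_next = NULL;
		bio_set_dev(pending, &rdev->bdev);
		printf("submit bio to %s\n", pending->bi_bdev->name);
		pending = next;
	}
}

int main(void)
{
	struct md_rdev r0 = { { "sda" } }, r1 = { { "sdb" } };
	struct bio b0 = { 0 }, b1 = { 0 };
	struct bio *pending = NULL;

	queue_write(&b0, &r0, &pending);
	queue_write(&b1, &r1, &pending);
	flush_pending(pending);
	return 0;
}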
@@ -4531,7 +4531,7 @@ read_more: return sectors_done; } - read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); + read_bio = bio_alloc_bioset(GFP_KERNEL, RESYNC_PAGES, &mddev->bio_set); bio_set_dev(read_bio, rdev->bdev); read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr @@ -4539,10 +4539,6 @@ read_more: read_bio->bi_private = r10_bio; read_bio->bi_end_io = end_reshape_read; bio_set_op_attrs(read_bio, REQ_OP_READ, 0); - read_bio->bi_flags &= (~0UL << BIO_RESET_BITS); - read_bio->bi_status = 0; - read_bio->bi_vcnt = 0; - read_bio->bi_iter.bi_size = 0; r10_bio->master_bio = read_bio; r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index d0f540296fe9..e8c118e05dfd 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -1037,7 +1037,7 @@ static int ppl_recover(struct ppl_log *log, struct ppl_header *pplhdr, } /* flush the disk cache after recovery if necessary */ - ret = blkdev_issue_flush(rdev->bdev, GFP_KERNEL); + ret = blkdev_issue_flush(rdev->bdev); out: __free_page(page); return ret; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3a90cc0e43ca..5d57a5bd171f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5310,7 +5310,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) unsigned int chunk_sectors; unsigned int bio_sectors = bio_sectors(bio); - WARN_ON_ONCE(bio->bi_partno); + WARN_ON_ONCE(bio->bi_bdev->bd_partno); chunk_sectors = min(conf->chunk_sectors, conf->prev_chunk_sectors); return chunk_sectors >= @@ -5393,90 +5393,72 @@ static void raid5_align_endio(struct bio *bi) static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) { struct r5conf *conf = mddev->private; - int dd_idx; - struct bio* align_bi; + struct bio *align_bio; struct md_rdev *rdev; - sector_t end_sector; + sector_t sector, end_sector, first_bad; + int bad_sectors, dd_idx; if (!in_chunk_boundary(mddev, raid_bio)) { pr_debug("%s: non aligned\n", __func__); return 0; } - /* - * use bio_clone_fast to make a copy of the bio - */ - align_bi = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set); - if (!align_bi) - return 0; - /* - * set bi_end_io to a new function, and set bi_private to the - * original bio. 
- */ - align_bi->bi_end_io = raid5_align_endio; - align_bi->bi_private = raid_bio; - /* - * compute position - */ - align_bi->bi_iter.bi_sector = - raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, - 0, &dd_idx, NULL); - end_sector = bio_end_sector(align_bi); + sector = raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 0, + &dd_idx, NULL); + end_sector = bio_end_sector(raid_bio); + rcu_read_lock(); + if (r5c_big_stripe_cached(conf, sector)) + goto out_rcu_unlock; + rdev = rcu_dereference(conf->disks[dd_idx].replacement); if (!rdev || test_bit(Faulty, &rdev->flags) || rdev->recovery_offset < end_sector) { rdev = rcu_dereference(conf->disks[dd_idx].rdev); - if (rdev && - (test_bit(Faulty, &rdev->flags) || + if (!rdev) + goto out_rcu_unlock; + if (test_bit(Faulty, &rdev->flags) || !(test_bit(In_sync, &rdev->flags) || - rdev->recovery_offset >= end_sector))) - rdev = NULL; + rdev->recovery_offset >= end_sector)) + goto out_rcu_unlock; } - if (r5c_big_stripe_cached(conf, align_bi->bi_iter.bi_sector)) { - rcu_read_unlock(); - bio_put(align_bi); + atomic_inc(&rdev->nr_pending); + rcu_read_unlock(); + + align_bio = bio_clone_fast(raid_bio, GFP_NOIO, &mddev->bio_set); + bio_set_dev(align_bio, rdev->bdev); + align_bio->bi_end_io = raid5_align_endio; + align_bio->bi_private = raid_bio; + align_bio->bi_iter.bi_sector = sector; + + raid_bio->bi_next = (void *)rdev; + + if (is_badblock(rdev, sector, bio_sectors(align_bio), &first_bad, + &bad_sectors)) { + bio_put(align_bio); + rdev_dec_pending(rdev, mddev); return 0; } - if (rdev) { - sector_t first_bad; - int bad_sectors; - - atomic_inc(&rdev->nr_pending); - rcu_read_unlock(); - raid_bio->bi_next = (void*)rdev; - bio_set_dev(align_bi, rdev->bdev); - - if (is_badblock(rdev, align_bi->bi_iter.bi_sector, - bio_sectors(align_bi), - &first_bad, &bad_sectors)) { - bio_put(align_bi); - rdev_dec_pending(rdev, mddev); - return 0; - } + /* No reshape active, so we can trust rdev->data_offset */ + align_bio->bi_iter.bi_sector += rdev->data_offset; - /* No reshape active, so we can trust rdev->data_offset */ - align_bi->bi_iter.bi_sector += rdev->data_offset; + spin_lock_irq(&conf->device_lock); + wait_event_lock_irq(conf->wait_for_quiescent, conf->quiesce == 0, + conf->device_lock); + atomic_inc(&conf->active_aligned_reads); + spin_unlock_irq(&conf->device_lock); - spin_lock_irq(&conf->device_lock); - wait_event_lock_irq(conf->wait_for_quiescent, - conf->quiesce == 0, - conf->device_lock); - atomic_inc(&conf->active_aligned_reads); - spin_unlock_irq(&conf->device_lock); + if (mddev->gendisk) + trace_block_bio_remap(align_bio, disk_devt(mddev->gendisk), + raid_bio->bi_iter.bi_sector); + submit_bio_noacct(align_bio); + return 1; - if (mddev->gendisk) - trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk), - raid_bio->bi_iter.bi_sector); - submit_bio_noacct(align_bi); - return 1; - } else { - rcu_read_unlock(); - bio_put(align_bi); - return 0; - } +out_rcu_unlock: + rcu_read_unlock(); + return 0; } static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) @@ -7661,7 +7643,7 @@ static int raid5_run(struct mddev *mddev) } /* device size must be a multiple of chunk size */ - mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); + mddev->dev_sectors &= ~((sector_t)mddev->chunk_sectors - 1); mddev->resync_max_sectors = mddev->dev_sectors; if (mddev->degraded > dirty_parity_disks && |