author     Dave Airlie <airlied@redhat.com>    2020-09-08 07:41:40 +0300
committer  Dave Airlie <airlied@redhat.com>    2020-09-08 07:41:40 +0300
commit     ce5c207c6b8dd9cdeaeeb2345b8a69335c0d98bf (patch)
tree       3a54614d597b7b92ada36c5ea26dc994216b628b /drivers/md
parent     3393649977f9a8847c659e282ea290d4b703295c (diff)
parent     f4d51dffc6c01a9e94650d95ce0104964f8ae822 (diff)
download   linux-ce5c207c6b8dd9cdeaeeb2345b8a69335c0d98bf.tar.xz
Merge tag 'v5.9-rc4' into drm-next
Backmerge 5.9-rc4 as there is a nasty qxl conflict
that needs to be resolved.
Signed-off-by: Dave Airlie <airlied@redhat.com>
Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/bcache/journal.c                    |  2
-rw-r--r--   drivers/md/bcache/util.c                       | 14
-rw-r--r--   drivers/md/dm-cache-metadata.c                 |  8
-rw-r--r--   drivers/md/dm-crypt.c                          |  6
-rw-r--r--   drivers/md/dm-integrity.c                      | 12
-rw-r--r--   drivers/md/dm-mpath.c                          | 24
-rw-r--r--   drivers/md/dm-thin-metadata.c                  | 10
-rw-r--r--   drivers/md/dm-writecache.c                     | 12
-rw-r--r--   drivers/md/dm.c                                |  2
-rw-r--r--   drivers/md/md-autodetect.c                     |  4
-rw-r--r--   drivers/md/md-bitmap.c                         |  2
-rw-r--r--   drivers/md/persistent-data/dm-block-manager.c  | 14
-rw-r--r--   drivers/md/raid5.c                             | 11
13 files changed, 81 insertions, 40 deletions
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 77fbfd52edcf..c1227bdb57e7 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -608,7 +608,7 @@ static void do_journal_discard(struct cache *ca)
                 ca->sb.njournal_buckets;
 
         atomic_set(&ja->discard_in_flight, DISCARD_READY);
-        /* fallthrough */
+        fallthrough;
 
     case DISCARD_READY:
         if (ja->discard_idx == ja->last_idx)
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 62fb917f7a4f..ae380bc3992e 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -33,27 +33,27 @@ int bch_ ## name ## _h(const char *cp, type *res)        \
     case 'y':                        \
     case 'z':                        \
         u++;                         \
-        /* fall through */           \
+        fallthrough;                 \
     case 'e':                        \
         u++;                         \
-        /* fall through */           \
+        fallthrough;                 \
     case 'p':                        \
         u++;                         \
-        /* fall through */           \
+        fallthrough;                 \
     case 't':                        \
         u++;                         \
-        /* fall through */           \
+        fallthrough;                 \
     case 'g':                        \
         u++;                         \
-        /* fall through */           \
+        fallthrough;                 \
     case 'm':                        \
         u++;                         \
-        /* fall through */           \
+        fallthrough;                 \
     case 'k':                        \
         u++;                         \
         if (e++ == cp)               \
             return -EINVAL;          \
-        /* fall through */           \
+        fallthrough;                 \
     case '\n':                       \
     case '\0':                       \
         if (*e == '\n')              \
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 151aa95775be..af6d4f898e4c 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -537,12 +537,16 @@ static int __create_persistent_data_objects(struct dm_cache_metadata *cmd,
                       CACHE_MAX_CONCURRENT_LOCKS);
     if (IS_ERR(cmd->bm)) {
         DMERR("could not create block manager");
-        return PTR_ERR(cmd->bm);
+        r = PTR_ERR(cmd->bm);
+        cmd->bm = NULL;
+        return r;
     }
 
     r = __open_or_format_metadata(cmd, may_format_device);
-    if (r)
+    if (r) {
         dm_block_manager_destroy(cmd->bm);
+        cmd->bm = NULL;
+    }
 
     return r;
 }
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 148960721254..380386c36921 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -739,7 +739,7 @@ static int crypt_iv_eboiv_gen(struct crypt_config *cc, u8 *iv,
     u8 buf[MAX_CIPHER_BLOCKSIZE] __aligned(__alignof__(__le64));
     struct skcipher_request *req;
     struct scatterlist src, dst;
-    struct crypto_wait wait;
+    DECLARE_CRYPTO_WAIT(wait);
     int err;
 
     req = skcipher_request_alloc(any_tfm(cc), GFP_NOIO);
@@ -936,7 +936,7 @@ static int crypt_iv_elephant(struct crypt_config *cc, struct dm_crypt_request *d
     u8 *es, *ks, *data, *data2, *data_offset;
     struct skcipher_request *req;
     struct scatterlist *sg, *sg2, src, dst;
-    struct crypto_wait wait;
+    DECLARE_CRYPTO_WAIT(wait);
     int i, r;
 
     req = skcipher_request_alloc(elephant->tfm, GFP_NOIO);
@@ -1552,7 +1552,7 @@ static blk_status_t crypt_convert(struct crypt_config *cc,
         case -EBUSY:
             wait_for_completion(&ctx->restart);
             reinit_completion(&ctx->restart);
-            /* fall through */
+            fallthrough;
         /*
          * The request is queued and processed asynchronously,
          * completion function kcryptd_async_done() will be called.
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 8c8d940e532e..3fc3757def55 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -2487,6 +2487,7 @@ next_chunk:
     range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
     if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
         if (ic->mode == 'B') {
+            block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
             DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
             queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
         }
@@ -2564,6 +2565,17 @@ next_chunk:
         goto err;
     }
 
+    if (ic->mode == 'B') {
+        sector_t start, end;
+        start = (range.logical_sector >>
+             (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+             (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+        end = ((range.logical_sector + range.n_sectors) >>
+             (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
+             (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
+        block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
+    }
+
 advance_and_next:
     cond_resched();
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 53645a6f474c..de4da825ade6 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1287,17 +1287,25 @@ static void multipath_wait_for_pg_init_completion(struct multipath *m)
 static void flush_multipath_work(struct multipath *m)
 {
     if (m->hw_handler_name) {
-        set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
-        smp_mb__after_atomic();
+        unsigned long flags;
+
+        if (!atomic_read(&m->pg_init_in_progress))
+            goto skip;
+
+        spin_lock_irqsave(&m->lock, flags);
+        if (atomic_read(&m->pg_init_in_progress) &&
+            !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) {
+            spin_unlock_irqrestore(&m->lock, flags);
 
-        if (atomic_read(&m->pg_init_in_progress))
             flush_workqueue(kmpath_handlerd);
-        multipath_wait_for_pg_init_completion(m);
+            multipath_wait_for_pg_init_completion(m);
 
-        clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
-        smp_mb__after_atomic();
+            spin_lock_irqsave(&m->lock, flags);
+            clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
+        }
+        spin_unlock_irqrestore(&m->lock, flags);
     }
-
+skip:
     if (m->queue_mode == DM_TYPE_BIO_BASED)
         flush_work(&m->process_queued_bios);
     flush_work(&m->trigger_event);
@@ -1554,7 +1562,7 @@ static void pg_init_done(void *data, int errors)
     case SCSI_DH_RETRY:
         /* Wait before retrying. */
         delay_retry = true;
-        /* fall through */
+        fallthrough;
     case SCSI_DH_IMM_RETRY:
     case SCSI_DH_RES_TEMP_UNAVAIL:
         if (pg_init_limit_reached(m, pgpath))
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 76b6b323bf4b..b461836b6d26 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -739,12 +739,16 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
                       THIN_MAX_CONCURRENT_LOCKS);
     if (IS_ERR(pmd->bm)) {
         DMERR("could not create block manager");
-        return PTR_ERR(pmd->bm);
+        r = PTR_ERR(pmd->bm);
+        pmd->bm = NULL;
+        return r;
     }
 
     r = __open_or_format_metadata(pmd, format_device);
-    if (r)
+    if (r) {
         dm_block_manager_destroy(pmd->bm);
+        pmd->bm = NULL;
+    }
 
     return r;
 }
@@ -954,7 +958,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
     }
 
     pmd_write_lock_in_core(pmd);
-    if (!dm_bm_is_read_only(pmd->bm) && !pmd->fail_io) {
+    if (!pmd->fail_io && !dm_bm_is_read_only(pmd->bm)) {
         r = __commit_transaction(pmd);
         if (r < 0)
             DMWARN("%s: __commit_transaction() failed, error = %d",
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 86dbe0c8b45c..6271d1e741cf 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -231,6 +231,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
     pfn_t pfn;
     int id;
     struct page **pages;
+    sector_t offset;
 
     wc->memory_vmapped = false;
 
@@ -245,9 +246,16 @@ static int persistent_memory_claim(struct dm_writecache *wc)
         goto err1;
     }
 
+    offset = get_start_sect(wc->ssd_dev->bdev);
+    if (offset & (PAGE_SIZE / 512 - 1)) {
+        r = -EINVAL;
+        goto err1;
+    }
+    offset >>= PAGE_SHIFT - 9;
+
     id = dax_read_lock();
 
-    da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
+    da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
     if (da < 0) {
         wc->memory_map = NULL;
         r = da;
@@ -269,7 +277,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
         i = 0;
         do {
             long daa;
-            daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
+            daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
                         NULL, &pfn);
             if (daa <= 0) {
                 r = daa ? daa : -EINVAL;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 32fa6499739f..fb0255d25e4b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1021,7 +1021,7 @@ static void clone_endio(struct bio *bio)
         switch (r) {
         case DM_ENDIO_REQUEUE:
             error = BLK_STS_DM_REQUEUE;
-            /*FALLTHRU*/
+            fallthrough;
         case DM_ENDIO_DONE:
             break;
         case DM_ENDIO_INCOMPLETE:
diff --git a/drivers/md/md-autodetect.c b/drivers/md/md-autodetect.c
index 6bbec89976a7..2cf973722f59 100644
--- a/drivers/md/md-autodetect.c
+++ b/drivers/md/md-autodetect.c
@@ -102,10 +102,10 @@ static int __init md_setup(char *str)
             pername = "raid0";
             break;
         }
-        /* FALL THROUGH */
+        fallthrough;
     case 1: /* the first device is numeric */
         str = str1;
-        /* FALL THROUGH */
+        fallthrough;
     case 0:
         md_setup_args[ent].level = LEVEL_NONE;
         pername="super-block";
diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c
index d61b524ae440..b10c51988c8e 100644
--- a/drivers/md/md-bitmap.c
+++ b/drivers/md/md-bitmap.c
@@ -1433,7 +1433,7 @@ int md_bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long s
         case 0:
             md_bitmap_file_set_bit(bitmap, offset);
             md_bitmap_count_page(&bitmap->counts, offset, 1);
-            /* fall through */
+            fallthrough;
         case 1:
             *bmc = 2;
         }
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index 749ec268d957..54c089a50b15 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -493,7 +493,7 @@ int dm_bm_write_lock(struct dm_block_manager *bm,
     void *p;
     int r;
 
-    if (bm->read_only)
+    if (dm_bm_is_read_only(bm))
         return -EPERM;
 
     p = dm_bufio_read(bm->bufio, b, (struct dm_buffer **) result);
@@ -562,7 +562,7 @@ int dm_bm_write_lock_zero(struct dm_block_manager *bm,
     struct buffer_aux *aux;
     void *p;
 
-    if (bm->read_only)
+    if (dm_bm_is_read_only(bm))
         return -EPERM;
 
     p = dm_bufio_new(bm->bufio, b, (struct dm_buffer **) result);
@@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(dm_bm_unlock);
 
 int dm_bm_flush(struct dm_block_manager *bm)
 {
-    if (bm->read_only)
+    if (dm_bm_is_read_only(bm))
         return -EPERM;
 
     return dm_bufio_write_dirty_buffers(bm->bufio);
@@ -616,19 +616,21 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b)
 
 bool dm_bm_is_read_only(struct dm_block_manager *bm)
 {
-    return bm->read_only;
+    return (bm ? bm->read_only : true);
 }
 EXPORT_SYMBOL_GPL(dm_bm_is_read_only);
 
 void dm_bm_set_read_only(struct dm_block_manager *bm)
 {
-    bm->read_only = true;
+    if (bm)
+        bm->read_only = true;
 }
 EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
 
 void dm_bm_set_read_write(struct dm_block_manager *bm)
 {
-    bm->read_only = false;
+    if (bm)
+        bm->read_only = false;
 }
 EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index ef0fd4830803..225380efd1e2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4083,7 +4083,7 @@ static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh,
             break;
         }
         dev = &sh->dev[s->failed_num[0]];
-        /* fall through */
+        fallthrough;
     case check_state_compute_result:
         sh->check_state = check_state_idle;
         if (!dev)
@@ -4214,7 +4214,7 @@ static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh,
 
         /* we have 2-disk failure */
         BUG_ON(s->failed != 2);
-        /* fall through */
+        fallthrough;
     case check_state_compute_result:
         sh->check_state = check_state_idle;
 
@@ -6514,9 +6514,12 @@ raid5_store_stripe_size(struct mddev *mddev, const char *page, size_t len)
 
     /*
      * The value should not be bigger than PAGE_SIZE. It requires to
-     * be multiple of DEFAULT_STRIPE_SIZE.
+     * be multiple of DEFAULT_STRIPE_SIZE and the value should be power
+     * of two.
      */
-    if (new % DEFAULT_STRIPE_SIZE != 0 || new > PAGE_SIZE || new == 0)
+    if (new % DEFAULT_STRIPE_SIZE != 0 ||
+            new > PAGE_SIZE || new == 0 ||
+            new != roundup_pow_of_two(new))
         return -EINVAL;
 
     err = mddev_lock(mddev);
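
For context on the recurring change in these hunks: the comment markers /* fall through */, /*FALLTHRU*/ and /* FALL THROUGH */ are replaced by the kernel's fallthrough; pseudo-keyword. The standalone C fragment below is an illustrative sketch only, none of it comes from this merge. It uses a local stand-in definition of fallthrough; the kernel defines its own macro in its compiler headers, expanding to __attribute__((__fallthrough__)) where the compiler supports that attribute.

/*
 * Sketch: minimal stand-in for the kernel's fallthrough annotation.
 * Assumption: __has_attribute may be missing on older preprocessors,
 * so a fallback is provided before it is tested.
 */
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

#if __has_attribute(__fallthrough__)
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough do {} while (0)    /* no-op fallback, still compiles */
#endif

/* Scale a byte count by a k/m/g suffix; each case intentionally falls through. */
static unsigned long long scale_by_suffix(char suffix, unsigned long long val)
{
    switch (suffix) {
    case 'g':
        val <<= 10;
        fallthrough;
    case 'm':
        val <<= 10;
        fallthrough;
    case 'k':
        val <<= 10;
        break;
    default:
        break;
    }
    return val;
}

Unlike the comment forms, which only some compilers' -Wimplicit-fallthrough heuristics recognize, the attribute is part of the statement stream, so both GCC and Clang can verify that every silent fall-through was marked deliberately.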