From 35b785f7691aa82c4b0b262392439cfa6f22816d Mon Sep 17 00:00:00 2001
From: Tomasz Majchrzak
Date: Fri, 21 Oct 2016 16:26:57 +0200
Subject: md: add bad block support for external metadata

Add a new rdev flag which an external metadata handler can use to switch
bad block support on or off. If a new bad block is encountered, report it
via the rdev 'unacknowledged_bad_blocks' sysfs file. If a bad block has
been cleared, report the update via the rdev 'bad_blocks' sysfs file.

When bad block support is being removed, just clear the rdev flag. It is
not necessary to reset the badblocks->shift field. If bad blocks are
cleared or added at the same time, it is fine for those changes to be
applied to the structure: the array is in a blocked state, and a drive
that can no longer handle bad blocks will be removed from the array
before it is unblocked.

Simplify the state_show function by adding a separator at the end of
each string and overwriting the last separator with a newline.

Signed-off-by: Tomasz Majchrzak
Reviewed-by: Artur Paszkiewicz
Signed-off-by: Shaohua Li
---
 drivers/md/md.h | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'drivers/md/md.h')

diff --git a/drivers/md/md.h b/drivers/md/md.h
index 2b2041773e79..21bd94fad96a 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -168,6 +168,9 @@ enum flag_bits {
				 * so it is safe to remove without
				 * another synchronize_rcu() call.
				 */
+	ExternalBbl,		/* External metadata provides bad
+				 * block management for a disk
+				 */
 };

 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
--
cgit v1.2.3
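The sysfs plumbing this commit relies on can be exercised from user space. A minimal sketch, assuming an array md0 with an external-metadata member exposed as dev-sda1 (both names are assumptions; substitute the rdev directory that exists on your system): write the "external_bbl" token to the per-device state file to set ExternalBbl, then read the bad-block lists the handler is notified about.

/* Toggle external bad-block management on one md member and read back
 * the not-yet-acknowledged bad blocks. Paths are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_state(const char *path, const char *token)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	/* "external_bbl" sets ExternalBbl, "-external_bbl" clears it */
	if (write(fd, token, strlen(token)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *base = "/sys/block/md0/md/dev-sda1"; /* assumed rdev dir */
	char path[256], buf[4096];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "%s/state", base);
	if (write_state(path, "external_bbl"))
		perror("enable external bbl");

	/* New bad blocks are reported here first, then move to bad_blocks
	 * once the metadata handler acknowledges them. */
	snprintf(path, sizeof(path), "%s/unacknowledged_bad_blocks", base);
	fd = open(path, O_RDONLY);
	if (fd >= 0) {
		n = read(fd, buf, sizeof(buf) - 1);
		if (n > 0) {
			buf[n] = '\0';
			printf("unacknowledged: %s", buf);
		}
		close(fd);
	}
	return 0;
}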
From be306c2989804ca5b90388df66fd3cf28ec74967 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Wed, 9 Nov 2016 10:21:33 +1100
Subject: md: define mddev flags, recovery flags and r1bio state bits using enums

This is less error prone than using individual #defines.

Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/md.h    | 76 ++++++++++++++++++++++++----------------------
 drivers/md/raid1.h | 18 +++++++------
 2 files changed, 46 insertions(+), 48 deletions(-)

(limited to 'drivers/md/md.h')

diff --git a/drivers/md/md.h b/drivers/md/md.h
index 21bd94fad96a..af6b33c30d2d 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -192,6 +192,25 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s,
				int sectors, int is_new);
 struct md_cluster_info;

+enum mddev_flags {
+	MD_CHANGE_DEVS,		/* Some device status has changed */
+	MD_CHANGE_CLEAN,	/* transition to or from 'clean' */
+	MD_CHANGE_PENDING,	/* switch from 'clean' to 'active' in progress */
+	MD_ARRAY_FIRST_USE,	/* First use of array, needs initialization */
+	MD_CLOSING,		/* If set, we are closing the array, do not open
+				 * it then */
+	MD_JOURNAL_CLEAN,	/* A raid with journal is already clean */
+	MD_HAS_JOURNAL,		/* The raid array has journal feature set */
+	MD_RELOAD_SB,		/* Reload the superblock because another node
+				 * updated it.
+				 */
+	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
+				   * already took resync lock, need to
+				   * release the lock */
+};
+
+#define MD_UPDATE_SB_FLAGS (BIT(MD_CHANGE_DEVS) | \
+			    BIT(MD_CHANGE_CLEAN) | \
+			    BIT(MD_CHANGE_PENDING))	/* If these are set, md_update_sb needed */
 struct mddev {
	void				*private;
	struct md_personality		*pers;
@@ -199,21 +218,6 @@ struct mddev {
	int				md_minor;
	struct list_head		disks;
	unsigned long			flags;
-#define MD_CHANGE_DEVS	0	/* Some device status has changed */
-#define MD_CHANGE_CLEAN 1	/* transition to or from 'clean' */
-#define MD_CHANGE_PENDING 2	/* switch from 'clean' to 'active' in progress */
-#define MD_UPDATE_SB_FLAGS (1 | 2 | 4)	/* If these are set, md_update_sb needed */
-#define MD_ARRAY_FIRST_USE 3	/* First use of array, needs initialization */
-#define MD_CLOSING	4	/* If set, we are closing the array, do not open
-				 * it then */
-#define MD_JOURNAL_CLEAN 5	/* A raid with journal is already clean */
-#define MD_HAS_JOURNAL	6	/* The raid array has journal feature set */
-#define MD_RELOAD_SB	7	/* Reload the superblock because another node
-				 * updated it.
-				 */
-#define MD_CLUSTER_RESYNC_LOCKED 8 /* cluster raid only, which means node
-				    * already took resync lock, need to
-				    * release the lock */
	int				suspended;
	atomic_t			active_io;
@@ -307,31 +311,6 @@ struct mddev {
	int				parallel_resync;
	int				ok_start_degraded;
-	/* recovery/resync flags
-	 * NEEDED:   we might need to start a resync/recover
-	 * RUNNING:  a thread is running, or about to be started
-	 * SYNC:     actually doing a resync, not a recovery
-	 * RECOVER:  doing recovery, or need to try it.
-	 * INTR:     resync needs to be aborted for some reason
-	 * DONE:     thread is done and is waiting to be reaped
-	 * REQUEST:  user-space has requested a sync (used with SYNC)
-	 * CHECK:    user-space request for check-only, no repair
-	 * RESHAPE:  A reshape is happening
-	 * ERROR:    sync-action interrupted because io-error
-	 *
-	 * If neither SYNC or RESHAPE are set, then it is a recovery.
-	 */
-#define MD_RECOVERY_RUNNING	0
-#define MD_RECOVERY_SYNC	1
-#define MD_RECOVERY_RECOVER	2
-#define MD_RECOVERY_INTR	3
-#define MD_RECOVERY_DONE	4
-#define MD_RECOVERY_NEEDED	5
-#define MD_RECOVERY_REQUESTED	6
-#define MD_RECOVERY_CHECK	7
-#define MD_RECOVERY_RESHAPE	8
-#define MD_RECOVERY_FROZEN	9
-#define MD_RECOVERY_ERROR	10
	unsigned long			recovery;
	/* If a RAID personality determines that recovery (of a particular
@@ -445,6 +424,23 @@ struct mddev {
	unsigned int			good_device_nr;	/* good device num within cluster raid */
 };

+enum recovery_flags {
+	/*
+	 * If neither SYNC or RESHAPE are set, then it is a recovery.
+	 */
+	MD_RECOVERY_RUNNING,	/* a thread is running, or about to be started */
+	MD_RECOVERY_SYNC,	/* actually doing a resync, not a recovery */
+	MD_RECOVERY_RECOVER,	/* doing recovery, or need to try it. */
+	MD_RECOVERY_INTR,	/* resync needs to be aborted for some reason */
+	MD_RECOVERY_DONE,	/* thread is done and is waiting to be reaped */
+	MD_RECOVERY_NEEDED,	/* we might need to start a resync/recover */
+	MD_RECOVERY_REQUESTED,	/* user-space has requested a sync (used with SYNC) */
+	MD_RECOVERY_CHECK,	/* user-space request for check-only, no repair */
+	MD_RECOVERY_RESHAPE,	/* A reshape is happening */
+	MD_RECOVERY_FROZEN,	/* User request to abort, and not restart, any action */
+	MD_RECOVERY_ERROR,	/* sync-action interrupted because io-error */
+};
+
 static inline int __must_check mddev_lock(struct mddev *mddev)
 {
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
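The win here is subtler than it looks. The old literal mask (1 | 2 | 4) equals 0x7, which is the right answer only because the three flags happened to occupy bits 0, 1 and 2; nothing in the source said so. The BIT()-built mask stays correct if the enum is ever reordered or extended. A standalone illustration (BIT() is defined locally so the example builds outside the kernel tree):

/* Why BIT(enum) masks are less fragile than hand-written constants. */
#include <assert.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))

enum mddev_flags {
	MD_CHANGE_DEVS,		/* bit 0 */
	MD_CHANGE_CLEAN,	/* bit 1 */
	MD_CHANGE_PENDING,	/* bit 2 */
};

#define MD_UPDATE_SB_FLAGS (BIT(MD_CHANGE_DEVS) | \
			    BIT(MD_CHANGE_CLEAN) | \
			    BIT(MD_CHANGE_PENDING))

int main(void)
{
	/* The old "(1 | 2 | 4)" encoded the same three bits, but only by
	 * the accident that the flags were numbered 0, 1 and 2. Inserting
	 * a flag updates BIT()-built masks automatically; a raw constant
	 * silently goes stale. */
	assert(MD_UPDATE_SB_FLAGS == (1 | 2 | 4));
	printf("mask = 0x%lx\n", MD_UPDATE_SB_FLAGS);
	return 0;
}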
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 61c39b390cd8..5ec19449779d 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -161,14 +161,15 @@ struct r1bio {
 };

 /* bits for r1bio.state */
-#define R1BIO_Uptodate	0
-#define R1BIO_IsSync	1
-#define R1BIO_Degraded	2
-#define R1BIO_BehindIO	3
+enum r1bio_state {
+	R1BIO_Uptodate,
+	R1BIO_IsSync,
+	R1BIO_Degraded,
+	R1BIO_BehindIO,
 /* Set ReadError on bios that experience a readerror so that
  * raid1d knows what to do with them.
  */
-#define R1BIO_ReadError 4
+	R1BIO_ReadError,
 /* For write-behind requests, we call bi_end_io when
  * the last non-write-behind device completes, providing
  * any write was successful.  Otherwise we call when
  * with failure when last write completes (and all failed).
  * Record that bi_end_io was called with this flag...
  */
-#define R1BIO_Returned 6
+	R1BIO_Returned,
 /* If a write for this request means we can clear some
  * known-bad-block records, we set this flag
  */
-#define R1BIO_MadeGood 7
-#define R1BIO_WriteError 8
+	R1BIO_MadeGood,
+	R1BIO_WriteError,
+};
 #endif
--
cgit v1.2.3
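One behavioural nuance of a #define-to-enum conversion is worth checking: values shift wherever the old numbering had gaps. The mddev and recovery flags keep their historical values (they count up from 0 with no holes), but R1BIO_Returned was 6 with no bit 5 ever defined, so the enum packs it down to 5. These bits never leave memory, so nothing breaks here, but a compile-time check documents the expectation. A standalone sketch mirroring the enums above:

/* Compile-time sanity checks for the #define -> enum conversion. */
enum mddev_flags {
	MD_CHANGE_DEVS, MD_CHANGE_CLEAN, MD_CHANGE_PENDING,
	MD_ARRAY_FIRST_USE, MD_CLOSING, MD_JOURNAL_CLEAN,
	MD_HAS_JOURNAL, MD_RELOAD_SB, MD_CLUSTER_RESYNC_LOCKED,
};

enum r1bio_state {
	R1BIO_Uptodate, R1BIO_IsSync, R1BIO_Degraded, R1BIO_BehindIO,
	R1BIO_ReadError, R1BIO_Returned, R1BIO_MadeGood, R1BIO_WriteError,
};

/* The mddev flags keep their old bit numbers (0..8, no gaps)... */
_Static_assert(MD_CLUSTER_RESYNC_LOCKED == 8, "mddev flag values preserved");

/* ...but R1BIO_Returned moves from bit 6 to bit 5, because the old
 * numbering skipped 5. Harmless for in-memory-only bits, yet exactly
 * the kind of silent shift an assert like this makes visible. */
_Static_assert(R1BIO_ReadError == 4, "matches old #define");
_Static_assert(R1BIO_Returned == 5, "enum packs out the old gap at 5");

int main(void) { return 0; }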
From 688834e6ae6b21e3d98b5cf2586aa4a9b515c3a0 Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Fri, 18 Nov 2016 16:16:11 +1100
Subject: md/failfast: add failfast flag for md to be used by some personalities.

This patch just adds a 'failfast' per-device flag which can be stored
in v0.90 or v1.x metadata. The flag is not used yet, but the intent is
that it can be used for mirrored (raid1/raid10) arrays where low
latency is more important than keeping all devices on-line.

Setting the flag for a device effectively gives permission for that
device to be marked as Faulty and excluded from the array on the first
error. The underlying driver will be directed not to retry requests
that result in failures. There is a proviso: a device must not be
marked Faulty if that would cause the array as a whole to fail; it may
only be marked Faulty while the array remains functional, though
degraded.

Failures on read requests will cause the device to be marked as Faulty
immediately so that further reads will avoid that device. No attempt
will be made to correct read errors by over-writing with the correct
data.

It is expected that if transient errors, such as cable unplug, are
possible, then something in user-space will revalidate failed devices
and re-add them when they appear to be working again.

Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/md.c                | 27 +++++++++++++++++++++++++++
 drivers/md/md.h                |  6 ++++++
 include/uapi/linux/raid/md_p.h |  7 ++++++-
 3 files changed, 39 insertions(+), 1 deletion(-)

(limited to 'drivers/md/md.h')

diff --git a/drivers/md/md.c b/drivers/md/md.c
index d3cef771e422..2cf0e89cc56c 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1164,6 +1164,8 @@ static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
+		if (desc->state & (1<<MD_DISK_FAILFAST))
+			set_bit(FailFast, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
@@ -1289,6 +1291,8 @@ static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
+		if (test_bit(FailFast, &rdev2->flags))
+			d->state |= (1<<MD_DISK_FAILFAST);
	}
	for (i=0 ; i < mddev->raid_disks ; i++) {
@@ -1673,6 +1677,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
+		if (sb->devflags & FailFast1)
+			set_bit(FailFast, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
@@ -1711,6 +1717,10 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);
+	if (test_bit(FailFast, &rdev->flags))
+		sb->devflags |= FailFast1;
+	else
+		sb->devflags &= ~FailFast1;

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
@@ -2557,6 +2567,8 @@ state_show(struct md_rdev *rdev, char *page)
		len += sprintf(page+len, "replacement%s", sep);
	if (test_bit(ExternalBbl, &flags))
		len += sprintf(page+len, "external_bbl%s", sep);
+	if (test_bit(FailFast, &flags))
+		len += sprintf(page+len, "failfast%s", sep);

	if (len)
		len -= strlen(sep);
@@ -2579,6 +2591,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
	 *            so that it gets rebuilt based on bitmap
	 *  write_error - sets WriteErrorSeen
	 *  -write_error - clears WriteErrorSeen
+	 *  {,-}failfast - set/clear FailFast
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
@@ -2637,6 +2650,12 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
+	} else if (cmd_match(buf, "failfast")) {
+		set_bit(FailFast, &rdev->flags);
+		err = 0;
+	} else if (cmd_match(buf, "-failfast")) {
+		clear_bit(FailFast, &rdev->flags);
+		err = 0;
	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
		   !test_bit(Journal, &rdev->flags)) {
		if (rdev->mddev->pers == NULL) {
@@ -5942,6 +5961,8 @@ static int get_disk_info(struct mddev *mddev, void __user * arg)
		info.state |= (1<<MD_DISK_JOURNAL);
	if (test_bit(WriteMostly, &rdev->flags))
		info.state |= (1<<MD_DISK_WRITEMOSTLY);
+	if (test_bit(FailFast, &rdev->flags))
+		info.state |= (1<<MD_DISK_FAILFAST);
@@ ... @@ static int add_new_disk(struct mddev *mddev, mdu_disk_info_t *info)
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
		else
			clear_bit(WriteMostly, &rdev->flags);
+		if (info->state & (1<<MD_DISK_FAILFAST))
+			set_bit(FailFast, &rdev->flags);
+		else
+			clear_bit(FailFast, &rdev->flags);
@@ ... @@
		if (info->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
+		if (info->state & (1<<MD_DISK_FAILFAST))
+			set_bit(FailFast, &rdev->flags);

		if (!mddev->persistent) {
			pr_debug("md: nonpersistent superblock ...\n");
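The state_store() additions above mean the flag can be flipped at runtime through the same per-device state file used elsewhere in md. A minimal sketch (array and member names are assumptions):

/* Drive the new {,-}failfast state tokens from user space. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int rdev_state_write(const char *rdev_dir, const char *token)
{
	char path[256];
	ssize_t w;
	int fd;

	snprintf(path, sizeof(path), "%s/state", rdev_dir);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	w = write(fd, token, strlen(token));
	close(fd);
	return w < 0 ? -1 : 0;
}

int main(void)
{
	const char *dir = "/sys/block/md0/md/dev-sdb1"; /* assumed rdev dir */

	if (rdev_state_write(dir, "failfast"))	/* sets FailFast */
		perror("failfast");
	if (rdev_state_write(dir, "-failfast"))	/* clears it again */
		perror("-failfast");
	return 0;
}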
diff --git a/drivers/md/md.h b/drivers/md/md.h
index af6b33c30d2d..bc6712ef8c81 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -171,6 +171,12 @@ enum flag_bits {
	ExternalBbl,		/* External metadata provides bad
				 * block management for a disk
				 */
+	FailFast,		/* Minimal retries should be attempted on
+				 * this device, so use REQ_FAILFAST_DEV.
+				 * Also don't try to repair failed reads.
+				 * It is expected that no bad block log
+				 * is present.
+				 */
 };

 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index c3e654c6d518..9930f3e9040f 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -84,6 +84,10 @@
 #define MD_DISK_CANDIDATE	5 /* disk is added as spare (local) until confirmed
				   * For clustered environments only.
				   */
+#define MD_DISK_FAILFAST	10 /* Send REQ_FAILFAST if there are multiple
+				    * devices available - and don't try to
+				    * correct read errors.
+				    */

 #define MD_DISK_WRITEMOSTLY	9 /* disk is "write-mostly" in RAID1 config.
				   * read requests will only be sent here in
@@ -265,8 +269,9 @@ struct mdp_superblock_1 {
	__le32	dev_number;	/* permanent identifier of this device - not role in raid */
	__le32	cnt_corrected_read; /* number of read errors that were corrected by re-writing */
	__u8	device_uuid[16]; /* user-space setable, ignored by kernel */
-	__u8	devflags;	/* per-device flags.  Only one defined...*/
+	__u8	devflags;	/* per-device flags.  Only two defined...*/
 #define WriteMostly1	1	/* mask for writemostly flag in above */
+#define FailFast1	2	/* Should avoid retries and fixups and just fail */

	/* Bad block log.  If there are any bad blocks the feature flag is set.
	 * If offset and size are non-zero, that space is reserved and available */
--
cgit v1.2.3
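Because MD_DISK_FAILFAST lives in the state word of the classic ioctl API, user space can also observe it without sysfs. A sketch using the long-standing GET_DISK_INFO ioctl; it assumes /dev/md0 exists, slot 0 is populated, and the installed kernel headers carry this patch:

/* Read a member's state via GET_DISK_INFO and decode the failfast bit. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_p.h>	/* MD_DISK_FAILFAST (with this patch) */
#include <linux/raid/md_u.h>	/* GET_DISK_INFO, mdu_disk_info_t */

int main(void)
{
	mdu_disk_info_t info = { .number = 0 };	/* slot 0: an assumption */
	int fd = open("/dev/md0", O_RDONLY);

	if (fd < 0 || ioctl(fd, GET_DISK_INFO, &info) < 0) {
		perror("GET_DISK_INFO");
		return 1;
	}
	printf("disk %d: major=%d minor=%d state=%#x failfast=%s\n",
	       info.number, info.major, info.minor, info.state,
	       (info.state & (1 << MD_DISK_FAILFAST)) ? "yes" : "no");
	close(fd);
	return 0;
}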
From 46533ff7fefb7e9e3539494f5873b00091caa8eb Mon Sep 17 00:00:00 2001
From: NeilBrown
Date: Fri, 18 Nov 2016 16:16:11 +1100
Subject: md: Use REQ_FAILFAST_* on metadata writes where appropriate

This can only be supported on personalities which ensure that md_error()
never causes the array to enter the 'failed' state, i.e. if marking a
device Faulty would make some data inaccessible, the device's status is
left as non-Faulty. This is true for RAID1 and RAID10.

If we get a failure writing metadata but the device doesn't fail, it
must be the last device, so we re-write without FAILFAST to improve the
chance of success. We also flag the device as LastDev so that future
metadata updates don't waste time on failfast writes.

Signed-off-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/bitmap.c | 15 ++++++++++++---
 drivers/md/md.c     | 44 ++++++++++++++++++++++++++++++++++----------
 drivers/md/md.h     | 21 ++++++++++++++++++++-
 drivers/md/raid1.c  |  1 +
 drivers/md/raid10.c |  1 +
 5 files changed, 68 insertions(+), 14 deletions(-)

(limited to 'drivers/md/md.h')

diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index cf77cbf9ed22..c4621571b718 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -209,11 +209,13 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mddev)
 static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
 {
-	struct md_rdev *rdev = NULL;
+	struct md_rdev *rdev;
	struct block_device *bdev;
	struct mddev *mddev = bitmap->mddev;
	struct bitmap_storage *store = &bitmap->storage;

+restart:
+	rdev = NULL;
	while ((rdev = next_active_rdev(rdev, mddev)) != NULL) {
		int size = PAGE_SIZE;
		loff_t offset = mddev->bitmap_info.offset;
@@ -269,8 +271,8 @@ static int write_sb_page(struct bitmap *bitmap, struct page *page, int wait)
				       page);
	}

-	if (wait)
-		md_super_wait(mddev);
+	if (wait && md_super_wait(mddev) < 0)
+		goto restart;
	return 0;

 bad_alignment:
@@ -428,6 +430,13 @@ static void bitmap_wait_writes(struct bitmap *bitmap)
		wait_event(bitmap->write_wait,
			   atomic_read(&bitmap->pending_writes)==0);
	else
+		/* Note that we ignore the return value.  The writes
+		 * might have failed, but that would just mean that
+		 * some bits which should be cleared haven't been,
+		 * which is safe.  The relevant bitmap blocks will
+		 * probably get written again, but there is no great
+		 * loss if they aren't.
+		 */
		md_super_wait(bitmap->mddev);
 }

diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2cf0e89cc56c..62f3fb948b3e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -727,7 +727,13 @@ static void super_written(struct bio *bio)
	if (bio->bi_error) {
		pr_err("md: super_written gets error=%d\n", bio->bi_error);
		md_error(mddev, rdev);
-	}
+		if (!test_bit(Faulty, &rdev->flags)
+		    && (bio->bi_opf & MD_FAILFAST)) {
+			set_bit(MD_NEED_REWRITE, &mddev->flags);
+			set_bit(LastDev, &rdev->flags);
+		}
+	} else
+		clear_bit(LastDev, &rdev->flags);

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
@@ -744,7 +750,13 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
-	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+	struct bio *bio;
+	int ff = 0;
+
+	if (test_bit(Faulty, &rdev->flags))
+		return;
+
+	bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	atomic_inc(&rdev->nr_pending);

@@ -753,16 +765,24 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA);
+
+	if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) &&
+	    test_bit(FailFast, &rdev->flags) &&
+	    !test_bit(LastDev, &rdev->flags))
+		ff = MD_FAILFAST;
+	bio_set_op_attrs(bio, REQ_OP_WRITE, WRITE_FLUSH_FUA | ff);

	atomic_inc(&mddev->pending_writes);
	submit_bio(bio);
 }

-void md_super_wait(struct mddev *mddev)
+int md_super_wait(struct mddev *mddev)
 {
	/* wait for all superblock writes that were scheduled to complete */
	wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0);
+	if (test_and_clear_bit(MD_NEED_REWRITE, &mddev->flags))
+		return -EAGAIN;
+	return 0;
 }

 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
@@ -1334,9 +1354,10 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
	if (IS_ENABLED(CONFIG_LBDAF) && (u64)num_sectors >= (2ULL << 32) &&
	    rdev->mddev->level >= 1)
		num_sectors = (sector_t)(2ULL << 32) - 2;
-	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+	do {
+		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
-	md_super_wait(rdev->mddev);
+	} while (md_super_wait(rdev->mddev) < 0);
	return num_sectors;
 }

@@ -1877,9 +1898,10 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
-	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
-		       rdev->sb_page);
-	md_super_wait(rdev->mddev);
+	do {
+		md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
+			       rdev->sb_page);
+	} while (md_super_wait(rdev->mddev) < 0);
	return num_sectors;
 }

@@ -2416,6 +2438,7 @@ repeat:
	if (mddev->queue)
		blk_add_trace_msg(mddev->queue, "md md_update_sb");
+rewrite:
	bitmap_update_sb(mddev->bitmap);
	rdev_for_each(rdev, mddev) {
		char b[BDEVNAME_SIZE];
@@ -2447,7 +2470,8 @@ repeat:
			/* only need to write one superblock...
			 */
			break;
	}
-	md_super_wait(mddev);
+	if (md_super_wait(mddev) < 0)
+		goto rewrite;
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	if (mddev_is_clustered(mddev) && ret == 0)
		md_cluster_ops->metadata_update_finish(mddev);
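The protocol in the md.c hunks above is easy to miss in diff form: the first superblock write may carry MD_FAILFAST; if it fails without the device becoming Faulty, super_written() records MD_NEED_REWRITE plus LastDev, md_super_wait() returns -EAGAIN, and the caller re-issues the write without failfast. A toy user-space model of that control flow (pure simulation, no kernel interfaces; the single-failure write_sb() behaviour is an assumption for the demo):

/* Model of the md_super_write()/md_super_wait() rewrite protocol. */
#include <stdbool.h>
#include <stdio.h>

struct dev { bool failfast; bool last_dev; bool faulty; };

/* Pretend a failfast write fails once and a plain write succeeds. */
static bool write_sb(struct dev *d, bool use_failfast)
{
	(void)d;
	return !use_failfast;
}

static void update_sb(struct dev *d)
{
	bool ok, ff;

	do {
		ff = d->failfast && !d->last_dev;	/* MD_FAILFAST? */
		ok = write_sb(d, ff);
		if (!ok && !d->faulty && ff)
			d->last_dev = true;	/* LastDev + NEED_REWRITE */
		if (ok)
			d->last_dev = false;	/* success clears LastDev */
	} while (!ok && !d->faulty);	/* md_super_wait() < 0 -> rewrite */

	printf("superblock written, failfast=%d\n", ff);
}

int main(void)
{
	struct dev d = { .failfast = true };

	update_sb(&d);	/* first attempt fails fast, retry succeeds */
	return 0;
}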
diff --git a/drivers/md/md.h b/drivers/md/md.h
index bc6712ef8c81..5c08f84101fa 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -29,6 +29,16 @@

 #define MaxSector (~(sector_t)0)

+/*
+ * These flags should really be called "NO_RETRY" rather than
+ * "FAILFAST" because they don't make any promise about time lapse,
+ * only about the number of retries, which will be zero.
+ * REQ_FAILFAST_DRIVER is not included because
+ * Commit: 4a27446f3e39 ("[SCSI] modify scsi to handle new fail fast flags.")
+ * seems to suggest that the errors it avoids retrying should usually
+ * be retried.
+ */
+#define MD_FAILFAST	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT)
 /*
  * MD's 'extended' device
  */
@@ -177,6 +187,10 @@ enum flag_bits {
				 * It is expected that no bad block log
				 * is present.
				 */
+	LastDev,		/* Seems to be the last working dev as
+				 * it didn't fail, so don't use FailFast
+				 * any more for metadata
+				 */
 };

 static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors,
@@ -213,6 +227,11 @@ enum mddev_flags {
	MD_CLUSTER_RESYNC_LOCKED, /* cluster raid only, which means node
				   * already took resync lock, need to
				   * release the lock */
+	MD_FAILFAST_SUPPORTED,	/* Using MD_FAILFAST on metadata writes is
+				 * supported as calls to md_error() will
+				 * never cause the array to become failed.
+				 */
+	MD_NEED_REWRITE,	/* metadata write needs to be repeated */
 };
 #define MD_UPDATE_SB_FLAGS (BIT(MD_CHANGE_DEVS) | \
			    BIT(MD_CHANGE_CLEAN) | \
			    BIT(MD_CHANGE_PENDING))	/* If these are set, md_update_sb needed */
@@ -628,7 +647,7 @@ extern int mddev_congested(struct mddev *mddev, int bits);
 extern void md_flush_request(struct mddev *mddev, struct bio *bio);
 extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
			   sector_t sector, int size, struct page *page);
-extern void md_super_wait(struct mddev *mddev);
+extern int md_super_wait(struct mddev *mddev);
 extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
			struct page *page, int op, int op_flags,
			bool metadata_op);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d24adc50a31f..4006a9be2eab 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2988,6 +2988,7 @@ static int raid1_run(struct mddev *mddev)
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;
+	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index bd8c884d4596..af50866f5ffb 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3729,6 +3729,7 @@ static int raid10_run(struct mddev *mddev)
	size = raid10_size(mddev, 0, 0);
	md_set_array_sectors(mddev, size);
	mddev->resync_max_sectors = size;
+	set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);

	if (mddev->queue) {
		int stripe = conf->geo.raid_disks *
--
cgit v1.2.3
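A pending metadata write is also visible from user space: the array_state file reports "write-pending" while the CHANGE_PENDING bit is set, which is the same bit the next patch moves into a dedicated sb_flags field. A small observation sketch (the md0 path is an assumption):

/* Poll array_state; "write-pending" maps to the CHANGE_PENDING bit. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	ssize_t n;
	int i, fd;

	for (i = 0; i < 5; i++) {
		fd = open("/sys/block/md0/md/array_state", O_RDONLY);
		if (fd < 0) {
			perror("array_state");
			return 1;
		}
		n = read(fd, buf, sizeof(buf) - 1);
		close(fd);
		if (n <= 0)
			return 1;
		buf[n] = '\0';
		/* typically "clean", "active" or "write-pending" */
		printf("array_state: %s", buf);
		sleep(1);
	}
	return 0;
}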
From 2953079c692da067aeb6345659875b97378f9b0a Mon Sep 17 00:00:00 2001
From: Shaohua Li
Date: Thu, 8 Dec 2016 15:48:19 -0800
Subject: md: separate flags for superblock changes

The mddev->flags field is used for several different purposes. In a lot
of places we check or change the flags without masking unrelated bits,
so we can end up checking or changing flags we did not intend to. Most
of these uses relate to superblock writes, so separate the
superblock-related flags into their own field. This should make the
code clearer and also fixes real bugs.

Reviewed-by: NeilBrown
Signed-off-by: Shaohua Li
---
 drivers/md/bitmap.c      |   4 +-
 drivers/md/dm-raid.c     |   4 +-
 drivers/md/md.c          | 115 ++++++++++++++++++++++-------------------
 drivers/md/md.h          |  16 ++++---
 drivers/md/multipath.c   |   2 +-
 drivers/md/raid1.c       |  12 ++---
 drivers/md/raid10.c      |  22 ++++-----
 drivers/md/raid5-cache.c |   6 +--
 drivers/md/raid5.c       |  26 +++++-----
 9 files changed, 106 insertions(+), 101 deletions(-)

(limited to 'drivers/md/md.h')

diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index c4621571b718..9fb2ccac958a 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1623,7 +1623,7 @@ void bitmap_cond_end_sync(struct bitmap *bitmap, sector_t sector, bool force)
			   atomic_read(&bitmap->mddev->recovery_active) == 0);

		bitmap->mddev->curr_resync_completed = sector;
-		set_bit(MD_CHANGE_CLEAN, &bitmap->mddev->flags);
+		set_bit(MD_SB_CHANGE_CLEAN, &bitmap->mddev->sb_flags);
		sector &= ~((1ULL << bitmap->counts.chunkshift) - 1);
		s = 0;
		while (s < sector && s < bitmap->mddev->resync_max_sectors) {
@@ -2296,7 +2296,7 @@ location_store(struct mddev *mddev, const char *buf, size_t len)
				/* Ensure new bitmap info is stored in
				 * metadata promptly.
				 */
-				set_bit(MD_CHANGE_DEVS, &mddev->flags);
+				set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
				md_wakeup_thread(mddev->thread);
			}
			rv = 0;
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 6d53810963f7..953159d9a825 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2011,7 +2011,7 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
		sb->compat_features = cpu_to_le32(FEATURE_FLAG_SUPPORTS_V190);

		/* Force writing of superblocks to disk */
-		set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &rdev->mddev->sb_flags);

		/* Any superblock is better than none, choose that if given */
		return refdev ?
0 : 1; @@ -3497,7 +3497,7 @@ static void rs_update_sbs(struct raid_set *rs) struct mddev *mddev = &rs->md; int ro = mddev->ro; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev->ro = 0; md_update_sb(mddev, 1); mddev->ro = ro; diff --git a/drivers/md/md.c b/drivers/md/md.c index 5e666482db3c..c15e2344e7c8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -729,7 +729,7 @@ static void super_written(struct bio *bio) md_error(mddev, rdev); if (!test_bit(Faulty, &rdev->flags) && (bio->bi_opf & MD_FAILFAST)) { - set_bit(MD_NEED_REWRITE, &mddev->flags); + set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); set_bit(LastDev, &rdev->flags); } } else @@ -780,7 +780,7 @@ int md_super_wait(struct mddev *mddev) { /* wait for all superblock writes that were scheduled to complete */ wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); - if (test_and_clear_bit(MD_NEED_REWRITE, &mddev->flags)) + if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) return -EAGAIN; return 0; } @@ -2322,24 +2322,24 @@ void md_update_sb(struct mddev *mddev, int force_change) if (mddev->ro) { if (force_change) - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); return; } repeat: if (mddev_is_clustered(mddev)) { - if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) + if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) force_change = 1; - if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) + if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) nospares = 1; ret = md_cluster_ops->metadata_update_start(mddev); /* Has someone else has updated the sb */ if (!does_sb_need_changing(mddev)) { if (ret == 0) md_cluster_ops->metadata_update_cancel(mddev); - bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING), - BIT(MD_CHANGE_DEVS) | - BIT(MD_CHANGE_CLEAN)); + bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), + BIT(MD_SB_CHANGE_DEVS) | + BIT(MD_SB_CHANGE_CLEAN)); return; } } @@ -2355,10 +2355,10 @@ repeat: } if (!mddev->persistent) { - clear_bit(MD_CHANGE_CLEAN, &mddev->flags); - clear_bit(MD_CHANGE_DEVS, &mddev->flags); + clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); + clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->external) { - clear_bit(MD_CHANGE_PENDING, &mddev->flags); + clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); rdev_for_each(rdev, mddev) { if (rdev->badblocks.changed) { rdev->badblocks.changed = 0; @@ -2378,9 +2378,9 @@ repeat: mddev->utime = ktime_get_real_seconds(); - if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) + if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) force_change = 1; - if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) + if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) /* just a clean<-> dirty transition, possibly leave spares alone, * though if events isn't the right even/odd, we will have to do * spares after all @@ -2472,14 +2472,14 @@ rewrite: } if (md_super_wait(mddev) < 0) goto rewrite; - /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ + /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */ if (mddev_is_clustered(mddev) && ret == 0) md_cluster_ops->metadata_update_finish(mddev); if (mddev->in_sync != sync_req || - !bit_clear_unless(&mddev->flags, BIT(MD_CHANGE_PENDING), - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_CLEAN))) + !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN))) /* have to write it out 
again */ goto repeat; wake_up(&mddev->sb_wait); @@ -2523,7 +2523,7 @@ static int add_bound_rdev(struct md_rdev *rdev) } sysfs_notify_dirent_safe(rdev->sysfs_state); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (mddev->degraded) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); @@ -2640,7 +2640,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) if (err == 0) { md_kick_rdev_from_array(rdev); if (mddev->pers) { - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); } md_new_event(mddev); @@ -3651,7 +3651,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len) } blk_set_stacking_limits(&mddev->queue->limits); pers->run(mddev); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev_resume(mddev); if (!mddev->thread) md_update_sb(mddev, 1); @@ -3846,7 +3846,7 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len) if (!err) { mddev->recovery_cp = n; if (mddev->pers) - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } mddev_unlock(mddev); return err ?: len; @@ -3920,7 +3920,7 @@ array_state_show(struct mddev *mddev, char *page) st = read_auto; break; case 0: - if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) + if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) st = write_pending; else if (mddev->in_sync) st = clean; @@ -3958,7 +3958,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) spin_lock(&mddev->lock); if (st == active) { restart_array(mddev); - clear_bit(MD_CHANGE_PENDING, &mddev->flags); + clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); md_wakeup_thread(mddev->thread); wake_up(&mddev->sb_wait); err = 0; @@ -3969,7 +3969,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) mddev->in_sync = 1; if (mddev->safemode == 1) mddev->safemode = 0; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } err = 0; } else @@ -4035,7 +4035,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) mddev->in_sync = 1; if (mddev->safemode == 1) mddev->safemode = 0; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } err = 0; } else @@ -4049,7 +4049,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) err = restart_array(mddev); if (err) break; - clear_bit(MD_CHANGE_PENDING, &mddev->flags); + clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); wake_up(&mddev->sb_wait); err = 0; } else { @@ -5378,7 +5378,7 @@ int md_run(struct mddev *mddev) set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - if (mddev->flags & MD_UPDATE_SB_FLAGS) + if (mddev->sb_flags) md_update_sb(mddev, 0); md_new_event(mddev); @@ -5473,6 +5473,7 @@ static void md_clean(struct mddev *mddev) mddev->level = LEVEL_NONE; mddev->clevel[0] = 0; mddev->flags = 0; + mddev->sb_flags = 0; mddev->ro = 0; mddev->metadata_type[0] = 0; mddev->chunk_sectors = 0; @@ -5525,7 +5526,7 @@ static void __md_stop_writes(struct mddev *mddev) if (mddev->ro == 0 && ((!mddev->in_sync && !mddev_is_clustered(mddev)) || - (mddev->flags & MD_UPDATE_SB_FLAGS))) { + mddev->sb_flags)) { /* mark array as shutdown cleanly */ if (!mddev_is_clustered(mddev)) mddev->in_sync = 1; @@ -5608,13 +5609,13 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev) * which will now 
never happen */ wake_up_process(mddev->sync_thread->tsk); - if (mddev->external && test_bit(MD_CHANGE_PENDING, &mddev->flags)) + if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) return -EBUSY; mddev_unlock(mddev); wait_event(resync_wait, !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev_lock_nointr(mddev); mutex_lock(&mddev->open_mutex); @@ -6234,7 +6235,7 @@ kick_rdev: md_cluster_ops->remove_disk(mddev, rdev); md_kick_rdev_from_array(rdev); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (mddev->thread) md_wakeup_thread(mddev->thread); else @@ -6303,7 +6304,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev) rdev->raid_disk = -1; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); if (!mddev->thread) md_update_sb(mddev, 1); /* @@ -6460,9 +6461,11 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info) mddev->max_disks = MD_SB_DISKS; - if (mddev->persistent) + if (mddev->persistent) { mddev->flags = 0; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + mddev->sb_flags = 0; + } + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); @@ -7007,11 +7010,11 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode, /* If a device failed while we were read-only, we * need to make sure the metadata is updated now. */ - if (test_bit(MD_CHANGE_DEVS, &mddev->flags)) { + if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { mddev_unlock(mddev); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_DEVS, &mddev->flags) && - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); mddev_lock_nointr(mddev); } } else { @@ -7766,8 +7769,8 @@ void md_write_start(struct mddev *mddev, struct bio *bi) spin_lock(&mddev->lock); if (mddev->in_sync) { mddev->in_sync = 0; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); - set_bit(MD_CHANGE_PENDING, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); + set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); md_wakeup_thread(mddev->thread); did_change = 1; } @@ -7776,7 +7779,7 @@ void md_write_start(struct mddev *mddev, struct bio *bi) if (did_change) sysfs_notify_dirent_safe(mddev->sysfs_state); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); } EXPORT_SYMBOL(md_write_start); @@ -7797,7 +7800,7 @@ EXPORT_SYMBOL(md_write_end); * attempting a GFP_KERNEL allocation while holding the mddev lock. * Must be called with mddev_lock held. * - * In the ->external case MD_CHANGE_PENDING can not be cleared until mddev->lock + * In the ->external case MD_SB_CHANGE_PENDING can not be cleared until mddev->lock * is dropped, so return -EAGAIN after notifying userspace. 
*/ int md_allow_write(struct mddev *mddev) @@ -7812,8 +7815,8 @@ int md_allow_write(struct mddev *mddev) spin_lock(&mddev->lock); if (mddev->in_sync) { mddev->in_sync = 0; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); - set_bit(MD_CHANGE_PENDING, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); + set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); if (mddev->safemode_delay && mddev->safemode == 0) mddev->safemode = 1; @@ -7823,7 +7826,7 @@ int md_allow_write(struct mddev *mddev) } else spin_unlock(&mddev->lock); - if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) + if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) return -EAGAIN; else return 0; @@ -8058,7 +8061,7 @@ void md_do_sync(struct md_thread *thread) j > mddev->recovery_cp) mddev->recovery_cp = j; update_time = jiffies; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); sysfs_notify(&mddev->kobj, NULL, "sync_completed"); } @@ -8206,8 +8209,8 @@ void md_do_sync(struct md_thread *thread) /* set CHANGE_PENDING here since maybe another update is needed, * so other nodes are informed. It should be harmless for normal * raid */ - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_PENDING) | BIT(MD_CHANGE_DEVS)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); spin_lock(&mddev->lock); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { @@ -8307,12 +8310,12 @@ static int remove_and_add_spares(struct mddev *mddev, if (!test_bit(Journal, &rdev->flags)) spares++; md_new_event(mddev); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); } } no_add: if (removed) - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); return spares; } @@ -8385,7 +8388,7 @@ void md_check_recovery(struct mddev *mddev) if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) return; if ( ! 
( - (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<sb_flags & ~ (1<recovery) || test_bit(MD_RECOVERY_DONE, &mddev->recovery) || test_bit(MD_RELOAD_SB, &mddev->flags) || @@ -8423,7 +8426,7 @@ void md_check_recovery(struct mddev *mddev) md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); - clear_bit(MD_CHANGE_PENDING, &mddev->flags); + clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); goto unlock; } @@ -8451,7 +8454,7 @@ void md_check_recovery(struct mddev *mddev) mddev->recovery_cp == MaxSector) { mddev->in_sync = 1; did_change = 1; - set_bit(MD_CHANGE_CLEAN, &mddev->flags); + set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); } if (mddev->safemode == 1) mddev->safemode = 0; @@ -8460,7 +8463,7 @@ void md_check_recovery(struct mddev *mddev) sysfs_notify_dirent_safe(mddev->sysfs_state); } - if (mddev->flags & MD_UPDATE_SB_FLAGS) + if (mddev->sb_flags) md_update_sb(mddev, 0); if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && @@ -8556,7 +8559,7 @@ void md_reap_sync_thread(struct mddev *mddev) if (mddev->pers->spare_active(mddev)) { sysfs_notify(&mddev->kobj, NULL, "degraded"); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); } } if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && @@ -8571,7 +8574,7 @@ void md_reap_sync_thread(struct mddev *mddev) rdev->saved_raid_disk = -1; md_update_sb(mddev, 1); - /* MD_CHANGE_PENDING should be cleared by md_update_sb, so we can + /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by * clustered raid */ if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) @@ -8637,8 +8640,8 @@ int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, sysfs_notify(&rdev->kobj, NULL, "unacknowledged_bad_blocks"); sysfs_notify_dirent_safe(rdev->sysfs_state); - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_CLEAN) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); md_wakeup_thread(rdev->mddev->thread); return 1; } else diff --git a/drivers/md/md.h b/drivers/md/md.h index 5c08f84101fa..e38936d05df1 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -213,9 +213,6 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, struct md_cluster_info; enum mddev_flags { - MD_CHANGE_DEVS, /* Some device status has changed */ - MD_CHANGE_CLEAN, /* transition to or from 'clean' */ - MD_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */ MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */ MD_CLOSING, /* If set, we are closing the array, do not open * it then */ @@ -231,11 +228,15 @@ enum mddev_flags { * supported as calls to md_error() will * never cause the array to become failed. 
*/ - MD_NEED_REWRITE, /* metadata write needs to be repeated */ }; -#define MD_UPDATE_SB_FLAGS (BIT(MD_CHANGE_DEVS) | \ - BIT(MD_CHANGE_CLEAN) | \ - BIT(MD_CHANGE_PENDING)) /* If these are set, md_update_sb needed */ + +enum mddev_sb_flags { + MD_SB_CHANGE_DEVS, /* Some device status has changed */ + MD_SB_CHANGE_CLEAN, /* transition to or from 'clean' */ + MD_SB_CHANGE_PENDING, /* switch from 'clean' to 'active' in progress */ + MD_SB_NEED_REWRITE, /* metadata write needs to be repeated */ +}; + struct mddev { void *private; struct md_personality *pers; @@ -243,6 +244,7 @@ struct mddev { int md_minor; struct list_head disks; unsigned long flags; + unsigned long sb_flags; int suspended; atomic_t active_io; diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 589b80775d3f..9fa2d6c5d996 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -208,7 +208,7 @@ static void multipath_error (struct mddev *mddev, struct md_rdev *rdev) spin_unlock_irqrestore(&conf->device_lock, flags); } set_bit(Faulty, &rdev->flags); - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); pr_err("multipath: IO failure on %s, disabling IO path.\n" "multipath: Operation continuing on %d IO paths.\n", bdevname(rdev->bdev, b), diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index efc2e744cfd3..a1f3fbed9100 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1517,8 +1517,8 @@ static void raid1_error(struct mddev *mddev, struct md_rdev *rdev) * if recovery is running, make sure it aborts. */ set_bit(MD_RECOVERY_INTR, &mddev->recovery); - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n" "md/raid1:%s: Operation continuing on %d devices.\n", mdname(mddev), bdevname(rdev->bdev, b), @@ -2464,10 +2464,10 @@ static void raid1d(struct md_thread *thread) md_check_recovery(mddev); if (!list_empty_careful(&conf->bio_end_io_list) && - !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { LIST_HEAD(tmp); spin_lock_irqsave(&conf->device_lock, flags); - if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { while (!list_empty(&conf->bio_end_io_list)) { list_move(conf->bio_end_io_list.prev, &tmp); conf->nr_queued--; @@ -2521,7 +2521,7 @@ static void raid1d(struct md_thread *thread) generic_make_request(r1_bio->bios[r1_bio->read_disk]); cond_resched(); - if (mddev->flags & ~(1<sb_flags & ~(1<flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); *skipped = 1; put_buf(r1_bio); diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 525ca9923707..ab5e86209322 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1138,12 +1138,12 @@ static void __make_request(struct mddev *mddev, struct bio *bio) bio->bi_iter.bi_sector < conf->reshape_progress))) { /* Need to update reshape_position in metadata */ mddev->reshape_position = conf->reshape_progress; - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); md_wakeup_thread(mddev->thread); raid10_log(conf->mddev, "wait reshape metadata"); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_PENDING, &mddev->flags)); + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); conf->reshape_safe = 
mddev->reshape_position; } @@ -1652,8 +1652,8 @@ static void raid10_error(struct mddev *mddev, struct md_rdev *rdev) set_bit(MD_RECOVERY_INTR, &mddev->recovery); set_bit(Blocked, &rdev->flags); set_bit(Faulty, &rdev->flags); - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); spin_unlock_irqrestore(&conf->device_lock, flags); pr_crit("md/raid10:%s: Disk failure on %s, disabling device.\n" "md/raid10:%s: Operation continuing on %d devices.\n", @@ -2761,10 +2761,10 @@ static void raid10d(struct md_thread *thread) md_check_recovery(mddev); if (!list_empty_careful(&conf->bio_end_io_list) && - !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { LIST_HEAD(tmp); spin_lock_irqsave(&conf->device_lock, flags); - if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { while (!list_empty(&conf->bio_end_io_list)) { list_move(conf->bio_end_io_list.prev, &tmp); conf->nr_queued--; @@ -2822,7 +2822,7 @@ static void raid10d(struct md_thread *thread) } cond_resched(); - if (mddev->flags & ~(1<sb_flags & ~(1<device_lock); mddev->raid_disks = conf->geo.raid_disks; mddev->reshape_position = conf->reshape_progress; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); @@ -4404,9 +4404,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, else mddev->curr_resync_completed = conf->reshape_progress; conf->reshape_checkpoint = jiffies; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); - wait_event(mddev->sb_wait, mddev->flags == 0 || + wait_event(mddev->sb_wait, mddev->sb_flags == 0 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { allow_barrier(conf); diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c index de8a4ede0bc9..6d1a150eacd6 100644 --- a/drivers/md/raid5-cache.c +++ b/drivers/md/raid5-cache.c @@ -1206,8 +1206,8 @@ static void r5l_write_super_and_discard_space(struct r5l_log *log, * there is a deadlock. We workaround this issue with a trylock. 
* FIXME: we could miss discard if we can't take reconfig mutex */ - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); if (!mddev_trylock(mddev)) return; md_update_sb(mddev, 1); @@ -2197,7 +2197,7 @@ static void r5l_write_super(struct r5l_log *log, sector_t cp) struct mddev *mddev = log->rdev->mddev; log->rdev->journal_tail = cp; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); } static ssize_t r5c_journal_mode_show(struct mddev *mddev, char *page) diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 3e6a2a0d61e9..d40e94d56410 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -961,7 +961,7 @@ again: if (bad < 0) { set_bit(BlockedBadBlocks, &rdev->flags); if (!conf->mddev->external && - conf->mddev->flags) { + conf->mddev->sb_flags) { /* It is very unlikely, but we might * still need to write out the * bad block log - better give it @@ -2547,8 +2547,8 @@ static void raid5_error(struct mddev *mddev, struct md_rdev *rdev) set_bit(Blocked, &rdev->flags); set_bit(Faulty, &rdev->flags); - set_mask_bits(&mddev->flags, 0, - BIT(MD_CHANGE_DEVS) | BIT(MD_CHANGE_PENDING)); + set_mask_bits(&mddev->sb_flags, 0, + BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING)); pr_crit("md/raid:%s: Disk failure on %s, disabling device.\n" "md/raid:%s: Operation continuing on %d devices.\n", mdname(mddev), @@ -4761,7 +4761,7 @@ finish: } if (!bio_list_empty(&s.return_bi)) { - if (test_bit(MD_CHANGE_PENDING, &conf->mddev->flags)) { + if (test_bit(MD_SB_CHANGE_PENDING, &conf->mddev->sb_flags)) { spin_lock_irq(&conf->device_lock); bio_list_merge(&conf->return_bi, &s.return_bi); spin_unlock_irq(&conf->device_lock); @@ -5617,9 +5617,9 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *sk mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); - wait_event(mddev->sb_wait, mddev->flags == 0 || + wait_event(mddev->sb_wait, mddev->sb_flags == 0 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) return 0; @@ -5715,10 +5715,10 @@ finish: mddev->reshape_position = conf->reshape_progress; mddev->curr_resync_completed = sector_nr; conf->reshape_checkpoint = jiffies; - set_bit(MD_CHANGE_DEVS, &mddev->flags); + set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); md_wakeup_thread(mddev->thread); wait_event(mddev->sb_wait, - !test_bit(MD_CHANGE_DEVS, &mddev->flags) + !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) goto ret; @@ -5993,10 +5993,10 @@ static void raid5d(struct md_thread *thread) md_check_recovery(mddev); if (!bio_list_empty(&conf->return_bi) && - !test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { struct bio_list tmp = BIO_EMPTY_LIST; spin_lock_irq(&conf->device_lock); - if (!test_bit(MD_CHANGE_PENDING, &mddev->flags)) { + if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) { bio_list_merge(&tmp, &conf->return_bi); bio_list_init(&conf->return_bi); } @@ -6043,7 +6043,7 @@ static void raid5d(struct md_thread *thread) break; handled += batch_size; - if (mddev->flags & ~(1<sb_flags & ~(1 << MD_SB_CHANGE_PENDING)) { 
			spin_unlock_irq(&conf->device_lock);
			md_check_recovery(mddev);
			spin_lock_irq(&conf->device_lock);
@@ -7640,7 +7640,7 @@ static int raid5_start_reshape(struct mddev *mddev)
	}
	mddev->raid_disks = conf->raid_disks;
	mddev->reshape_position = conf->reshape_progress;
-	set_bit(MD_CHANGE_DEVS, &mddev->flags);
+	set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
@@ -7906,7 +7906,7 @@ static int raid5_check_reshape(struct mddev *mddev)
			conf->chunk_sectors = new_chunk ;
			mddev->chunk_sectors = new_chunk;
		}
-		set_bit(MD_CHANGE_DEVS, &mddev->flags);
+		set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
--
cgit v1.2.3
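This series leans on two bitops helpers whose semantics make the sb_flags handling safe without a lock: set_mask_bits(ptr, mask, bits) atomically replaces the masked bits, and bit_clear_unless(ptr, clear, test) clears bits only if none of the guard bits are set, returning whether it did. A user-space model of their behaviour (a simplified sketch mirroring the kernel semantics with a cmpxchg loop, not the kernel implementation):

/* Model of set_mask_bits() and bit_clear_unless() as used above. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define BIT(nr) (1UL << (nr))
enum { MD_SB_CHANGE_DEVS, MD_SB_CHANGE_CLEAN, MD_SB_CHANGE_PENDING };

static unsigned long set_mask_bits(_Atomic unsigned long *p,
				   unsigned long mask, unsigned long bits)
{
	unsigned long old = atomic_load(p), new;

	do {
		new = (old & ~mask) | bits;
	} while (!atomic_compare_exchange_weak(p, &old, new));
	return old;
}

static bool bit_clear_unless(_Atomic unsigned long *p,
			     unsigned long clear, unsigned long test)
{
	unsigned long old = atomic_load(p), new;

	do {
		if (old & test)
			return false;	/* a guarded bit is set: leave it */
		new = old & ~clear;
	} while (!atomic_compare_exchange_weak(p, &old, new));
	return true;
}

int main(void)
{
	_Atomic unsigned long sb_flags = 0;

	/* raid1_error() style: raise DEVS and PENDING in one atomic step */
	set_mask_bits(&sb_flags, 0,
		      BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));

	/* md_update_sb() style: drop PENDING only if no new change arrived */
	if (bit_clear_unless(&sb_flags, BIT(MD_SB_CHANGE_PENDING),
			     BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN)))
		printf("PENDING cleared\n");
	else
		printf("new changes pending; superblock must be rewritten\n");
	return 0;
}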