Diffstat (limited to 'drivers/md/md.c')
-rw-r--r-- | drivers/md/md.c | 39
1 file changed, 28 insertions, 11 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 41c050b59ec4..0081ace39a64 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -711,7 +711,7 @@ static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
 	return NULL;
 }
 
-static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
+struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
 {
 	struct md_rdev *rdev;
 
@@ -721,6 +721,7 @@ static struct md_rdev *find_rdev_rcu(struct mddev *mddev, dev_t dev)
 
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(md_find_rdev_rcu);
 
 static struct md_personality *find_pers(int level, char *clevel)
 {
@@ -5560,11 +5561,6 @@ int md_run(struct mddev *mddev)
 	if (start_readonly && mddev->ro == 0)
 		mddev->ro = 2; /* read-only, but switch on first write */
 
-	/*
-	 * NOTE: some pers->run(), for example r5l_recovery_log(), wakes
-	 * up mddev->thread. It is important to initialize critical
-	 * resources for mddev->thread BEFORE calling pers->run().
-	 */
 	err = pers->run(mddev);
 	if (err)
 		pr_warn("md: pers->run() failed ...\n");
@@ -5678,6 +5674,9 @@ static int do_md_run(struct mddev *mddev)
 	if (mddev_is_clustered(mddev))
 		md_allow_write(mddev);
 
+	/* run start up tasks that require md_thread */
+	md_start(mddev);
+
 	md_wakeup_thread(mddev->thread);
 	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
 
@@ -5689,6 +5688,21 @@ out:
 	return err;
 }
 
+int md_start(struct mddev *mddev)
+{
+	int ret = 0;
+
+	if (mddev->pers->start) {
+		set_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+		md_wakeup_thread(mddev->thread);
+		ret = mddev->pers->start(mddev);
+		clear_bit(MD_RECOVERY_WAIT, &mddev->recovery);
+		md_wakeup_thread(mddev->sync_thread);
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(md_start);
+
 static int restart_array(struct mddev *mddev)
 {
 	struct gendisk *disk = mddev->gendisk;
@@ -6997,7 +7011,7 @@ static int set_disk_faulty(struct mddev *mddev, dev_t dev)
 		return -ENODEV;
 
 	rcu_read_lock();
-	rdev = find_rdev_rcu(mddev, dev);
+	rdev = md_find_rdev_rcu(mddev, dev);
 	if (!rdev)
 		err = -ENODEV;
 	else {
@@ -7605,7 +7619,9 @@ static int status_resync(struct seq_file *seq, struct mddev *mddev)
 		if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
 			/* Still cleaning up */
 			resync = max_sectors;
-	} else
+	} else if (resync > max_sectors)
+		resync = max_sectors;
+	else
 		resync -= atomic_read(&mddev->recovery_active);
 
 	if (resync == 0) {
@@ -7869,10 +7885,10 @@ static int md_seq_open(struct inode *inode, struct file *file)
 }
 
 static int md_unloading;
-static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
+static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
 {
 	struct seq_file *seq = filp->private_data;
-	int mask;
+	__poll_t mask;
 
 	if (md_unloading)
 		return POLLIN|POLLRDNORM|POLLERR|POLLPRI;
@@ -8167,7 +8183,8 @@ void md_do_sync(struct md_thread *thread)
 	int ret;
 
 	/* just incase thread restarts... */
-	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
+	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+	    test_bit(MD_RECOVERY_WAIT, &mddev->recovery))
 		return;
 	if (mddev->ro) {/* never try to sync a read-only array */
 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
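
For context on how the new hook is consumed: md_start() above only does work when the personality supplies a start() callback (the start member is added to struct md_personality in the md.h half of this patch, which is not shown here). Below is a minimal sketch of a hypothetical personality wiring up the callback; the "demo" name and demo_start() are illustrative only and are not part of this patch.

/*
 * Hypothetical personality illustrating the new optional ->start() hook.
 * md_start() sets MD_RECOVERY_WAIT and wakes mddev->thread before calling
 * ->start(), so long-running start-up work (e.g. journal recovery) can rely
 * on the md thread while resync is held off; the bit is cleared afterwards
 * and the sync thread is woken.
 */
static int demo_start(struct mddev *mddev)
{
	/* start-up work that needs mddev->thread running goes here */
	return 0;
}

static struct md_personality demo_personality = {
	.name	= "demo",
	.owner	= THIS_MODULE,
	/* .make_request, .run, .free, ... omitted for brevity */
	.start	= demo_start,	/* new optional callback */
};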