Diffstat (limited to 'drivers/md/md.c')
-rw-r--r-- | drivers/md/md.c | 130 |
1 files changed, 71 insertions, 59 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6c0c3d0d905a..5111ed966947 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -41,6 +41,7 @@
 #include <linux/sched/signal.h>
 #include <linux/kthread.h>
 #include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
 #include <linux/badblocks.h>
 #include <linux/sysctl.h>
 #include <linux/seq_file.h>
@@ -51,6 +52,7 @@
 #include <linux/hdreg.h>
 #include <linux/proc_fs.h>
 #include <linux/random.h>
+#include <linux/major.h>
 #include <linux/module.h>
 #include <linux/reboot.h>
 #include <linux/file.h>
@@ -352,7 +354,7 @@ static bool create_on_open = true;
  */
 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
 static atomic_t md_event_count;
-void md_new_event(struct mddev *mddev)
+void md_new_event(void)
 {
 	atomic_inc(&md_event_count);
 	wake_up(&md_event_waiters);
@@ -441,19 +443,19 @@ check_suspended:
 }
 EXPORT_SYMBOL(md_handle_request);
 
-static blk_qc_t md_submit_bio(struct bio *bio)
+static void md_submit_bio(struct bio *bio)
 {
 	const int rw = bio_data_dir(bio);
 	struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
 
 	if (mddev == NULL || mddev->pers == NULL) {
 		bio_io_error(bio);
-		return BLK_QC_T_NONE;
+		return;
 	}
 
 	if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
 		bio_io_error(bio);
-		return BLK_QC_T_NONE;
+		return;
 	}
 
 	blk_queue_split(&bio);
@@ -462,15 +464,13 @@ static blk_qc_t md_submit_bio(struct bio *bio)
 		if (bio_sectors(bio) != 0)
 			bio->bi_status = BLK_STS_IOERR;
 		bio_endio(bio);
-		return BLK_QC_T_NONE;
+		return;
 	}
 
 	/* bio could be mergeable after passing to underlayer */
 	bio->bi_opf &= ~REQ_NOMERGE;
 
 	md_handle_request(mddev, bio);
-
-	return BLK_QC_T_NONE;
 }
 
 /* mddev_suspend makes sure no new requests are submitted
@@ -888,8 +888,7 @@ static struct md_personality *find_pers(int level, char *clevel)
 /* return the offset of the super block in 512byte sectors */
 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
 {
-	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
-	return MD_NEW_SIZE_SECTORS(num_sectors);
+	return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
 }
 
 static int alloc_disk_sb(struct md_rdev *rdev)
@@ -1631,8 +1630,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 	 */
 	switch(minor_version) {
 	case 0:
-		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
-		sb_start -= 8*2;
+		sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
 		sb_start &= ~(sector_t)(4*2-1);
 		break;
 	case 1:
@@ -1787,10 +1785,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
 		else
 			ret = 0;
 	}
-	if (minor_version) {
-		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
-		sectors -= rdev->data_offset;
-	} else
+	if (minor_version)
+		sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
+	else
 		sectors = rdev->sb_start;
 	if (sectors < le64_to_cpu(sb->data_size))
 		return -EINVAL;
@@ -2168,8 +2165,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 		return 0; /* too confusing */
 	if (rdev->sb_start < rdev->data_offset) {
 		/* minor versions 1 and 2; superblock before data */
-		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
-		max_sectors -= rdev->data_offset;
+		max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
 		if (!num_sectors || num_sectors > max_sectors)
 			num_sectors = max_sectors;
 	} else if (rdev->mddev->bitmap_info.offset) {
@@ -2178,7 +2174,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
 	} else {
 		/* minor version 0; superblock after data */
 		sector_t sb_start, bm_space;
-		sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
+		sector_t dev_size = bdev_nr_sectors(rdev->bdev);
 
 		/* 8K is for superblock */
 		sb_start = dev_size - 8*2;
@@ -2886,7 +2882,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
 	if (mddev->degraded)
 		set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
-	md_new_event(mddev);
+	md_new_event();
 	md_wakeup_thread(mddev->thread);
 	return 0;
 }
@@ -2976,7 +2972,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 	 *  -write_error - clears WriteErrorSeen
 	 *  {,-}failfast - set/clear FailFast
 	 */
+
+	struct mddev *mddev = rdev->mddev;
 	int err = -EINVAL;
+	bool need_update_sb = false;
+
 	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
 		md_error(rdev->mddev, rdev);
 		if (test_bit(Faulty, &rdev->flags))
@@ -2991,7 +2991,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 		if (rdev->raid_disk >= 0)
 			err = -EBUSY;
 		else {
-			struct mddev *mddev = rdev->mddev;
 			err = 0;
 			if (mddev_is_clustered(mddev))
 				err = md_cluster_ops->remove_disk(mddev, rdev);
@@ -3002,16 +3001,18 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 					set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 					md_wakeup_thread(mddev->thread);
 				}
-				md_new_event(mddev);
+				md_new_event();
 			}
 		}
 	} else if (cmd_match(buf, "writemostly")) {
 		set_bit(WriteMostly, &rdev->flags);
 		mddev_create_serial_pool(rdev->mddev, rdev, false);
+		need_update_sb = true;
 		err = 0;
 	} else if (cmd_match(buf, "-writemostly")) {
 		mddev_destroy_serial_pool(rdev->mddev, rdev, false);
 		clear_bit(WriteMostly, &rdev->flags);
+		need_update_sb = true;
 		err = 0;
 	} else if (cmd_match(buf, "blocked")) {
 		set_bit(Blocked, &rdev->flags);
@@ -3037,9 +3038,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 		err = 0;
 	} else if (cmd_match(buf, "failfast")) {
 		set_bit(FailFast, &rdev->flags);
+		need_update_sb = true;
 		err = 0;
 	} else if (cmd_match(buf, "-failfast")) {
 		clear_bit(FailFast, &rdev->flags);
+		need_update_sb = true;
 		err = 0;
 	} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
 		   !test_bit(Journal, &rdev->flags)) {
@@ -3118,6 +3121,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
 		clear_bit(ExternalBbl, &rdev->flags);
 		err = 0;
 	}
+	if (need_update_sb)
+		md_update_sb(mddev, 1);
 	if (!err)
 		sysfs_notify_dirent_safe(rdev->sysfs_state);
 	return err ? err : len;
@@ -3382,7 +3387,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
 		if (!sectors)
 			return -EBUSY;
 	} else if (!sectors)
-		sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
+		sectors = bdev_nr_sectors(rdev->bdev) -
 			rdev->data_offset;
 	if (!my_mddev->pers->resize)
 		/* Cannot change size for RAID0 or Linear etc */
@@ -3709,7 +3714,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
 
 	kobject_init(&rdev->kobj, &rdev_ktype);
 
-	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
+	size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
 	if (!size) {
 		pr_warn("md: %s has zero or unknown size, marking faulty!\n",
 			bdevname(rdev->bdev,b));
@@ -4099,7 +4104,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
 	if (!mddev->thread)
 		md_update_sb(mddev, 1);
 	sysfs_notify_dirent_safe(mddev->sysfs_level);
-	md_new_event(mddev);
+	md_new_event();
 	rv = len;
 out_unlock:
 	mddev_unlock(mddev);
@@ -4620,7 +4625,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
 		export_rdev(rdev);
 	mddev_unlock(mddev);
 	if (!err)
-		md_new_event(mddev);
+		md_new_event();
 	return err ? err : len;
 }
 
@@ -5490,6 +5495,10 @@ static struct attribute *md_default_attrs[] = {
 	NULL,
 };
 
+static const struct attribute_group md_default_group = {
+	.attrs = md_default_attrs,
+};
+
 static struct attribute *md_redundancy_attrs[] = {
 	&md_scan_mode.attr,
 	&md_last_scan_mode.attr,
@@ -5512,6 +5521,12 @@ static const struct attribute_group md_redundancy_group = {
 	.attrs = md_redundancy_attrs,
 };
 
+static const struct attribute_group *md_attr_groups[] = {
+	&md_default_group,
+	&md_bitmap_group,
+	NULL,
+};
+
 static ssize_t
 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
@@ -5587,7 +5602,7 @@ static const struct sysfs_ops md_sysfs_ops = {
 static struct kobj_type md_ktype = {
 	.release	= md_free,
 	.sysfs_ops	= &md_sysfs_ops,
-	.default_attrs	= md_default_attrs,
+	.default_groups	= md_attr_groups,
 };
 
 int mdp_major = 0;
@@ -5596,7 +5611,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
 {
 	struct mddev *mddev = container_of(ws, struct mddev, del_work);
 
-	sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
 	kobject_del(&mddev->kobj);
 	kobject_put(&mddev->kobj);
 }
@@ -5663,7 +5677,7 @@ static int md_alloc(dev_t dev, char *name)
 			    strcmp(mddev2->gendisk->disk_name, name) == 0) {
 				spin_unlock(&all_mddevs_lock);
 				error = -EEXIST;
-				goto abort;
+				goto out_unlock_disks_mutex;
 			}
 		spin_unlock(&all_mddevs_lock);
 	}
@@ -5676,7 +5690,7 @@ static int md_alloc(dev_t dev, char *name)
 	error = -ENOMEM;
 	disk = blk_alloc_disk(NUMA_NO_NODE);
 	if (!disk)
-		goto abort;
+		goto out_unlock_disks_mutex;
 
 	disk->major = MAJOR(mddev->unit);
 	disk->first_minor = unit << shift;
@@ -5700,27 +5714,25 @@ static int md_alloc(dev_t dev, char *name)
 	disk->flags |= GENHD_FL_EXT_DEVT;
 	disk->events |= DISK_EVENT_MEDIA_CHANGE;
 	mddev->gendisk = disk;
-	add_disk(disk);
+	error = add_disk(disk);
+	if (error)
+		goto out_cleanup_disk;
 
 	error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
-	if (error) {
-		/* This isn't possible, but as kobject_init_and_add is marked
-		 * __must_check, we must do something with the result
-		 */
-		pr_debug("md: cannot register %s/md - name in use\n",
-			 disk->disk_name);
-		error = 0;
-	}
-	if (mddev->kobj.sd &&
-	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
-		pr_debug("pointless warning\n");
- abort:
+	if (error)
+		goto out_del_gendisk;
+
+	kobject_uevent(&mddev->kobj, KOBJ_ADD);
+	mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
+	mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
+	goto out_unlock_disks_mutex;
+
+out_del_gendisk:
+	del_gendisk(disk);
+out_cleanup_disk:
+	blk_cleanup_disk(disk);
+out_unlock_disks_mutex:
 	mutex_unlock(&disks_mutex);
-	if (!error && mddev->kobj.sd) {
-		kobject_uevent(&mddev->kobj, KOBJ_ADD);
-		mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
-		mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
-	}
 	mddev_put(mddev);
 	return error;
 }
@@ -6034,7 +6046,7 @@ int md_run(struct mddev *mddev)
 	if (mddev->sb_flags)
 		md_update_sb(mddev, 0);
 
-	md_new_event(mddev);
+	md_new_event();
 	return 0;
 
 bitmap_abort:
@@ -6424,7 +6436,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
 		if (mddev->hold_active == UNTIL_STOP)
 			mddev->hold_active = 0;
 	}
-	md_new_event(mddev);
+	md_new_event();
 	sysfs_notify_dirent_safe(mddev->sysfs_state);
 	return 0;
 }
@@ -6880,7 +6892,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
 
 		if (!mddev->persistent) {
 			pr_debug("md: nonpersistent superblock ...\n");
-			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+			rdev->sb_start = bdev_nr_sectors(rdev->bdev);
 		} else
 			rdev->sb_start = calc_dev_sboffset(rdev);
 		rdev->sectors = rdev->sb_start;
@@ -6928,7 +6940,7 @@ kick_rdev:
 		md_wakeup_thread(mddev->thread);
 	else
 		md_update_sb(mddev, 1);
-	md_new_event(mddev);
+	md_new_event();
 
 	return 0;
 busy:
@@ -6967,7 +6979,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	if (mddev->persistent)
 		rdev->sb_start = calc_dev_sboffset(rdev);
 	else
-		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+		rdev->sb_start = bdev_nr_sectors(rdev->bdev);
 
 	rdev->sectors = rdev->sb_start;
 
@@ -7001,7 +7013,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
 	 */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	md_wakeup_thread(mddev->thread);
-	md_new_event(mddev);
+	md_new_event();
 	return 0;
 
 abort_export:
@@ -7975,7 +7987,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
 	md_wakeup_thread(mddev->thread);
 	if (mddev->event_work.func)
 		queue_work(md_misc_wq, &mddev->event_work);
-	md_new_event(mddev);
+	md_new_event();
 }
 EXPORT_SYMBOL(md_error);
 
@@ -8859,7 +8871,7 @@ void md_do_sync(struct md_thread *thread)
 		mddev->curr_resync = 3; /* no longer delayed */
 	mddev->curr_resync_completed = j;
 	sysfs_notify_dirent_safe(mddev->sysfs_completed);
-	md_new_event(mddev);
+	md_new_event();
 	update_time = jiffies;
 
 	blk_start_plug(&plug);
@@ -8930,7 +8942,7 @@ void md_do_sync(struct md_thread *thread)
 			/* this is the earliest that rebuild will be
 			 * visible in /proc/mdstat
 			 */
-			md_new_event(mddev);
+			md_new_event();
 
 		if (last_check + window > io_sectors || j == max_sectors)
 			continue;
@@ -9154,7 +9166,7 @@ static int remove_and_add_spares(struct mddev *mddev,
 			sysfs_link_rdev(mddev, rdev);
 			if (!test_bit(Journal, &rdev->flags))
 				spares++;
-			md_new_event(mddev);
+			md_new_event();
 			set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
 		}
 	}
@@ -9188,7 +9200,7 @@ static void md_start_sync(struct work_struct *ws)
 	} else
 		md_wakeup_thread(mddev->sync_thread);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
-	md_new_event(mddev);
+	md_new_event();
 }
 
 /*
@@ -9447,7 +9459,7 @@ void md_reap_sync_thread(struct mddev *mddev)
 	/* flag recovery needed just to double check */
 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 	sysfs_notify_dirent_safe(mddev->sysfs_action);
-	md_new_event(mddev);
+	md_new_event();
 	if (mddev->event_work.func)
 		queue_work(md_misc_wq, &mddev->event_work);
 }
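The recurring change in this diff is mechanical: open-coded reads of the backing inode size are replaced by the block-layer size helpers, and md_new_event() drops its unused mddev argument. Below is a minimal sketch of the size equivalence being relied on; the sketch_* helpers are hypothetical illustrations, not part of the patch.

/*
 * Illustrative sketch only (not part of the patch): the assumed
 * equivalence between the block-layer size helpers used above and
 * the open-coded expressions they replace.
 */
#include <linux/blkdev.h>
#include <linux/fs.h>

static inline loff_t sketch_dev_bytes(struct block_device *bdev)
{
	/* what bdev_nr_bytes(bdev) reports: the device size in bytes */
	return i_size_read(bdev->bd_inode);
}

static inline sector_t sketch_dev_sectors(struct block_device *bdev)
{
	/*
	 * what bdev_nr_sectors(bdev) reports: the size in 512-byte
	 * sectors, i.e. the old "i_size_read(...) >> 9" / "/ 512" math.
	 */
	return sketch_dev_bytes(bdev) >> SECTOR_SHIFT;
}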