diff options
author | Guoqing Jiang <guoqing.jiang@cloud.ionos.com> | 2019-12-23 12:49:00 +0300 |
---|---|---|
committer | Song Liu <songliubraving@fb.com> | 2020-01-13 22:44:10 +0300 |
commit | 69b00b5bb23552d43e8bbed73ef6624604bb94a2 (patch) | |
tree | 6192d67dc8dab6d064c370666d990f22081c189b /drivers/md/md.c | |
parent | 4d26d32fe4dafd29e168addb7c11949a36e7e5f8 (diff) | |
download | linux-69b00b5bb23552d43e8bbed73ef6624604bb94a2.tar.xz |
md: introduce a new struct for IO serialization
Obviously, IO serialization can degrade performance significantly.
To reduce that degradation, an rb interval tree is added in raid1
to speed up the collision check.
So, a rb root is needed in md_rdev, then abstract all the
serialize related members to a new struct (serial_in_rdev),
embed it into md_rdev.
Of course, we need to free the struct when it is no longer needed,
so rdev/rdevs_uninit_serial are added accordingly. They should be
called when destroying the memory pool or when memory allocation
fails.
And we need to consider to call mddev_destroy_serial_pool
in case serialize_policy/write-behind is disabled, bitmap
is destroyed or in __md_stop_writes.
Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r-- | drivers/md/md.c | 80 |
1 files changed, 62 insertions, 18 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c index 788559f42d43..9c4e61c988ac 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -125,25 +125,59 @@ static inline int speed_max(struct mddev *mddev) mddev->sync_speed_max : sysctl_speed_limit_max; } +static void rdev_uninit_serial(struct md_rdev *rdev) +{ + if (!test_and_clear_bit(CollisionCheck, &rdev->flags)) + return; + + kfree(rdev->serial); + rdev->serial = NULL; +} + +static void rdevs_uninit_serial(struct mddev *mddev) +{ + struct md_rdev *rdev; + + rdev_for_each(rdev, mddev) + rdev_uninit_serial(rdev); +} + static int rdev_init_serial(struct md_rdev *rdev) { - spin_lock_init(&rdev->serial_list_lock); - INIT_LIST_HEAD(&rdev->serial_list); - init_waitqueue_head(&rdev->serial_io_wait); + struct serial_in_rdev *serial = NULL; + + if (test_bit(CollisionCheck, &rdev->flags)) + return 0; + + serial = kmalloc(sizeof(struct serial_in_rdev), GFP_KERNEL); + if (!serial) + return -ENOMEM; + + spin_lock_init(&serial->serial_lock); + serial->serial_rb = RB_ROOT_CACHED; + init_waitqueue_head(&serial->serial_io_wait); + rdev->serial = serial; set_bit(CollisionCheck, &rdev->flags); - return 1; + return 0; } -static void rdevs_init_serial(struct mddev *mddev) +static int rdevs_init_serial(struct mddev *mddev) { struct md_rdev *rdev; + int ret = 0; rdev_for_each(rdev, mddev) { - if (test_bit(CollisionCheck, &rdev->flags)) - continue; - rdev_init_serial(rdev); + ret = rdev_init_serial(rdev); + if (ret) + break; } + + /* Free all resources if pool is not existed */ + if (ret && !mddev->serial_info_pool) + rdevs_uninit_serial(mddev); + + return ret; } /* @@ -166,6 +200,8 @@ static int rdev_need_serial(struct md_rdev *rdev) void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, bool is_suspend) { + int ret = 0; + if (rdev && !rdev_need_serial(rdev) && !test_bit(CollisionCheck, &rdev->flags)) return; @@ -174,9 +210,11 @@ void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, 
mddev_suspend(mddev); if (!rdev) - rdevs_init_serial(mddev); + ret = rdevs_init_serial(mddev); else - rdev_init_serial(rdev); + ret = rdev_init_serial(rdev); + if (ret) + goto abort; if (mddev->serial_info_pool == NULL) { unsigned int noio_flag; @@ -186,9 +224,13 @@ void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, mempool_create_kmalloc_pool(NR_SERIAL_INFOS, sizeof(struct serial_info)); memalloc_noio_restore(noio_flag); - if (!mddev->serial_info_pool) + if (!mddev->serial_info_pool) { + rdevs_uninit_serial(mddev); pr_err("can't alloc memory pool for serialization\n"); + } } + +abort: if (!is_suspend) mddev_resume(mddev); } @@ -199,8 +241,8 @@ void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev, * 2. when bitmap is destroyed while policy is not enabled. * 3. for disable policy, the pool is destroyed only when no rdev needs it. */ -static void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, - bool is_suspend) +void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, + bool is_suspend) { if (rdev && !test_bit(CollisionCheck, &rdev->flags)) return; @@ -213,8 +255,9 @@ static void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, mddev_suspend(mddev); rdev_for_each(temp, mddev) { if (!rdev) { - if (!rdev_need_serial(temp)) - clear_bit(CollisionCheck, &temp->flags); + if (!mddev->serialize_policy || + !rdev_need_serial(temp)) + rdev_uninit_serial(temp); else num++; } else if (temp != rdev && @@ -223,7 +266,7 @@ static void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev, } if (rdev) - clear_bit(CollisionCheck, &rdev->flags); + rdev_uninit_serial(rdev); if (num) pr_info("The mempool could be used by other devices\n"); @@ -6117,8 +6160,9 @@ static void __md_stop_writes(struct mddev *mddev) mddev->in_sync = 1; md_update_sb(mddev, 1); } - mempool_destroy(mddev->serial_info_pool); - mddev->serial_info_pool = NULL; + /* disable policy to guarantee rdevs free 
resources for serialization */ + mddev->serialize_policy = 0; + mddev_destroy_serial_pool(mddev, NULL, true); } void md_stop_writes(struct mddev *mddev) |