author     Song Liu <songliubraving@fb.com>   2020-12-09 22:38:11 +0300
committer  Song Liu <songliubraving@fb.com>   2020-12-10 07:46:00 +0300
commit     82fe9af77cd11ea7bdc133ceed1f7f5fc08f7d25 (patch)
tree       89ef11ea3a1bc8ea2ea14dc0a4a7a42d98b953de /drivers/md/raid10.c
parent     e2782f560c298efc2e23c7e54b3acf54e8a6ba72 (diff)
download   linux-82fe9af77cd11ea7bdc133ceed1f7f5fc08f7d25.tar.xz
Revert "md/raid10: improve discard request for far layout"
This reverts commit d3ee2d8415a6256c1c41e1be36e80e640c3e6359.

Matthew Ruffell reported data corruption in raid10 due to the changes in
discard handling [1]. Revert these changes before we find a proper fix.

[1] https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1907262/

Cc: Matthew Ruffell <matthew.ruffell@canonical.com>
Cc: Xiao Ni <xni@redhat.com>
Signed-off-by: Song Liu <songliubraving@fb.com>
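The revert goes back to the plain reference-count completion visible in the hunks below: each per-device discard bio dropped in raid10_end_discard_request(), plus the final drop in raid10_handle_discard() itself, decrements a single r10_bio->remaining counter, and whichever decrement reaches zero calls md_write_end() and raid_end_bio_io() on the master bio. A minimal stand-alone C sketch of that completion pattern (illustrative names such as demo_r10bio and demo_end_discard, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

struct demo_r10bio {
	atomic_int remaining;	/* outstanding per-device discard bios */
};

/* Stand-in for md_write_end() + raid_end_bio_io(): complete the master bio. */
static void demo_master_done(struct demo_r10bio *r10)
{
	(void)r10;
	printf("master discard bio completed\n");
}

/* Per-device completion: only the caller that drops the last reference
 * finishes the master bio, mirroring atomic_dec_and_test(&r10_bio->remaining). */
static void demo_end_discard(struct demo_r10bio *r10)
{
	if (atomic_fetch_sub(&r10->remaining, 1) == 1)
		demo_master_done(r10);
}

int main(void)
{
	struct demo_r10bio r10;
	int ndisks = 4;

	atomic_store(&r10.remaining, ndisks);
	for (int i = 0; i < ndisks; i++)
		demo_end_discard(&r10);	/* the last call completes the master */
	return 0;
}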
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--	drivers/md/raid10.c	86
1 file changed, 23 insertions, 63 deletions
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index b7bca6703df8..05773ee1b5ba 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1534,28 +1534,6 @@ static struct bio *raid10_split_bio(struct r10conf *conf,
 	return bio;
 }
 
-static void raid_end_discard_bio(struct r10bio *r10bio)
-{
-	struct r10conf *conf = r10bio->mddev->private;
-	struct r10bio *first_r10bio;
-
-	while (atomic_dec_and_test(&r10bio->remaining)) {
-
-		allow_barrier(conf);
-
-		if (!test_bit(R10BIO_Discard, &r10bio->state)) {
-			first_r10bio = (struct r10bio *)r10bio->master_bio;
-			free_r10bio(r10bio);
-			r10bio = first_r10bio;
-		} else {
-			md_write_end(r10bio->mddev);
-			bio_endio(r10bio->master_bio);
-			free_r10bio(r10bio);
-			break;
-		}
-	}
-}
-
 static void raid10_end_discard_request(struct bio *bio)
 {
 	struct r10bio *r10_bio = bio->bi_private;
@@ -1582,7 +1560,11 @@ static void raid10_end_discard_request(struct bio *bio)
 		rdev = conf->mirrors[dev].rdev;
 	}
 
-	raid_end_discard_bio(r10_bio);
+	if (atomic_dec_and_test(&r10_bio->remaining)) {
+		md_write_end(r10_bio->mddev);
+		raid_end_bio_io(r10_bio);
+	}
+
 	rdev_dec_pending(rdev, conf->mddev);
 }
@@ -1595,9 +1577,7 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 {
 	struct r10conf *conf = mddev->private;
 	struct geom *geo = &conf->geo;
-	struct r10bio *r10_bio, *first_r10bio;
-	int far_copies = geo->far_copies;
-	bool first_copy = true;
+	struct r10bio *r10_bio;
 	int disk;
 	sector_t chunk;
@@ -1636,20 +1616,30 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 	if (bio_sectors(bio) < stripe_size*2)
 		goto out;
 
-	/* For far and far offset layout, if bio is not aligned with stripe size,
-	 * it splits the part that is not aligned with strip size.
+	/* For far offset layout, if bio is not aligned with stripe size, it splits
+	 * the part that is not aligned with strip size.
 	 */
 	div_u64_rem(bio_start, stripe_size, &remainder);
-	if ((far_copies > 1) && remainder) {
+	if (geo->far_offset && remainder) {
 		split_size = stripe_size - remainder;
 		bio = raid10_split_bio(conf, bio, split_size, false);
 	}
 	div_u64_rem(bio_end, stripe_size, &remainder);
-	if ((far_copies > 1) && remainder) {
+	if (geo->far_offset && remainder) {
 		split_size = bio_sectors(bio) - remainder;
 		bio = raid10_split_bio(conf, bio, split_size, true);
 	}
 
+	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
+	r10_bio->mddev = mddev;
+	r10_bio->state = 0;
+	r10_bio->sectors = 0;
+	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
+
+	wait_blocked_dev(mddev, r10_bio);
+
+	r10_bio->master_bio = bio;
+
 	bio_start = bio->bi_iter.bi_sector;
 	bio_end = bio_end_sector(bio);
@@ -1675,28 +1665,6 @@ static int raid10_handle_discard(struct mddev *mddev, struct bio *bio)
 	end_disk_offset = (bio_end & geo->chunk_mask) +
 		(last_stripe_index << geo->chunk_shift);
 
-retry_discard:
-	r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
-	r10_bio->mddev = mddev;
-	r10_bio->state = 0;
-	r10_bio->sectors = 0;
-	memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
-	wait_blocked_dev(mddev, r10_bio);
-
-	/* For far layout it needs more than one r10bio to cover all regions.
-	 * Inspired by raid10_sync_request, we can use the first r10bio->master_bio
-	 * to record the discard bio. Other r10bio->master_bio record the first
-	 * r10bio. The first r10bio only release after all other r10bios finish.
-	 * The discard bio returns only first r10bio finishes
-	 */
-	if (first_copy) {
-		r10_bio->master_bio = bio;
-		set_bit(R10BIO_Discard, &r10_bio->state);
-		first_copy = false;
-		first_r10bio = r10_bio;
-	} else
-		r10_bio->master_bio = (struct bio *)first_r10bio;
-
 	rcu_read_lock();
 	for (disk = 0; disk < geo->raid_disks; disk++) {
 		struct md_rdev *rdev = rcu_dereference(conf->mirrors[disk].rdev);
@@ -1787,19 +1755,11 @@ retry_discard:
 		}
 	}
 
-	if (!geo->far_offset && --far_copies) {
-		first_stripe_index += geo->stride >> geo->chunk_shift;
-		start_disk_offset += geo->stride;
-		last_stripe_index += geo->stride >> geo->chunk_shift;
-		end_disk_offset += geo->stride;
-		atomic_inc(&first_r10bio->remaining);
-		raid_end_discard_bio(r10_bio);
-		wait_barrier(conf);
-		goto retry_discard;
+	if (atomic_dec_and_test(&r10_bio->remaining)) {
+		md_write_end(r10_bio->mddev);
+		raid_end_bio_io(r10_bio);
 	}
 
-	raid_end_discard_bio(r10_bio);
-
 	return 0;
 out:
 	allow_barrier(conf);