Diffstat (limited to 'fs/btrfs/raid56.c')
-rw-r--r--  fs/btrfs/raid56.c  514
1 file changed, 508 insertions, 6 deletions
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0600bf69199e..b85d68f721b8 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -72,6 +72,7 @@
enum btrfs_rbio_ops {
BTRFS_RBIO_WRITE = 0,
BTRFS_RBIO_READ_REBUILD = 1,
+ BTRFS_RBIO_PARITY_SCRUB = 2,
};
struct btrfs_raid_bio {
@@ -130,6 +131,7 @@ struct btrfs_raid_bio {
/* number of data stripes (no p/q) */
int nr_data;
+ int stripe_npages;
/*
* set if we're doing a parity rebuild
* for a read from higher up, which is handled
@@ -144,6 +146,7 @@ struct btrfs_raid_bio {
/* second bad stripe (for raid6 use) */
int failb;
+ int scrubp;
/*
* number of pages needed to represent the full
* stripe
@@ -178,6 +181,11 @@ struct btrfs_raid_bio {
* here for faster lookup
*/
struct page **bio_pages;
+
+ /*
+ * bitmap to record which horizontal stripe has data
+ */
+ unsigned long *dbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
@@ -192,6 +200,10 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
+static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ int need_check);
+static void async_scrub_parity(struct btrfs_raid_bio *rbio);
+
/*
* the stripe hash table is used for locking, and to collect
* bios in hopes of making a full stripe
@@ -593,10 +605,20 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
cur->raid_map[0])
return 0;
- /* reads can't merge with writes */
- if (last->operation != cur->operation) {
+ /* we can't merge with different operations */
+ if (last->operation != cur->operation)
+ return 0;
+ /*
+ * Parity scrub needs to read the full stripe from the drive,
+ * check and repair the parity and write the new results back.
+ *
+ * We're not allowed to add any new bios to the
+ * bio list here; anyone else that wants to
+ * change this stripe needs to do their own rmw.
+ */
+ if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
+ cur->operation == BTRFS_RBIO_PARITY_SCRUB)
return 0;
- }
return 1;
}
@@ -789,9 +811,12 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
if (next->operation == BTRFS_RBIO_READ_REBUILD)
async_read_rebuild(next);
- else if (next->operation == BTRFS_RBIO_WRITE){
+ else if (next->operation == BTRFS_RBIO_WRITE) {
steal_rbio(rbio, next);
async_rmw_stripe(next);
+ } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
+ steal_rbio(rbio, next);
+ async_scrub_parity(next);
}
goto done_nolock;
@@ -957,9 +982,11 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
struct btrfs_raid_bio *rbio;
int nr_data = 0;
int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes);
+ int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
void *p;
- rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2,
+ rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
+ DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
GFP_NOFS);
if (!rbio)
return ERR_PTR(-ENOMEM);
@@ -974,6 +1001,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
rbio->fs_info = root->fs_info;
rbio->stripe_len = stripe_len;
rbio->nr_pages = num_pages;
+ rbio->stripe_npages = stripe_npages;
rbio->faila = -1;
rbio->failb = -1;
atomic_set(&rbio->refs, 1);
@@ -987,6 +1015,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
p = rbio + 1;
rbio->stripe_pages = p;
rbio->bio_pages = p + sizeof(struct page *) * num_pages;
+ rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
nr_data = bbio->num_stripes - 2;
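
(As a rough worked example of the allocation and layout above, with illustrative numbers only: with 4KiB pages, a 64KiB stripe_len and a 3-device RAID5 chunk on a 64-bit machine, stripe_npages = 16 and num_pages = 48, so the single kzalloc is sizeof(*rbio) + 48 * 8 * 2 = 768 bytes of page pointers plus DIV_ROUND_UP(16, 8) = 2 bytes of dbitmap, and stripe_pages, bio_pages and dbitmap are carved out of the trailing part of that buffer in that order.)
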
@@ -1781,6 +1810,14 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
index_rbio_pages(rbio);
for (pagenr = 0; pagenr < nr_pages; pagenr++) {
+ /*
+ * When doing a parity scrub, the bitmap marks the horizontal
+ * stripes in which we have data; skip the rest.
+ */
+ if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
+ !test_bit(pagenr, rbio->dbitmap))
+ continue;
+
/* setup our array of pointers with pages
* from each stripe
*/
@@ -1925,7 +1962,13 @@ cleanup_io:
} else if (err == 0) {
rbio->faila = -1;
rbio->failb = -1;
- finish_rmw(rbio);
+
+ if (rbio->operation == BTRFS_RBIO_WRITE)
+ finish_rmw(rbio);
+ else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
+ finish_parity_scrub(rbio, 0);
+ else
+ BUG();
} else {
rbio_orig_end_io(rbio, err, 0);
}
@@ -2133,3 +2176,462 @@ static void read_rebuild_work(struct btrfs_work *work)
rbio = container_of(work, struct btrfs_raid_bio, work);
__raid56_parity_recover(rbio);
}
+
+/*
+ * The following code is used to scrub/replace the parity stripe
+ *
+ * Note: We need to make sure that all the pages added to the scrub/replace
+ * raid bio are correct and will not be changed during the scrub/replace.
+ * That is, those pages only hold metadata or file data with checksums.
+ */
+
+struct btrfs_raid_bio *
+raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
+ struct btrfs_bio *bbio, u64 *raid_map,
+ u64 stripe_len, struct btrfs_device *scrub_dev,
+ unsigned long *dbitmap, int stripe_nsectors)
+{
+ struct btrfs_raid_bio *rbio;
+ int i;
+
+ rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+ if (IS_ERR(rbio))
+ return NULL;
+ bio_list_add(&rbio->bio_list, bio);
+ /*
+ * This is a special bio which is used to hold the completion handler
+ * and make the scrub rbio similar to the other types
+ */
+ ASSERT(!bio->bi_iter.bi_size);
+ rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
+
+ for (i = 0; i < bbio->num_stripes; i++) {
+ if (bbio->stripes[i].dev == scrub_dev) {
+ rbio->scrubp = i;
+ break;
+ }
+ }
+
+ /* For now we only support the case where sectorsize equals page size */
+ ASSERT(root->sectorsize == PAGE_SIZE);
+ ASSERT(rbio->stripe_npages == stripe_nsectors);
+ bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
+
+ return rbio;
+}
+
+void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
+ struct page *page, u64 logical)
+{
+ int stripe_offset;
+ int index;
+
+ ASSERT(logical >= rbio->raid_map[0]);
+ ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] +
+ rbio->stripe_len * rbio->nr_data);
+ stripe_offset = (int)(logical - rbio->raid_map[0]);
+ index = stripe_offset >> PAGE_CACHE_SHIFT;
+ rbio->bio_pages[index] = page;
+}
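
The two helpers above, together with raid56_parity_submit_scrub_rbio() below, form the interface this patch exports to the scrub code. A minimal sketch of how a caller might drive them, assuming the caller already holds the bbio/raid_map mapping, a dummy bio for the completion handler and the verified data pages (the helper name and its parameters are illustrative, not part of the patch):

static int scrub_one_parity_stripe(struct btrfs_root *root, struct bio *bio,
				   struct btrfs_bio *bbio, u64 *raid_map,
				   u64 stripe_len, struct btrfs_device *sdev,
				   unsigned long *sector_bitmap, int nsectors,
				   struct page **data_pages, u64 *logicals,
				   int npages)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = raid56_parity_alloc_scrub_rbio(root, bio, bbio, raid_map,
					      stripe_len, sdev,
					      sector_bitmap, nsectors);
	if (!rbio)
		return -ENOMEM;

	/* hand over the data pages we already read and verified */
	for (i = 0; i < npages; i++)
		raid56_parity_add_scrub_pages(rbio, data_pages[i],
					      logicals[i]);

	/* reads the rest of the stripe and rewrites any bad parity */
	raid56_parity_submit_scrub_rbio(rbio);
	return 0;
}
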
+
+/*
+ * We only scrub the parity whose horizontal stripe holds correct data,
+ * so we needn't allocate pages for all the stripes.
+ */
+static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
+{
+ int i;
+ int bit;
+ int index;
+ struct page *page;
+
+ for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
+ for (i = 0; i < rbio->bbio->num_stripes; i++) {
+ index = i * rbio->stripe_npages + bit;
+ if (rbio->stripe_pages[index])
+ continue;
+
+ page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!page)
+ return -ENOMEM;
+ rbio->stripe_pages[index] = page;
+ ClearPageUptodate(page);
+ }
+ }
+ return 0;
+}
+
+/*
+ * end io function used by finish_parity_scrub. When we finally
+ * get here, we've written back the parity pages that needed repair
+ */
+static void raid_write_parity_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (err)
+ fail_bio_stripe(rbio, bio);
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
+ return;
+
+ err = 0;
+
+ if (atomic_read(&rbio->error))
+ err = -EIO;
+
+ rbio_orig_end_io(rbio, err, 0);
+}
+
+static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
+ int need_check)
+{
+ struct btrfs_bio *bbio = rbio->bbio;
+ void *pointers[bbio->num_stripes];
+ int nr_data = rbio->nr_data;
+ int stripe;
+ int pagenr;
+ int p_stripe = -1;
+ int q_stripe = -1;
+ struct page *p_page = NULL;
+ struct page *q_page = NULL;
+ struct bio_list bio_list;
+ struct bio *bio;
+ int ret;
+
+ bio_list_init(&bio_list);
+
+ if (bbio->num_stripes - rbio->nr_data == 1) {
+ p_stripe = bbio->num_stripes - 1;
+ } else if (bbio->num_stripes - rbio->nr_data == 2) {
+ p_stripe = bbio->num_stripes - 2;
+ q_stripe = bbio->num_stripes - 1;
+ } else {
+ BUG();
+ }
+
+ /*
+ * The higher layers (the scrubber) are unlikely to use this
+ * area of the disk again soon, so don't cache it.
+ */
+ clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
+
+ if (!need_check)
+ goto writeback;
+
+ p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!p_page)
+ goto cleanup;
+ SetPageUptodate(p_page);
+
+ if (q_stripe != -1) {
+ q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (!q_page) {
+ __free_page(p_page);
+ goto cleanup;
+ }
+ SetPageUptodate(q_page);
+ }
+
+ atomic_set(&rbio->error, 0);
+
+ for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ struct page *p;
+ void *parity;
+ /* first collect one page from each data stripe */
+ for (stripe = 0; stripe < nr_data; stripe++) {
+ p = page_in_rbio(rbio, stripe, pagenr, 0);
+ pointers[stripe] = kmap(p);
+ }
+
+ /* then add the parity stripe */
+ pointers[stripe++] = kmap(p_page);
+
+ if (q_stripe != -1) {
+
+ /*
+ * raid6, add the qstripe and call the
+ * library function to fill in our p/q
+ */
+ pointers[stripe++] = kmap(q_page);
+
+ raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE,
+ pointers);
+ } else {
+ /* raid5 */
+ memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
+ run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
+ }
+
+ /* Check the scrubbing parity and repair it if needed */
+ p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
+ parity = kmap(p);
+ if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
+ memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
+ else
+ /* The parity is right, no need to write it back */
+ bitmap_clear(rbio->dbitmap, pagenr, 1);
+ kunmap(p);
+
+ for (stripe = 0; stripe < bbio->num_stripes; stripe++)
+ kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
+ }
+
+ __free_page(p_page);
+ if (q_page)
+ __free_page(q_page);
+
+writeback:
+ /*
+ * time to start writing. Make bios only for the pages of the
+ * scrubbed parity stripe that still need to be repaired; ignore
+ * everything else.
+ */
+ for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ struct page *page;
+
+ page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
+ ret = rbio_add_io_page(rbio, &bio_list,
+ page, rbio->scrubp, pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+
+ nr_data = bio_list_size(&bio_list);
+ if (!nr_data) {
+ /* Every parity is right */
+ rbio_orig_end_io(rbio, 0, 0);
+ return;
+ }
+
+ atomic_set(&rbio->stripes_pending, nr_data);
+
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid_write_parity_end_io;
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(WRITE, bio);
+ }
+ return;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+}
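
The heart of the need_check path above is: recompute the parity from the data blocks, compare it with what is on disk, and only queue a writeback when they differ. A minimal standalone sketch of that idea for the RAID5 (XOR) case, on plain buffers instead of mapped pages (the name and signature are illustrative, not part of the patch):

static bool raid5_parity_needs_rewrite(void **data, int nr_data,
				       void *on_disk_parity,
				       void *scratch, size_t len)
{
	u8 *p = scratch;	/* caller-provided buffer of len bytes */
	size_t i;
	int d;

	/* parity = XOR of all the data blocks */
	memcpy(p, data[0], len);
	for (d = 1; d < nr_data; d++)
		for (i = 0; i < len; i++)
			p[i] ^= ((const u8 *)data[d])[i];

	/* parity on disk already matches: nothing to write back */
	if (!memcmp(p, on_disk_parity, len))
		return false;

	/* repair in memory; the caller writes this block back */
	memcpy(on_disk_parity, p, len);
	return true;
}
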
+
+static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
+{
+ if (stripe >= 0 && stripe < rbio->nr_data)
+ return 1;
+ return 0;
+}
+
+/*
+ * While we're doing the parity check and repair, we could have errors
+ * in reading pages off the disk. This checks for errors, and if we're
+ * not able to read a page it'll trigger reconstruction from parity. The
+ * parity scrub will be finished after we've reconstructed the failed
+ * stripes
+ */
+static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
+{
+ if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
+ goto cleanup;
+
+ if (rbio->faila >= 0 || rbio->failb >= 0) {
+ int dfail = 0, failp = -1;
+
+ if (is_data_stripe(rbio, rbio->faila))
+ dfail++;
+ else if (is_parity_stripe(rbio->faila))
+ failp = rbio->faila;
+
+ if (is_data_stripe(rbio, rbio->failb))
+ dfail++;
+ else if (is_parity_stripe(rbio->failb))
+ failp = rbio->failb;
+
+ /*
+ * Since we cannot use the parity being scrubbed to repair
+ * the data, our repair capability is reduced (in the case
+ * of RAID5, we cannot repair anything).
+ */
+ if (dfail > rbio->bbio->max_errors - 1)
+ goto cleanup;
+
+ /*
+ * If all the data is good and only the parity is bad, just
+ * repair the parity.
+ */
+ if (dfail == 0) {
+ finish_parity_scrub(rbio, 0);
+ return;
+ }
+
+ /*
+ * Here we have one corrupted data stripe and one corrupted parity
+ * on RAID6. If the corrupted parity is the one being scrubbed,
+ * luckily we can use the other parity to repair the data; otherwise
+ * we cannot repair the data stripe.
+ */
+ if (failp != rbio->scrubp)
+ goto cleanup;
+
+ __raid_recover_end_io(rbio);
+ } else {
+ finish_parity_scrub(rbio, 1);
+ }
+ return;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+}
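
The failure handling above boils down to a small decision on how many data stripes failed and which parity failed. A compact restatement of that decision, with the rbio plumbing stripped out (the function name and return convention are illustrative, not part of the patch):

/* max_errors is 1 for RAID5 and 2 for RAID6; dfail counts failed data
 * stripes; failp is the index of a failed parity stripe, or -1 if none;
 * scrubp is the parity stripe being scrubbed.
 */
static int scrub_failure_action(int dfail, int failp, int scrubp,
				int max_errors)
{
	/* the scrubbed parity cannot be used for repair, so we can
	 * tolerate one failure fewer than usual
	 */
	if (dfail > max_errors - 1)
		return -EIO;	/* give up                              */
	if (dfail == 0)
		return 0;	/* only parity was bad: just rewrite it */
	if (failp != scrubp)
		return -EIO;	/* give up                              */
	return 1;		/* rebuild the data first, then scrub   */
}
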
+
+/*
+ * end io for the read phase of the parity scrub. All the bios here are
+ * physical stripe bios we've read from the disk so we can recalculate the
+ * parity of the stripe.
+ *
+ * This will usually kick off finish_parity_scrub once all the bios are read
+ * in, but it may trigger reconstruction if we had any errors along the way
+ */
+static void raid56_parity_scrub_end_io(struct bio *bio, int err)
+{
+ struct btrfs_raid_bio *rbio = bio->bi_private;
+
+ if (err)
+ fail_bio_stripe(rbio, bio);
+ else
+ set_bio_pages_uptodate(bio);
+
+ bio_put(bio);
+
+ if (!atomic_dec_and_test(&rbio->stripes_pending))
+ return;
+
+ /*
+ * this will normally call finish_parity_scrub to start our write,
+ * but if there are any failed stripes we'll reconstruct from
+ * parity first
+ */
+ validate_rbio_for_parity_scrub(rbio);
+}
+
+static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
+{
+ int bios_to_read = 0;
+ struct btrfs_bio *bbio = rbio->bbio;
+ struct bio_list bio_list;
+ int ret;
+ int pagenr;
+ int stripe;
+ struct bio *bio;
+
+ ret = alloc_rbio_essential_pages(rbio);
+ if (ret)
+ goto cleanup;
+
+ bio_list_init(&bio_list);
+
+ atomic_set(&rbio->error, 0);
+ /*
+ * build a list of bios to read all the missing parts of this
+ * stripe
+ */
+ for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
+ for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
+ struct page *page;
+ /*
+ * we want to find all the pages missing from
+ * the rbio and read them from the disk. If
+ * page_in_rbio finds a page in the bio list
+ * we don't need to read it off the stripe.
+ */
+ page = page_in_rbio(rbio, stripe, pagenr, 1);
+ if (page)
+ continue;
+
+ page = rbio_stripe_page(rbio, stripe, pagenr);
+ /*
+ * the bio cache may have handed us an uptodate
+ * page. If so, be happy and use it
+ */
+ if (PageUptodate(page))
+ continue;
+
+ ret = rbio_add_io_page(rbio, &bio_list, page,
+ stripe, pagenr, rbio->stripe_len);
+ if (ret)
+ goto cleanup;
+ }
+ }
+
+ bios_to_read = bio_list_size(&bio_list);
+ if (!bios_to_read) {
+ /*
+ * this can happen if others have merged with
+ * us; it means there is nothing left to read.
+ * But if there are missing devices it may not be
+ * safe to do the full stripe write yet.
+ */
+ goto finish;
+ }
+
+ /*
+ * the bbio may be freed once we submit the last bio. Make sure
+ * not to touch it after that
+ */
+ atomic_set(&rbio->stripes_pending, bios_to_read);
+ while (1) {
+ bio = bio_list_pop(&bio_list);
+ if (!bio)
+ break;
+
+ bio->bi_private = rbio;
+ bio->bi_end_io = raid56_parity_scrub_end_io;
+
+ btrfs_bio_wq_end_io(rbio->fs_info, bio,
+ BTRFS_WQ_ENDIO_RAID56);
+
+ BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
+ submit_bio(READ, bio);
+ }
+ /* the actual write will happen once the reads are done */
+ return;
+
+cleanup:
+ rbio_orig_end_io(rbio, -EIO, 0);
+ return;
+
+finish:
+ validate_rbio_for_parity_scrub(rbio);
+}
+
+static void scrub_parity_work(struct btrfs_work *work)
+{
+ struct btrfs_raid_bio *rbio;
+
+ rbio = container_of(work, struct btrfs_raid_bio, work);
+ raid56_parity_scrub_stripe(rbio);
+}
+
+static void async_scrub_parity(struct btrfs_raid_bio *rbio)
+{
+ btrfs_init_work(&rbio->work, btrfs_rmw_helper,
+ scrub_parity_work, NULL, NULL);
+
+ btrfs_queue_work(rbio->fs_info->rmw_workers,
+ &rbio->work);
+}
+
+void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
+{
+ if (!lock_stripe_add(rbio))
+ async_scrub_parity(rbio);
+}