author		Qu Wenruo <wqu@suse.com>		2022-03-13 13:40:02 +0300
committer	David Sterba <dsterba@suse.com>		2022-05-16 18:03:07 +0300
commit		e360d2f5810434d44ee61b47437ecd5e66528a5a (patch)
tree		2a6d8e9e1a4e0d44eeeed394edd49b139d3d6156 /fs/btrfs/scrub.c
parent		463435017219296e9dbca102961eef55c76e5a72 (diff)
download	linux-e360d2f5810434d44ee61b47437ecd5e66528a5a.tar.xz
btrfs: scrub: rename scrub_bio::pagev and related members
Since the introduction of subpage support for scrub, one page no longer
always represents one sector, thus scrub_bio::pagev and
scrub_bio::page_count are no longer accurate.

Rename them to scrub_bio::sectors and scrub_bio::sector_count
respectively. This also covers scrub_ctx::pages_per_bio and the macros
involved (SCRUB_PAGES_PER_BIO becomes SCRUB_SECTORS_PER_BIO).

With this, the renaming of page-based names in scrub is finished.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
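For context on why the old names stopped being accurate: with subpage
support, the filesystem sector size can be smaller than PAGE_SIZE, so an
array sized and counted in pages no longer maps one-to-one onto the
sectors a bio carries. The following is a minimal standalone sketch of
the arithmetic, not kernel code; the 64KiB-page / 4KiB-sector values are
illustrative assumptions (e.g. an arm64 config with 64KiB pages):

#include <assert.h>
#include <stdio.h>

/* Assumed, illustrative values; not taken from the patch. */
#define EXAMPLE_PAGE_SIZE	(64 * 1024)
#define EXAMPLE_SECTORSIZE	(4 * 1024)
#define SECTORS_PER_BIO		32	/* mirrors SCRUB_SECTORS_PER_BIO */

int main(void)
{
	/* With subpage support, one page backs several sectors. */
	int sectors_per_page = EXAMPLE_PAGE_SIZE / EXAMPLE_SECTORSIZE;

	/* Here a single page holds 16 sectors, not 1. */
	assert(sectors_per_page == 16);

	/*
	 * The per-bio array still has 32 slots, but each slot is a
	 * sector: 32 * 4KiB = 128KiB of payload per bio, which is the
	 * figure the updated macro comment cites for 4KiB sectors.
	 */
	printf("%d sectors per page, %d KiB per bio\n",
	       sectors_per_page,
	       SECTORS_PER_BIO * EXAMPLE_SECTORSIZE / 1024);
	return 0;
}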
Diffstat (limited to 'fs/btrfs/scrub.c')
-rw-r--r--	fs/btrfs/scrub.c	78
1 file changed, 39 insertions(+), 39 deletions(-)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 10fc6bd8d199..96a8ad3bc1dd 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -45,8 +45,8 @@ struct scrub_ctx;
  * operations. The first one configures an upper limit for the number
  * of (dynamically allocated) pages that are added to a bio.
  */
-#define SCRUB_PAGES_PER_BIO	32	/* 128KiB per bio for x86 */
-#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for x86 */
+#define SCRUB_SECTORS_PER_BIO	32	/* 128KiB per bio for 4KiB pages */
+#define SCRUB_BIOS_PER_SCTX	64	/* 8MiB per device in flight for 4KiB pages */
 
 /*
  * The following value times PAGE_SIZE needs to be large enough to match the
@@ -87,8 +87,8 @@ struct scrub_bio {
 	blk_status_t		status;
 	u64			logical;
 	u64			physical;
-	struct scrub_sector	*pagev[SCRUB_PAGES_PER_BIO];
-	int			page_count;
+	struct scrub_sector	*sectors[SCRUB_SECTORS_PER_BIO];
+	int			sector_count;
 	int			next_free;
 	struct btrfs_work	work;
 };
@@ -158,7 +158,7 @@ struct scrub_ctx {
 	struct list_head	csum_list;
 	atomic_t		cancel_req;
 	int			readonly;
-	int			pages_per_bio;
+	int			sectors_per_bio;
 
 	/* State of IO submission throttling affecting the associated device */
 	ktime_t			throttle_deadline;
@@ -535,9 +535,9 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
 	if (sctx->curr != -1) {
 		struct scrub_bio *sbio = sctx->bios[sctx->curr];
 
-		for (i = 0; i < sbio->page_count; i++) {
-			WARN_ON(!sbio->pagev[i]->page);
-			scrub_block_put(sbio->pagev[i]->sblock);
+		for (i = 0; i < sbio->sector_count; i++) {
+			WARN_ON(!sbio->sectors[i]->page);
+			scrub_block_put(sbio->sectors[i]->sblock);
 		}
 		bio_put(sbio->bio);
 	}
@@ -572,7 +572,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 		goto nomem;
 	refcount_set(&sctx->refs, 1);
 	sctx->is_dev_replace = is_dev_replace;
-	sctx->pages_per_bio = SCRUB_PAGES_PER_BIO;
+	sctx->sectors_per_bio = SCRUB_SECTORS_PER_BIO;
 	sctx->curr = -1;
 	sctx->fs_info = fs_info;
 	INIT_LIST_HEAD(&sctx->csum_list);
@@ -586,7 +586,7 @@ static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(
 
 		sbio->index = i;
 		sbio->sctx = sctx;
-		sbio->page_count = 0;
+		sbio->sector_count = 0;
 		btrfs_init_work(&sbio->work, scrub_bio_end_io_worker, NULL,
 				NULL);
 
@@ -1643,10 +1643,10 @@ again:
 			return -ENOMEM;
 		}
 		sctx->wr_curr_bio->sctx = sctx;
-		sctx->wr_curr_bio->page_count = 0;
+		sctx->wr_curr_bio->sector_count = 0;
 	}
 	sbio = sctx->wr_curr_bio;
-	if (sbio->page_count == 0) {
+	if (sbio->sector_count == 0) {
 		struct bio *bio;
 
 		ret = fill_writer_pointer_gap(sctx, sector->physical_for_dev_replace);
@@ -1660,7 +1660,7 @@ again:
 		sbio->dev = sctx->wr_tgtdev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_bio_alloc(sctx->pages_per_bio);
+			bio = btrfs_bio_alloc(sctx->sectors_per_bio);
 			sbio->bio = bio;
 		}
 
@@ -1670,9 +1670,9 @@ again:
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		bio->bi_opf = REQ_OP_WRITE;
 		sbio->status = 0;
-	} else if (sbio->physical + sbio->page_count * sectorsize !=
+	} else if (sbio->physical + sbio->sector_count * sectorsize !=
 		   sector->physical_for_dev_replace ||
-		   sbio->logical + sbio->page_count * sectorsize !=
+		   sbio->logical + sbio->sector_count * sectorsize !=
 		   sector->logical) {
 		scrub_wr_submit(sctx);
 		goto again;
@@ -1680,7 +1680,7 @@ again:
 
 	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
 	if (ret != sectorsize) {
-		if (sbio->page_count < 1) {
+		if (sbio->sector_count < 1) {
 			bio_put(sbio->bio);
 			sbio->bio = NULL;
 			mutex_unlock(&sctx->wr_lock);
@@ -1690,10 +1690,10 @@ again:
 		goto again;
 	}
 
-	sbio->pagev[sbio->page_count] = sector;
+	sbio->sectors[sbio->sector_count] = sector;
 	scrub_sector_get(sector);
-	sbio->page_count++;
-	if (sbio->page_count == sctx->pages_per_bio)
+	sbio->sector_count++;
+	if (sbio->sector_count == sctx->sectors_per_bio)
 		scrub_wr_submit(sctx);
 	mutex_unlock(&sctx->wr_lock);
 
@@ -1718,7 +1718,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
 	btrfsic_submit_bio(sbio->bio);
 
 	if (btrfs_is_zoned(sctx->fs_info))
-		sctx->write_pointer = sbio->physical + sbio->page_count *
+		sctx->write_pointer = sbio->physical + sbio->sector_count *
 			sctx->fs_info->sectorsize;
 }
 
@@ -1740,21 +1740,21 @@ static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
 	struct scrub_ctx *sctx = sbio->sctx;
 	int i;
 
-	ASSERT(sbio->page_count <= SCRUB_PAGES_PER_BIO);
+	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
 	if (sbio->status) {
 		struct btrfs_dev_replace *dev_replace =
 			&sbio->sctx->fs_info->dev_replace;
 
-		for (i = 0; i < sbio->page_count; i++) {
-			struct scrub_sector *sector = sbio->pagev[i];
+		for (i = 0; i < sbio->sector_count; i++) {
+			struct scrub_sector *sector = sbio->sectors[i];
 
 			sector->io_error = 1;
 			atomic64_inc(&dev_replace->num_write_errors);
 		}
 	}
 
-	for (i = 0; i < sbio->page_count; i++)
-		scrub_sector_put(sbio->pagev[i]);
+	for (i = 0; i < sbio->sector_count; i++)
+		scrub_sector_put(sbio->sectors[i]);
 
 	bio_put(sbio->bio);
 	kfree(sbio);
@@ -2070,7 +2070,7 @@ again:
 		if (sctx->curr != -1) {
 			sctx->first_free = sctx->bios[sctx->curr]->next_free;
 			sctx->bios[sctx->curr]->next_free = -1;
-			sctx->bios[sctx->curr]->page_count = 0;
+			sctx->bios[sctx->curr]->sector_count = 0;
 			spin_unlock(&sctx->list_lock);
 		} else {
 			spin_unlock(&sctx->list_lock);
@@ -2078,7 +2078,7 @@ again:
 		}
 	}
 	sbio = sctx->bios[sctx->curr];
-	if (sbio->page_count == 0) {
+	if (sbio->sector_count == 0) {
 		struct bio *bio;
 
 		sbio->physical = sector->physical;
@@ -2086,7 +2086,7 @@ again:
 		sbio->dev = sector->dev;
 		bio = sbio->bio;
 		if (!bio) {
-			bio = btrfs_bio_alloc(sctx->pages_per_bio);
+			bio = btrfs_bio_alloc(sctx->sectors_per_bio);
 			sbio->bio = bio;
 		}
 
@@ -2096,19 +2096,19 @@ again:
 		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		bio->bi_opf = REQ_OP_READ;
 		sbio->status = 0;
-	} else if (sbio->physical + sbio->page_count * sectorsize !=
+	} else if (sbio->physical + sbio->sector_count * sectorsize !=
 		   sector->physical ||
-		   sbio->logical + sbio->page_count * sectorsize !=
+		   sbio->logical + sbio->sector_count * sectorsize !=
 		   sector->logical ||
 		   sbio->dev != sector->dev) {
 		scrub_submit(sctx);
 		goto again;
 	}
 
-	sbio->pagev[sbio->page_count] = sector;
+	sbio->sectors[sbio->sector_count] = sector;
 	ret = bio_add_page(sbio->bio, sector->page, sectorsize, 0);
 	if (ret != sectorsize) {
-		if (sbio->page_count < 1) {
+		if (sbio->sector_count < 1) {
 			bio_put(sbio->bio);
 			sbio->bio = NULL;
 			return -EIO;
@@ -2119,8 +2119,8 @@ again:
 	scrub_block_get(sblock); /* one for the page added to the bio */
 	atomic_inc(&sblock->outstanding_sectors);
-	sbio->page_count++;
-	if (sbio->page_count == sctx->pages_per_bio)
+	sbio->sector_count++;
+	if (sbio->sector_count == sctx->sectors_per_bio)
 		scrub_submit(sctx);
 
 	return 0;
@@ -2354,10 +2354,10 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	struct scrub_ctx *sctx = sbio->sctx;
 	int i;
 
-	ASSERT(sbio->page_count <= SCRUB_PAGES_PER_BIO);
+	ASSERT(sbio->sector_count <= SCRUB_SECTORS_PER_BIO);
 	if (sbio->status) {
-		for (i = 0; i < sbio->page_count; i++) {
-			struct scrub_sector *sector = sbio->pagev[i];
+		for (i = 0; i < sbio->sector_count; i++) {
+			struct scrub_sector *sector = sbio->sectors[i];
 
 			sector->io_error = 1;
 			sector->sblock->no_io_error_seen = 0;
@@ -2365,8 +2365,8 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
 	}
 
 	/* Now complete the scrub_block items that have all pages completed */
-	for (i = 0; i < sbio->page_count; i++) {
-		struct scrub_sector *sector = sbio->pagev[i];
+	for (i = 0; i < sbio->sector_count; i++) {
+		struct scrub_sector *sector = sbio->sectors[i];
 		struct scrub_block *sblock = sector->sblock;
 
 		if (atomic_dec_and_test(&sblock->outstanding_sectors))
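A brief note on the offset arithmetic these hunks touch: in both the
read and write paths above, a sector may only be appended to the
in-flight bio if it sits exactly sector_count sectors past the bio's
start, both physically and logically; otherwise the current bio is
submitted and the sector retried against a fresh one (the
"scrub_submit(sctx); goto again;" pattern). Below is a minimal
standalone sketch of that contiguity check; the struct and values are
simplified stand-ins for the kernel's types, and a fixed 4KiB sector
size is assumed for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTORSIZE	4096u	/* assumed fixed for this sketch */

/* Simplified stand-in for the struct scrub_bio fields used here. */
struct sbio_pos {
	uint64_t physical;	/* physical start of the bio */
	uint64_t logical;	/* logical start of the bio */
	int sector_count;	/* sectors already queued in the bio */
};

/*
 * Mirrors the contiguity test in the hunks above: the next sector must
 * continue the bio both physically and logically, exactly
 * sector_count * sectorsize bytes past its start.
 */
static bool can_append(const struct sbio_pos *sbio,
		       uint64_t sector_physical, uint64_t sector_logical)
{
	uint64_t off = (uint64_t)sbio->sector_count * SECTORSIZE;

	return sbio->physical + off == sector_physical &&
	       sbio->logical + off == sector_logical;
}

int main(void)
{
	struct sbio_pos sbio = {
		.physical = 1 << 20,	/* 1MiB, arbitrary */
		.logical = 16 << 20,	/* 16MiB, arbitrary */
		.sector_count = 3,
	};
	/* The next sector (index 3) must start 3 sectors past the bio. */
	uint64_t phys = sbio.physical + 3 * SECTORSIZE;
	uint64_t log = sbio.logical + 3 * SECTORSIZE;

	printf("contiguous: %d\n", can_append(&sbio, phys, log));		/* 1 */
	printf("gap:        %d\n", can_append(&sbio, phys + SECTORSIZE, log));	/* 0 */
	return 0;
}

Counting in sectors rather than pages is what keeps this offset math
correct once sectorsize and PAGE_SIZE diverge, which is the point of
the rename.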