From 29e70be261d92829af26fda16769784710f45503 Mon Sep 17 00:00:00 2001
From: Anand Jain
Date: Sat, 15 Apr 2023 17:51:23 +0800
Subject: btrfs: use SECTOR_SHIFT to convert physical offset to LBA

Use SECTOR_SHIFT while converting a physical address to an LBA, makes
it more readable.

Signed-off-by: Anand Jain
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/compression.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'fs/btrfs/compression.c')

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 2d0493f0a184..14f5f25049a0 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -421,7 +421,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 */
 		if (!em || cur < em->start ||
 		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
-		    (em->block_start >> 9) != orig_bio->bi_iter.bi_sector) {
+		    (em->block_start >> SECTOR_SHIFT) != orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
 			unlock_extent(tree, cur, page_end, NULL);
 			unlock_page(page);
-- cgit v1.2.3

From e19493107675d48eb207b91d9a775c811f247e27 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Wed, 3 May 2023 17:24:27 +0200
Subject: btrfs: remove the mirror_num argument to btrfs_submit_compressed_read

Given that read recovery for data I/O is handled in the storage layer,
the mirror_num argument to btrfs_submit_compressed_read is always 0, so
remove it.

Reviewed-by: Johannes Thumshirn
Reviewed-by: Qu Wenruo
Reviewed-by: Josef Bacik
Signed-off-by: Christoph Hellwig
Reviewed-by: David Sterba
Signed-off-by: David Sterba
---
 fs/btrfs/compression.c | 4 ++--
 fs/btrfs/compression.h | 2 +-
 fs/btrfs/extent_io.c   | 2 +-
 3 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'fs/btrfs/compression.c')

diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 14f5f25049a0..04cd5de4f00f 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -472,7 +472,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
  * After the compressed pages are read, we copy the bytes into the
  * bio we were passed and then call the bio end_io calls
  */
-void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num)
+void btrfs_submit_compressed_read(struct btrfs_bio *bbio)
 {
 	struct btrfs_inode *inode = bbio->inode;
 	struct btrfs_fs_info *fs_info = inode->root->fs_info;
@@ -538,7 +538,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num)
 	if (memstall)
 		psi_memstall_leave(&pflags);
 
-	btrfs_submit_bio(&cb->bbio, mirror_num);
+	btrfs_submit_bio(&cb->bbio, 0);
 	return;
 
 out_free_compressed_pages:
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 19ab2abeddc0..b462ab106f43 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -93,7 +93,7 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 				   unsigned int nr_pages,
 				   blk_opf_t write_flags,
 				   bool writeback);
-void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num);
+void btrfs_submit_compressed_read(struct btrfs_bio *bbio);
 
 unsigned int btrfs_compress_str2level(unsigned int type, const char *str);
 
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 681e6774124f..ea99b2a3755e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -126,7 +126,7 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
 
 	if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ &&
 	    bio_ctrl->compress_type != BTRFS_COMPRESS_NONE)
-		btrfs_submit_compressed_read(bbio, 0);
+		btrfs_submit_compressed_read(bbio);
 	else
 		btrfs_submit_bio(bbio, 0);
 
-- cgit
v1.2.3 From 2b2553f12355577d8de3eaedfa25fce3368d652c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:57 +0200 Subject: btrfs: stop setting PageError in the data I/O path PageError is not used by the VFS/MM and deprecated because it uses up a page bit and has no coherent rules. Instead read errors are usually propagated by not setting or clearing the uptodate bit, and write errors are propagated through the address_space. Btrfs now only sets the flag and never clears it for data pages, so just remove all places setting it, and the subpage error bit. Note that the error propagation for superblock writes that work on the block device mapping still uses PageError for now, but that will be addressed in a separate series. Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 2 -- fs/btrfs/extent_io.c | 24 +++++------------------- fs/btrfs/inode.c | 3 --- fs/btrfs/subpage.c | 35 ----------------------------------- fs/btrfs/subpage.h | 10 ++++------ 5 files changed, 9 insertions(+), 65 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 04cd5de4f00f..bb2d1a4ceca1 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -211,8 +211,6 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb) for (i = 0; i < ret; i++) { struct folio *folio = fbatch.folios[i]; - if (errno) - folio_set_error(folio); btrfs_page_clamp_clear_writeback(fs_info, &folio->page, cb->start, cb->len); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 33e5f0a31c21..b7f26a4bb663 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -223,8 +223,6 @@ static int process_one_page(struct btrfs_fs_info *fs_info, if (page_ops & PAGE_SET_ORDERED) btrfs_page_clamp_set_ordered(fs_info, page, start, len); - if (page_ops & PAGE_SET_ERROR) - btrfs_page_clamp_set_error(fs_info, page, start, len); if (page_ops & PAGE_START_WRITEBACK) { btrfs_page_clamp_clear_dirty(fs_info, page, start, len); btrfs_page_clamp_set_writeback(fs_info, page, start, len); @@ -497,12 +495,10 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len) ASSERT(page_offset(page) <= start && start + len <= page_offset(page) + PAGE_SIZE); - if (uptodate && btrfs_verify_page(page, start)) { + if (uptodate && btrfs_verify_page(page, start)) btrfs_page_set_uptodate(fs_info, page, start, len); - } else { + else btrfs_page_clear_uptodate(fs_info, page, start, len); - btrfs_page_set_error(fs_info, page, start, len); - } if (!btrfs_is_subpage(fs_info, page)) unlock_page(page); @@ -530,7 +526,6 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end) len = end + 1 - start; btrfs_page_clear_uptodate(fs_info, page, start, len); - btrfs_page_set_error(fs_info, page, start, len); ret = err < 0 ? 
err : -EIO; mapping_set_error(page->mapping, ret); } @@ -1059,7 +1054,6 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, ret = set_page_extent_mapped(page); if (ret < 0) { unlock_extent(tree, start, end, NULL); - btrfs_page_set_error(fs_info, page, start, PAGE_SIZE); unlock_page(page); return ret; } @@ -1263,11 +1257,9 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, } ret = btrfs_run_delalloc_range(inode, page, delalloc_start, delalloc_end, &page_started, &nr_written, wbc); - if (ret) { - btrfs_page_set_error(inode->root->fs_info, page, - page_offset(page), PAGE_SIZE); + if (ret) return ret; - } + /* * delalloc_end is already one less than the total length, so * we don't subtract one from PAGE_SIZE @@ -1420,7 +1412,6 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1); if (IS_ERR(em)) { - btrfs_page_set_error(fs_info, page, cur, end - cur + 1); ret = PTR_ERR_OR_ZERO(em); goto out_error; } @@ -1519,9 +1510,6 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl WARN_ON(!PageLocked(page)); - btrfs_page_clear_error(btrfs_sb(inode->i_sb), page, - page_offset(page), PAGE_SIZE); - pg_offset = offset_in_page(i_size); if (page->index > end_index || (page->index == end_index && !pg_offset)) { @@ -1534,10 +1522,8 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl memzero_page(page, pg_offset, PAGE_SIZE - pg_offset); ret = set_page_extent_mapped(page); - if (ret < 0) { - SetPageError(page); + if (ret < 0) goto done; - } if (!bio_ctrl->extent_locked) { ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4633ccacefac..14e9aa91293e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1153,8 +1153,6 @@ static int submit_uncompressed_range(struct btrfs_inode *inode, const u64 page_start = page_offset(locked_page); const u64 page_end = page_start + PAGE_SIZE - 1; - btrfs_page_set_error(inode->root->fs_info, locked_page, - page_start, PAGE_SIZE); set_page_writeback(locked_page); end_page_writeback(locked_page); end_extent_writepage(locked_page, ret, page_start, page_end); @@ -2942,7 +2940,6 @@ out_page: mapping_set_error(page->mapping, ret); end_extent_writepage(page, ret, page_start, page_end); clear_page_dirty_for_io(page); - SetPageError(page); } btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE); unlock_page(page); diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index 74bc43040531..1b999c6e4193 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -100,9 +100,6 @@ void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sector subpage_info->uptodate_offset = cur; cur += nr_bits; - subpage_info->error_offset = cur; - cur += nr_bits; - subpage_info->dirty_offset = cur; cur += nr_bits; @@ -416,35 +413,6 @@ void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info, spin_unlock_irqrestore(&subpage->lock, flags); } -void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info, - struct page *page, u64 start, u32 len) -{ - struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; - unsigned int start_bit = subpage_calc_start_bit(fs_info, page, - error, start, len); - unsigned long flags; - - spin_lock_irqsave(&subpage->lock, flags); - bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); - SetPageError(page); - 
spin_unlock_irqrestore(&subpage->lock, flags); -} - -void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info, - struct page *page, u64 start, u32 len) -{ - struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; - unsigned int start_bit = subpage_calc_start_bit(fs_info, page, - error, start, len); - unsigned long flags; - - spin_lock_irqsave(&subpage->lock, flags); - bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); - if (subpage_test_bitmap_all_zero(fs_info, subpage, error)) - ClearPageError(page); - spin_unlock_irqrestore(&subpage->lock, flags); -} - void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info, struct page *page, u64 start, u32 len) { @@ -606,7 +574,6 @@ bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \ return ret; \ } IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate); -IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered); @@ -674,7 +641,6 @@ bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \ } IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate, PageUptodate); -IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError); IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io, PageDirty); IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback, @@ -769,7 +735,6 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, spin_lock_irqsave(&subpage->lock, flags); GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap); - GET_SUBPAGE_BITMAP(subpage, subpage_info, error, &error_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap); diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h index 5905caea8409..5cbf67ccbdeb 100644 --- a/fs/btrfs/subpage.h +++ b/fs/btrfs/subpage.h @@ -8,17 +8,17 @@ /* * Extra info for subpapge bitmap. * - * For subpage we pack all uptodate/error/dirty/writeback/ordered bitmaps into + * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into * one larger bitmap. * * This structure records how they are organized in the bitmap: * - * /- uptodate_offset /- error_offset /- dirty_offset + * /- uptodate_offset /- dirty_offset /- ordered_offset * | | | * v v v - * |u|u|u|u|........|u|u|e|e|.......|e|e| ... |o|o| + * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o| * |<- bitmap_nr_bits ->| - * |<--------------- total_nr_bits ---------------->| + * |<----------------- total_nr_bits ------------------>| */ struct btrfs_subpage_info { /* Number of bits for each bitmap */ @@ -32,7 +32,6 @@ struct btrfs_subpage_info { * @bitmap_size, which is calculated from PAGE_SIZE / sectorsize. 
*/ unsigned int uptodate_offset; - unsigned int error_offset; unsigned int dirty_offset; unsigned int writeback_offset; unsigned int ordered_offset; @@ -141,7 +140,6 @@ bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \ struct page *page, u64 start, u32 len); DECLARE_BTRFS_SUBPAGE_OPS(uptodate); -DECLARE_BTRFS_SUBPAGE_OPS(error); DECLARE_BTRFS_SUBPAGE_OPS(dirty); DECLARE_BTRFS_SUBPAGE_OPS(writeback); DECLARE_BTRFS_SUBPAGE_OPS(ordered); -- cgit v1.2.3 From a39da514eba81e687db05efb1e8b7cb393e2cb71 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 09:53:55 +0200 Subject: btrfs: limit write bios to a single ordered extent Currently buffered writeback bios are allowed to span multiple ordered_extents, although that basically never actually happens since commit 4a445b7b6178 ("btrfs: don't merge pages into bio if their page offset is not contiguous"). Supporting bios than span ordered_extents complicates the file checksumming code, and prevents us from adding an ordered_extent pointer to the btrfs_bio structure. Use the existing code to limit a bio to single ordered_extent for zoned device writes for all writes. This allows to remove the REQ_BTRFS_ONE_ORDERED flags, and the handling of multiple ordered_extents in btrfs_csum_one_bio. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/bio.c | 3 --- fs/btrfs/bio.h | 3 --- fs/btrfs/compression.c | 2 -- fs/btrfs/extent_io.c | 11 ++--------- fs/btrfs/file-item.c | 38 -------------------------------------- 5 files changed, 2 insertions(+), 55 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index 846deed98fd0..4a75efd588df 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -457,9 +457,6 @@ static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr) static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc, struct btrfs_io_stripe *smap, int mirror_num) { - /* Do not leak our private flag into the block layer. */ - bio->bi_opf &= ~REQ_BTRFS_ONE_ORDERED; - if (!bioc) { /* Single mirror read/write fast path. */ btrfs_bio(bio)->mirror_num = mirror_num; diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h index 8a29980159b4..52e7962103ff 100644 --- a/fs/btrfs/bio.h +++ b/fs/btrfs/bio.h @@ -102,9 +102,6 @@ static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) bbio->end_io(bbio); } -/* Bio only refers to one ordered extent. */ -#define REQ_BTRFS_ONE_ORDERED REQ_DRV - /* Submit using blkcg_punt_bio_submit. */ #define REQ_BTRFS_CGROUP_PUNT REQ_FS_PRIVATE diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index bb2d1a4ceca1..617b9572f13e 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -293,8 +293,6 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && IS_ALIGNED(len, fs_info->sectorsize)); - write_flags |= REQ_BTRFS_ONE_ORDERED; - cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags, end_compressed_bio_write); cb->start = start; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f4d3c56b2900..35141a223a3e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -826,13 +826,8 @@ static void alloc_new_bio(struct btrfs_inode *inode, bio_ctrl->bbio = bbio; bio_ctrl->len_to_oe_boundary = U32_MAX; - /* - * Limit the extent to the ordered boundary for Zone Append. 
- * Compressed bios aren't submitted directly, so it doesn't apply to - * them. - */ - if (bio_ctrl->compress_type == BTRFS_COMPRESS_NONE && - btrfs_use_zone_append(bbio)) { + /* Limit data write bios to the ordered boundary. */ + if (bio_ctrl->wbc) { struct btrfs_ordered_extent *ordered; ordered = btrfs_lookup_ordered_extent(inode, file_offset); @@ -842,9 +837,7 @@ static void alloc_new_bio(struct btrfs_inode *inode, ordered->disk_num_bytes - file_offset); btrfs_put_ordered_extent(ordered); } - } - if (bio_ctrl->wbc) { /* * Pick the last added device to support cgroup writeback. For * multi-device file systems this means blk-cgroup policies have diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 0cb4a9921d21..782bbf081c26 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -733,8 +733,6 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) struct bio_vec bvec; int index; unsigned int blockcount; - unsigned long total_bytes = 0; - unsigned long this_sum_bytes = 0; int i; unsigned nofs_flag; @@ -776,34 +774,6 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) - 1); for (i = 0; i < blockcount; i++) { - if (!(bio->bi_opf & REQ_BTRFS_ONE_ORDERED) && - !in_range(offset, ordered->file_offset, - ordered->num_bytes)) { - unsigned long bytes_left; - - sums->len = this_sum_bytes; - this_sum_bytes = 0; - btrfs_add_ordered_sum(ordered, sums); - btrfs_put_ordered_extent(ordered); - - bytes_left = bio->bi_iter.bi_size - total_bytes; - - nofs_flag = memalloc_nofs_save(); - sums = kvzalloc(btrfs_ordered_sum_size(fs_info, - bytes_left), GFP_KERNEL); - memalloc_nofs_restore(nofs_flag); - if (!sums) - return BLK_STS_RESOURCE; - - sums->len = bytes_left; - ordered = btrfs_lookup_ordered_extent(inode, - offset); - ASSERT(ordered); /* Logic error */ - sums->logical = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - + total_bytes; - index = 0; - } - data = bvec_kmap_local(&bvec); crypto_shash_digest(shash, data + (i * fs_info->sectorsize), @@ -812,18 +782,10 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) kunmap_local(data); index += fs_info->csum_size; offset += fs_info->sectorsize; - this_sum_bytes += fs_info->sectorsize; - total_bytes += fs_info->sectorsize; } } - this_sum_bytes = 0; - /* - * The ->sums assignment is for zoned writes, where a bio never spans - * ordered extents and is only done unconditionally because that's cheaper - * than a branch. - */ bbio->sums = sums; btrfs_add_ordered_sum(ordered, sums); btrfs_put_ordered_extent(ordered); -- cgit v1.2.3 From d611935b5d434a697757463e77632ac1501cfd1e Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 09:53:58 +0200 Subject: btrfs: pass an ordered_extent to btrfs_submit_compressed_write btrfs_submit_compressed_write always operates on a single ordered_extent. Make that explicit by using btrfs_alloc_ordered_extent in the callers and passing the ordered_extent to btrfs_submit_compressed_write. This will help with storing and ordered_extent pointer in the btrfs_bio in subsequent patches. 
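As a rough illustration of the calling-convention change, and not the kernel
code, the sketch below uses simplified stand-in struct and function names; the
point is only that the callee reads the start, length and disk location from
the ordered extent instead of taking them as separate scalar arguments.

#include <stdio.h>
#include <stdint.h>

/*
 * Simplified stand-in for btrfs_ordered_extent; the fields mirror what
 * the old argument list carried one by one.
 */
struct ordered_extent {
	uint64_t file_offset;     /* was "start" */
	uint64_t num_bytes;       /* was "len" */
	uint64_t disk_bytenr;     /* was "disk_start" */
	uint64_t disk_num_bytes;  /* was "compressed_len" */
};

/* New shape: one pointer instead of four scalars. */
static void submit_compressed_write(const struct ordered_extent *oe)
{
	printf("write %llu bytes at file offset %llu as %llu compressed bytes at %llu\n",
	       (unsigned long long)oe->num_bytes,
	       (unsigned long long)oe->file_offset,
	       (unsigned long long)oe->disk_num_bytes,
	       (unsigned long long)oe->disk_bytenr);
}

int main(void)
{
	struct ordered_extent oe = {
		.file_offset = 0,
		.num_bytes = 131072,
		.disk_bytenr = 1048576,
		.disk_num_bytes = 65536,
	};

	submit_compressed_write(&oe);
	return 0;
}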
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 29 +++++++++++++++-------------- fs/btrfs/compression.h | 5 ++--- fs/btrfs/inode.c | 21 ++++++++++----------- 3 files changed, 27 insertions(+), 28 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 617b9572f13e..aafc846bcd4f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -279,33 +279,34 @@ static void btrfs_add_compressed_bio_pages(struct compressed_bio *cb) * This also checksums the file bytes and gets things ready for * the end io hooks. */ -void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, - unsigned int len, u64 disk_start, - unsigned int compressed_len, - struct page **compressed_pages, - unsigned int nr_pages, - blk_opf_t write_flags, - bool writeback) +void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered, + struct page **compressed_pages, + unsigned int nr_pages, + blk_opf_t write_flags, + bool writeback) { + struct btrfs_inode *inode = BTRFS_I(ordered->inode); struct btrfs_fs_info *fs_info = inode->root->fs_info; struct compressed_bio *cb; - ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && - IS_ALIGNED(len, fs_info->sectorsize)); + ASSERT(IS_ALIGNED(ordered->file_offset, fs_info->sectorsize)); + ASSERT(IS_ALIGNED(ordered->num_bytes, fs_info->sectorsize)); - cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags, + cb = alloc_compressed_bio(inode, ordered->file_offset, + REQ_OP_WRITE | write_flags, end_compressed_bio_write); - cb->start = start; - cb->len = len; + cb->start = ordered->file_offset; + cb->len = ordered->num_bytes; cb->compressed_pages = compressed_pages; - cb->compressed_len = compressed_len; + cb->compressed_len = ordered->disk_num_bytes; cb->writeback = writeback; INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work); cb->nr_pages = nr_pages; - cb->bbio.bio.bi_iter.bi_sector = disk_start >> SECTOR_SHIFT; + cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT; btrfs_add_compressed_bio_pages(cb); btrfs_submit_bio(&cb->bbio, 0); + btrfs_put_ordered_extent(ordered); } /* diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index b462ab106f43..03bb9d143fa7 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -10,6 +10,7 @@ #include "bio.h" struct btrfs_inode; +struct btrfs_ordered_extent; /* * We want to make sure that amount of RAM required to uncompress an extent is @@ -86,9 +87,7 @@ int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page, int btrfs_decompress_buf2page(const char *buf, u32 buf_len, struct compressed_bio *cb, u32 decompressed); -void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, - unsigned int len, u64 disk_start, - unsigned int compressed_len, +void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered, struct page **compressed_pages, unsigned int nr_pages, blk_opf_t write_flags, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index d187ddc9673a..8ae448a4f470 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1182,6 +1182,7 @@ static int submit_one_async_extent(struct btrfs_inode *inode, struct extent_io_tree *io_tree = &inode->io_tree; struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; + struct btrfs_ordered_extent *ordered; struct btrfs_key ins; struct page *locked_page = NULL; struct extent_map 
*em; @@ -1243,7 +1244,7 @@ static int submit_one_async_extent(struct btrfs_inode *inode, } free_extent_map(em); - ret = btrfs_add_ordered_extent(inode, start, /* file_offset */ + ordered = btrfs_alloc_ordered_extent(inode, start, /* file_offset */ async_extent->ram_size, /* num_bytes */ async_extent->ram_size, /* ram_bytes */ ins.objectid, /* disk_bytenr */ @@ -1251,8 +1252,9 @@ static int submit_one_async_extent(struct btrfs_inode *inode, 0, /* offset */ 1 << BTRFS_ORDERED_COMPRESSED, async_extent->compress_type); - if (ret) { + if (IS_ERR(ordered)) { btrfs_drop_extent_map_range(inode, start, end, false); + ret = PTR_ERR(ordered); goto out_free_reserve; } btrfs_dec_block_group_reservations(fs_info, ins.objectid); @@ -1261,11 +1263,7 @@ static int submit_one_async_extent(struct btrfs_inode *inode, extent_clear_unlock_delalloc(inode, start, end, NULL, EXTENT_LOCKED | EXTENT_DELALLOC, PAGE_UNLOCK | PAGE_START_WRITEBACK); - - btrfs_submit_compressed_write(inode, start, /* file_offset */ - async_extent->ram_size, /* num_bytes */ - ins.objectid, /* disk_bytenr */ - ins.offset, /* compressed_len */ + btrfs_submit_compressed_write(ordered, async_extent->pages, /* compressed_pages */ async_extent->nr_pages, async_chunk->write_flags, true); @@ -10274,6 +10272,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, struct extent_io_tree *io_tree = &inode->io_tree; struct extent_changeset *data_reserved = NULL; struct extent_state *cached_state = NULL; + struct btrfs_ordered_extent *ordered; int compression; size_t orig_count; u64 start, end; @@ -10450,14 +10449,15 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, } free_extent_map(em); - ret = btrfs_add_ordered_extent(inode, start, num_bytes, ram_bytes, + ordered = btrfs_alloc_ordered_extent(inode, start, num_bytes, ram_bytes, ins.objectid, ins.offset, encoded->unencoded_offset, (1 << BTRFS_ORDERED_ENCODED) | (1 << BTRFS_ORDERED_COMPRESSED), compression); - if (ret) { + if (IS_ERR(ordered)) { btrfs_drop_extent_map_range(inode, start, end, false); + ret = PTR_ERR(ordered); goto out_free_reserved; } btrfs_dec_block_group_reservations(fs_info, ins.objectid); @@ -10469,8 +10469,7 @@ ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from, btrfs_delalloc_release_extents(inode, num_bytes); - btrfs_submit_compressed_write(inode, start, num_bytes, ins.objectid, - ins.offset, pages, nr_pages, 0, false); + btrfs_submit_compressed_write(ordered, pages, nr_pages, 0, false); ret = orig_count; goto out; -- cgit v1.2.3 From ec63b84d4611b2f07e9242080f3e8de4a011820b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 09:54:02 +0200 Subject: btrfs: add an ordered_extent pointer to struct btrfs_bio Add a pointer to the ordered_extent to the existing union in struct btrfs_bio, so all code dealing with data write bios can just use a pointer dereference to retrieve the ordered_extent instead of doing multiple rbtree lookups per I/O. The reference to this ordered_extent is dropped at end I/O time, which implies that an extra one must be acquired when the bio is split. This also requires moving the btrfs_extract_ordered_extent call into btrfs_split_bio so that the invariant of always having a valid ordered_extent reference for the btrfs_bio is kept. 
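A minimal self-contained sketch of the reference-counting invariant described
above, with invented names rather than the btrfs types: every in-flight handle
owns exactly one reference to the shared extent, a split takes an extra
reference for the new handle, and each completion drops one, so the extent is
freed only after the last piece finishes.

#include <stdio.h>
#include <stdlib.h>

struct oe {               /* stand-in for btrfs_ordered_extent */
	int refs;
};

struct handle {           /* stand-in for btrfs_bio */
	struct oe *ordered;
};

static void oe_get(struct oe *o) { o->refs++; }

static void oe_put(struct oe *o)
{
	if (--o->refs == 0) {
		printf("ordered extent freed\n");
		free(o);
	}
}

/* Splitting clones the handle, so it must grab its own reference. */
static struct handle split(struct handle *orig)
{
	struct handle h = { .ordered = orig->ordered };

	oe_get(h.ordered);
	return h;
}

/* End I/O drops the single reference this handle owns. */
static void end_io(struct handle *h)
{
	oe_put(h->ordered);
}

int main(void)
{
	struct oe *o = calloc(1, sizeof(*o));
	struct handle a = { .ordered = o };
	struct handle b;

	o->refs = 1;        /* reference owned by handle 'a' */
	b = split(&a);      /* refs == 2 */
	end_io(&a);         /* refs == 1 */
	end_io(&b);         /* refs == 0, freed here */
	return 0;
}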
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/bio.c | 42 ++++++++++++++++++++++++++++++++++++++---- fs/btrfs/bio.h | 9 +++------ fs/btrfs/compression.c | 2 +- fs/btrfs/extent_io.c | 2 +- fs/btrfs/file-item.c | 9 +-------- fs/btrfs/inode.c | 8 +++++--- 6 files changed, 49 insertions(+), 23 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index 41a152e87429..12b12443efaa 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -33,6 +33,11 @@ static inline bool is_data_bbio(struct btrfs_bio *bbio) return bbio->inode && is_data_inode(&bbio->inode->vfs_inode); } +static bool bbio_has_ordered_extent(struct btrfs_bio *bbio) +{ + return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE; +} + /* * Initialize a btrfs_bio structure. This skips the embedded bio itself as it * is already initialized by the block layer. @@ -88,11 +93,40 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info, bbio->inode = orig_bbio->inode; bbio->file_offset = orig_bbio->file_offset; orig_bbio->file_offset += map_length; - + if (bbio_has_ordered_extent(bbio)) { + refcount_inc(&orig_bbio->ordered->refs); + bbio->ordered = orig_bbio->ordered; + } atomic_inc(&orig_bbio->pending_ios); return bbio; } +/* Free a bio that was never submitted to the underlying device. */ +static void btrfs_cleanup_bio(struct btrfs_bio *bbio) +{ + if (bbio_has_ordered_extent(bbio)) + btrfs_put_ordered_extent(bbio->ordered); + bio_put(&bbio->bio); +} + +static void __btrfs_bio_end_io(struct btrfs_bio *bbio) +{ + if (bbio_has_ordered_extent(bbio)) { + struct btrfs_ordered_extent *ordered = bbio->ordered; + + bbio->end_io(bbio); + btrfs_put_ordered_extent(ordered); + } else { + bbio->end_io(bbio); + } +} + +void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) +{ + bbio->bio.bi_status = status; + __btrfs_bio_end_io(bbio); +} + static void btrfs_orig_write_end_io(struct bio *bio); static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio, @@ -121,12 +155,12 @@ static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio) if (bbio->bio.bi_status) btrfs_bbio_propagate_error(bbio, orig_bbio); - bio_put(&bbio->bio); + btrfs_cleanup_bio(bbio); bbio = orig_bbio; } if (atomic_dec_and_test(&bbio->pending_ios)) - bbio->end_io(bbio); + __btrfs_bio_end_io(bbio); } static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror) @@ -684,7 +718,7 @@ done: fail_put_bio: if (map_length < length) - bio_put(bio); + btrfs_cleanup_bio(bbio); fail: btrfs_bio_counter_dec(fs_info); btrfs_bio_end_io(orig_bbio, ret); diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h index 52e7962103ff..ca79decee060 100644 --- a/fs/btrfs/bio.h +++ b/fs/btrfs/bio.h @@ -50,11 +50,13 @@ struct btrfs_bio { /* * For data writes: + * - ordered extent covering the bio * - pointer to the checksums for this bio * - original physical address from the allocator * (for zone append only) */ struct { + struct btrfs_ordered_extent *ordered; struct btrfs_ordered_sum *sums; u64 orig_physical; }; @@ -95,12 +97,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info, struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, struct btrfs_fs_info *fs_info, btrfs_bio_end_io_t end_io, void *private); - -static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) -{ - bbio->bio.bi_status = status; - bbio->end_io(bbio); -} +void btrfs_bio_end_io(struct 
btrfs_bio *bbio, blk_status_t status); /* Submit using blkcg_punt_bio_submit. */ #define REQ_BTRFS_CGROUP_PUNT REQ_FS_PRIVATE diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index aafc846bcd4f..ce4be0f21c5c 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -303,10 +303,10 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered, INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work); cb->nr_pages = nr_pages; cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT; + cb->bbio.ordered = ordered; btrfs_add_compressed_bio_pages(cb); btrfs_submit_bio(&cb->bbio, 0); - btrfs_put_ordered_extent(ordered); } /* diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 35141a223a3e..ad536eaf02b0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -835,7 +835,7 @@ static void alloc_new_bio(struct btrfs_inode *inode, bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX, ordered->file_offset + ordered->disk_num_bytes - file_offset); - btrfs_put_ordered_extent(ordered); + bbio->ordered = ordered; } /* diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 782bbf081c26..cbacaa80526c 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -799,19 +799,12 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) */ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio) { - struct btrfs_ordered_extent *ordered = - btrfs_lookup_ordered_extent(bbio->inode, bbio->file_offset); - - if (WARN_ON_ONCE(!ordered)) - return BLK_STS_IOERR; - bbio->sums = kmalloc(sizeof(*bbio->sums), GFP_NOFS); if (!bbio->sums) return BLK_STS_RESOURCE; bbio->sums->len = bbio->bio.bi_iter.bi_size; bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; - btrfs_add_ordered_sum(ordered, bbio->sums); - btrfs_put_ordered_extent(ordered); + btrfs_add_ordered_sum(bbio->ordered, bbio->sums); return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 020d871bb135..b8ec3d95e659 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2716,8 +2716,11 @@ static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, return -EINVAL; /* No need to split if the ordered extent covers the entire bio. */ - if (ordered->disk_num_bytes == len) + if (ordered->disk_num_bytes == len) { + refcount_inc(&ordered->refs); + bbio->ordered = ordered; return 0; + } /* * Don't split the extent_map for NOCOW extents, as we're writing into @@ -2734,8 +2737,7 @@ static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, new = btrfs_split_ordered_extent(ordered, len); if (IS_ERR(new)) return PTR_ERR(new); - btrfs_put_ordered_extent(new); - + bbio->ordered = new; return 0; } -- cgit v1.2.3 From 7dd4395490369cfc6b3fe87e4ec2b5dd26259e08 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 09:54:08 +0200 Subject: btrfs: use btrfs_finish_ordered_extent to complete compressed writes Use the btrfs_finish_ordered_extent helper to complete compressed writes using the bbio->ordered pointer instead of requiring an rbtree lookup in the otherwise equivalent btrfs_mark_ordered_io_finished called from btrfs_writepage_endio_finish_ordered. 
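To make the cost difference concrete, here is a rough standalone sketch with
invented names, not the btrfs implementation: the old completion path has to
find the ordered extent by file offset, while the new one simply dereferences
the pointer cached on the bio-like handle at submit time.

#include <stdio.h>
#include <stddef.h>

struct extent {
	unsigned long long file_offset;
	unsigned long long num_bytes;
	int finished;
};

/* Stand-in for the tree of in-flight ordered extents. */
static struct extent extents[] = {
	{ 0,      65536, 0 },
	{ 65536,  65536, 0 },
	{ 131072, 65536, 0 },
};

/* Old style: search for the extent covering the offset. */
static struct extent *lookup_extent(unsigned long long offset)
{
	for (size_t i = 0; i < sizeof(extents) / sizeof(extents[0]); i++)
		if (offset >= extents[i].file_offset &&
		    offset < extents[i].file_offset + extents[i].num_bytes)
			return &extents[i];
	return NULL;
}

struct handle {
	unsigned long long file_offset;
	struct extent *ordered;        /* cached when the write is submitted */
};

static void finish_by_lookup(struct handle *h)
{
	struct extent *e = lookup_extent(h->file_offset);

	if (e)
		e->finished = 1;
}

static void finish_by_pointer(struct handle *h)
{
	h->ordered->finished = 1;      /* no search needed */
}

int main(void)
{
	struct handle a = { 70000, &extents[1] };

	finish_by_lookup(&a);
	finish_by_pointer(&a);
	printf("extent at %llu finished: %d\n",
	       extents[1].file_offset, extents[1].finished);
	return 0;
}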
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index ce4be0f21c5c..f1004259c3d3 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -224,13 +224,8 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work) struct compressed_bio *cb = container_of(work, struct compressed_bio, write_end_work); - /* - * Ok, we're the last bio for this extent, step one is to call back - * into the FS and do all the end_io operations. - */ - btrfs_writepage_endio_finish_ordered(cb->bbio.inode, NULL, - cb->start, cb->start + cb->len - 1, - cb->bbio.bio.bi_status == BLK_STS_OK); + btrfs_finish_ordered_extent(cb->bbio.ordered, NULL, cb->start, cb->len, + cb->bbio.bio.bi_status == BLK_STS_OK); if (cb->writeback) end_compressed_writeback(cb); -- cgit v1.2.3 From e794203e9d2d610faf6c5926345091193b202af5 Mon Sep 17 00:00:00 2001 From: Ben Dooks Date: Fri, 16 Jun 2023 18:24:43 +0100 Subject: btrfs: make btrfs_compressed_bioset static The 'btrfs_compressed_bioset' struct isn't exported outside of the fs/btrfs/compression.c file, so make it static to fix the following sparse warning: fs/btrfs/compression.c:40:16: warning: symbol 'btrfs_compressed_bioset' was not declared. Should it be static? Reviewed-by: Christoph Hellwig Signed-off-by: Ben Dooks Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/compression.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index f1004259c3d3..8818ed5c390f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -37,7 +37,7 @@ #include "file-item.h" #include "super.h" -struct bio_set btrfs_compressed_bioset; +static struct bio_set btrfs_compressed_bioset; static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" }; -- cgit v1.2.3
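For reference, the conversion that the first patch in this series makes more
readable works as in the tiny standalone program below. This is not kernel
code; SECTOR_SHIFT is defined locally to mirror the block layer's value of 9,
since logical block addresses count 512-byte sectors.

#include <stdio.h>
#include <stdint.h>

#define SECTOR_SHIFT 9
#define SECTOR_SIZE  (1 << SECTOR_SHIFT)   /* 512 bytes */

/* A byte offset becomes an LBA by shifting out the sector size. */
static uint64_t byte_offset_to_lba(uint64_t byte_offset)
{
	return byte_offset >> SECTOR_SHIFT;
}

int main(void)
{
	uint64_t block_start = 1048576;    /* 1 MiB physical offset */

	printf("%llu bytes -> LBA %llu (sector size %d)\n",
	       (unsigned long long)block_start,
	       (unsigned long long)byte_offset_to_lba(block_start),
	       SECTOR_SIZE);
	return 0;
}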