From f18cc97845aa4ae0e795c088c979fe1642b3b8e5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 8 May 2023 07:58:38 -0700 Subject: btrfs: fix dirty_metadata_bytes for redirtied buffers dirty_metadata_bytes is decremented in both places that clear the dirty bit in a buffer, but only incremented in btrfs_mark_buffer_dirty, which means that a buffer that is redirtied using btrfs_redirty_list_add won't be added to dirty_metadata_bytes, but it will be subtracted when written out, leading to an inconsistency in the counter. Move the dirty_metadata_bytes update from btrfs_mark_buffer_dirty into set_extent_buffer_dirty to also account for the redirty case, and remove the now unused set_extent_buffer_dirty return value. Fixes: d3575156f662 ("btrfs: zoned: redirty released extent buffers") CC: stable@vger.kernel.org # 5.15+ Reviewed-by: Naohiro Aota Signed-off-by: Christoph Hellwig Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a1adadd5d25d..a829390632a5 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4148,7 +4148,7 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans, WARN_ON(atomic_read(&eb->refs) == 0); } -bool set_extent_buffer_dirty(struct extent_buffer *eb) +void set_extent_buffer_dirty(struct extent_buffer *eb) { int i; int num_pages; @@ -4183,13 +4183,14 @@ bool set_extent_buffer_dirty(struct extent_buffer *eb) eb->start, eb->len); if (subpage) unlock_page(eb->pages[0]); + percpu_counter_add_batch(&eb->fs_info->dirty_metadata_bytes, + eb->len, + eb->fs_info->dirty_metadata_batch); } #ifdef CONFIG_BTRFS_DEBUG for (i = 0; i < num_pages; i++) ASSERT(PageDirty(eb->pages[i])); #endif - - return was_dirty; } void clear_extent_buffer_uptodate(struct extent_buffer *eb) -- cgit v1.2.3 From f880fe6e0b4b127d6e2a007fe68bdf5651677ae2 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 8 May 2023 07:58:39 -0700 Subject: btrfs: don't hold an extra reference for redirtied buffers When btrfs_redirty_list_add redirties a buffer, it also acquires an extra reference that is released on transaction commit. But this is not required, as buffers that are dirty or under writeback are never freed (look for calls to extent_buffer_under_io()). Remove the extra reference and the infrastructure used to drop it again. History behind the redirty logic: initially, the code used a releasing_list to hold all the to-be-released extent buffers and decided which buffers to re-dirty at commit time. In a later version, the behaviour was changed to re-dirty a necessary buffer and add the re-dirtied one to the list in btrfs_free_tree_block(). In short, the list was there mostly for the patch series' historical reasons.
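To make the invariant concrete, here is a condensed sketch (not the literal upstream code) of the release-path check this relies on; the extent_buffer_under_io() body is the one shown in a later patch of this series, while the surrounding release logic is simplified for illustration:

    static int extent_buffer_under_io(const struct extent_buffer *eb)
    {
            return (atomic_read(&eb->io_pages) ||
                    test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) ||
                    test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
    }

    /*
     * Simplified release check: a buffer that is dirty or has I/O in
     * flight is never freed, so a redirtied buffer needs no extra
     * reference to stay alive until it has been written out.
     */
    static int release_extent_buffer_sketch(struct extent_buffer *eb)
    {
            if (extent_buffer_under_io(eb))
                    return 0;       /* keep the buffer in memory */
            /* ... normal refcount-based freeing continues here ... */
            return 1;
    }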
Reviewed-by: Naohiro Aota Signed-off-by: Christoph Hellwig [ add Naohiro's comment regarding history ] Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 2 -- fs/btrfs/extent_io.c | 1 - fs/btrfs/extent_io.h | 1 - fs/btrfs/transaction.c | 9 --------- fs/btrfs/transaction.h | 3 --- fs/btrfs/zoned.c | 28 ++++------------------------ fs/btrfs/zoned.h | 2 -- 7 files changed, 4 insertions(+), 42 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 112c99dde57f..16224fd82987 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -5073,8 +5073,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans, EXTENT_DIRTY); btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents); - btrfs_free_redirty_list(cur_trans); - cur_trans->state =TRANS_STATE_COMPLETED; wake_up(&cur_trans->commit_wait); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a829390632a5..d8becf1cdbc0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3557,7 +3557,6 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, init_rwsem(&eb->lock); btrfs_leak_debug_add_eb(eb); - INIT_LIST_HEAD(&eb->release_list); spin_lock_init(&eb->refs_lock); atomic_set(&eb->refs, 1); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index f937654230d3..6f3cfadd232c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -89,7 +89,6 @@ struct extent_buffer { struct rw_semaphore lock; struct page *pages[INLINE_EXTENT_BUFFER_PAGES]; - struct list_head release_list; #ifdef CONFIG_BTRFS_DEBUG struct list_head leak_list; #endif diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 27c616fdfae2..fe0f00e717a8 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -374,8 +374,6 @@ loop: spin_lock_init(&cur_trans->dirty_bgs_lock); INIT_LIST_HEAD(&cur_trans->deleted_bgs); spin_lock_init(&cur_trans->dropped_roots_lock); - INIT_LIST_HEAD(&cur_trans->releasing_ebs); - spin_lock_init(&cur_trans->releasing_ebs_lock); list_add_tail(&cur_trans->list, &fs_info->trans_list); extent_io_tree_init(fs_info, &cur_trans->dirty_pages, IO_TREE_TRANS_DIRTY_PAGES); @@ -2482,13 +2480,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) goto scrub_continue; } - /* - * At this point, we should have written all the tree blocks allocated - * in this transaction. So it's now safe to free the redirtyied extent - * buffers. 
- */ - btrfs_free_redirty_list(cur_trans); - ret = write_all_supers(fs_info, 0); /* * the super is written, we can safely allow the tree-loggers diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index fa728ab80826..8e9fa23bd7fe 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -94,9 +94,6 @@ struct btrfs_transaction { */ atomic_t pending_ordered; wait_queue_head_t pending_wait; - - spinlock_t releasing_ebs_lock; - struct list_head releasing_ebs; }; enum { diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index dac879fe2871..bda301a55cbe 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1602,37 +1602,17 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) void btrfs_redirty_list_add(struct btrfs_transaction *trans, struct extent_buffer *eb) { - struct btrfs_fs_info *fs_info = eb->fs_info; - - if (!btrfs_is_zoned(fs_info) || - btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN) || - !list_empty(&eb->release_list)) + if (!btrfs_is_zoned(eb->fs_info) || + btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN)) return; + ASSERT(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); + memzero_extent_buffer(eb, 0, eb->len); set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags); set_extent_buffer_dirty(eb); set_extent_bits_nowait(&trans->dirty_pages, eb->start, eb->start + eb->len - 1, EXTENT_DIRTY); - - spin_lock(&trans->releasing_ebs_lock); - list_add_tail(&eb->release_list, &trans->releasing_ebs); - spin_unlock(&trans->releasing_ebs_lock); - atomic_inc(&eb->refs); -} - -void btrfs_free_redirty_list(struct btrfs_transaction *trans) -{ - spin_lock(&trans->releasing_ebs_lock); - while (!list_empty(&trans->releasing_ebs)) { - struct extent_buffer *eb; - - eb = list_first_entry(&trans->releasing_ebs, - struct extent_buffer, release_list); - list_del_init(&eb->release_list); - free_extent_buffer(eb); - } - spin_unlock(&trans->releasing_ebs_lock); } bool btrfs_use_zone_append(struct btrfs_bio *bbio) diff --git a/fs/btrfs/zoned.h b/fs/btrfs/zoned.h index c0570d35fea2..3058ef559c98 100644 --- a/fs/btrfs/zoned.h +++ b/fs/btrfs/zoned.h @@ -54,7 +54,6 @@ int btrfs_load_block_group_zone_info(struct btrfs_block_group *cache, bool new); void btrfs_calc_zone_unusable(struct btrfs_block_group *cache); void btrfs_redirty_list_add(struct btrfs_transaction *trans, struct extent_buffer *eb); -void btrfs_free_redirty_list(struct btrfs_transaction *trans); bool btrfs_use_zone_append(struct btrfs_bio *bbio); void btrfs_record_physical_zoned(struct btrfs_bio *bbio); void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered); @@ -179,7 +178,6 @@ static inline void btrfs_calc_zone_unusable(struct btrfs_block_group *cache) { } static inline void btrfs_redirty_list_add(struct btrfs_transaction *trans, struct extent_buffer *eb) { } -static inline void btrfs_free_redirty_list(struct btrfs_transaction *trans) { } static inline bool btrfs_use_zone_append(struct btrfs_bio *bbio) { -- cgit v1.2.3 From 7f26fb1c13f7445adf7890bf27458081655c9a4c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:21 +0200 Subject: btrfs: mark extent_buffer_under_io static extent_buffer_under_io is only used in extent_io.c, so mark it static. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 2 +- fs/btrfs/extent_io.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d8becf1cdbc0..9827403ce2fb 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3421,7 +3421,7 @@ static void __free_extent_buffer(struct extent_buffer *eb) kmem_cache_free(extent_buffer_cache, eb); } -int extent_buffer_under_io(const struct extent_buffer *eb) +static int extent_buffer_under_io(const struct extent_buffer *eb) { return (atomic_read(&eb->io_pages) || test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 6f3cfadd232c..e11ee09ab26b 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -264,7 +264,6 @@ void extent_buffer_bitmap_clear(const struct extent_buffer *eb, void set_extent_buffer_dirty(struct extent_buffer *eb); void set_extent_buffer_uptodate(struct extent_buffer *eb); void clear_extent_buffer_uptodate(struct extent_buffer *eb); -int extent_buffer_under_io(const struct extent_buffer *eb); void extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end); void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, -- cgit v1.2.3 From 243984b3b99199678b6ec74787f8fed60c58042d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:22 +0200 Subject: btrfs: subpage: fix error handling in end_bio_subpage_eb_writepage Call btrfs_page_clear_uptodate instead of ClearPageUptodate to properly manage the uptodate bit for the subpage case. Reported-by: Qu Wenruo Reviewed-by: Qu Wenruo Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 9827403ce2fb..a75e05c4f496 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1915,7 +1915,8 @@ static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio) if (bio->bi_status || test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { - ClearPageUptodate(page); + btrfs_page_clear_uptodate(fs_info, page, + eb->start, eb->len); set_btree_ioerr(page, eb); } -- cgit v1.2.3 From e95382834cf885b478dbe14a66451b863eb35c94 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:25 +0200 Subject: btrfs: always read the entire extent_buffer Currently read_extent_buffer_pages skips pages that are already uptodate when reading in an extent_buffer. While this reduces the amount of data read, it increases the number of I/O operations, as we now need to do multiple I/Os when reading an extent buffer with one or more uptodate pages in the middle of it. On any modern storage device, be it a hard drive or an SSD, this actually decreases I/O performance. Fortunately this case is pretty rare, as the pages are always initially read together and then aged the same way. Besides simplifying the code a bit, this also allows for major simplifications to the I/O completion handler later on. Note that the case where all pages are uptodate is still handled by an optimized fast path that does not read any data from disk.
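Condensed from the hunks below, the resulting read policy is (fast-path body abbreviated):

    /* Fast path: every page is already uptodate, no I/O is issued. */
    for (i = 0; i < num_pages; i++)
            if (!PageUptodate(eb->pages[i]))
                    all_uptodate = 0;
    if (all_uptodate) {
            set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
            goto unlock_exit;
    }

    /* Slow path: one read spanning the whole buffer, including any
     * pages that happen to be uptodate already. */
    atomic_set(&eb->io_pages, num_pages);
    for (i = 0; i < num_pages; i++) {
            ClearPageError(eb->pages[i]);
            submit_extent_page(&bio_ctrl, page_offset(eb->pages[i]),
                               eb->pages[i], PAGE_SIZE, 0);
    }
    submit_one_bio(&bio_ctrl);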
Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a75e05c4f496..ede7c88e829e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4315,7 +4315,6 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, int locked_pages = 0; int all_uptodate = 1; int num_pages; - unsigned long num_reads = 0; struct btrfs_bio_ctrl bio_ctrl = { .opf = REQ_OP_READ, .mirror_num = mirror_num, @@ -4361,10 +4360,8 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, */ for (i = 0; i < num_pages; i++) { page = eb->pages[i]; - if (!PageUptodate(page)) { - num_reads++; + if (!PageUptodate(page)) all_uptodate = 0; - } } if (all_uptodate) { @@ -4374,7 +4371,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); eb->read_mirror = 0; - atomic_set(&eb->io_pages, num_reads); + atomic_set(&eb->io_pages, num_pages); /* * It is possible for release_folio to clear the TREE_REF bit before we * set io_pages. See check_buffer_tree_ref for a more detailed comment. @@ -4384,13 +4381,9 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, for (i = 0; i < num_pages; i++) { page = eb->pages[i]; - if (!PageUptodate(page)) { - ClearPageError(page); - submit_extent_page(&bio_ctrl, page_offset(page), page, - PAGE_SIZE, 0); - } else { - unlock_page(page); - } + ClearPageError(page); + submit_extent_page(&bio_ctrl, page_offset(page), page, + PAGE_SIZE, 0); } submit_one_bio(&bio_ctrl); -- cgit v1.2.3 From b78b98e06fb7f9860da5a7c28e1edbaefc2f7be1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:26 +0200 Subject: btrfs: don't use btrfs_bio_ctrl for extent buffer reading The btrfs_bio_ctrl machinery is overkill for reading extent_buffers as we always operate on PAGE_SIZE chunks (or one smaller one for the subpage case) that are contiguous and are guaranteed to fit into a single bio. Replace it with open coded btrfs_bio_alloc, __bio_add_page and btrfs_submit_bio calls in a helper function shared between the subpage and node size >= PAGE_SIZE cases. Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 99 +++++++++++++++++++--------------------------------- 1 file changed, 36 insertions(+), 63 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ede7c88e829e..681e6774124f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -98,22 +98,12 @@ void btrfs_extent_buffer_leak_debug_check(struct btrfs_fs_info *fs_info) */ struct btrfs_bio_ctrl { struct btrfs_bio *bbio; - int mirror_num; enum btrfs_compression_type compress_type; u32 len_to_oe_boundary; blk_opf_t opf; btrfs_bio_end_io_t end_io_func; struct writeback_control *wbc; - /* - * This is for metadata read, to provide the extra needed verification - * info. This has to be provided for submit_one_bio(), as - * submit_one_bio() can submit a bio if it ends at stripe boundary. If - * no such parent_check is provided, the metadata can hit false alert at - * endio time. 
- */ - struct btrfs_tree_parent_check *parent_check; - /* * Tell writepage not to lock the state bits for this range, it still * does the unlocking. @@ -124,7 +114,6 @@ struct btrfs_bio_ctrl { static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) { struct btrfs_bio *bbio = bio_ctrl->bbio; - int mirror_num = bio_ctrl->mirror_num; if (!bbio) return; @@ -132,25 +121,14 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) /* Caller should ensure the bio has at least some range added */ ASSERT(bbio->bio.bi_iter.bi_size); - if (!is_data_inode(&bbio->inode->vfs_inode)) { - if (btrfs_op(&bbio->bio) != BTRFS_MAP_WRITE) { - /* - * For metadata read, we should have the parent_check, - * and copy it to bbio for metadata verification. - */ - ASSERT(bio_ctrl->parent_check); - memcpy(&bbio->parent_check, - bio_ctrl->parent_check, - sizeof(struct btrfs_tree_parent_check)); - } + if (!is_data_inode(&bbio->inode->vfs_inode)) bbio->bio.bi_opf |= REQ_META; - } if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ && bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) - btrfs_submit_compressed_read(bbio, mirror_num); + btrfs_submit_compressed_read(bbio, 0); else - btrfs_submit_bio(bbio, mirror_num); + btrfs_submit_bio(bbio, 0); /* The bbio is owned by the end_io handler now */ bio_ctrl->bbio = NULL; @@ -4243,6 +4221,36 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb) } } +static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, + struct btrfs_tree_parent_check *check) +{ + int num_pages = num_extent_pages(eb), i; + struct btrfs_bio *bbio; + + clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); + eb->read_mirror = 0; + atomic_set(&eb->io_pages, num_pages); + check_buffer_tree_ref(eb); + + bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, + REQ_OP_READ | REQ_META, eb->fs_info, + end_bio_extent_readpage, NULL); + bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; + bbio->inode = BTRFS_I(eb->fs_info->btree_inode); + bbio->file_offset = eb->start; + memcpy(&bbio->parent_check, check, sizeof(*check)); + if (eb->fs_info->nodesize < PAGE_SIZE) { + __bio_add_page(&bbio->bio, eb->pages[0], eb->len, + eb->start - page_offset(eb->pages[0])); + } else { + for (i = 0; i < num_pages; i++) { + ClearPageError(eb->pages[i]); + __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0); + } + } + btrfs_submit_bio(bbio, mirror_num); +} + static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, int mirror_num, struct btrfs_tree_parent_check *check) @@ -4251,11 +4259,6 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, struct extent_io_tree *io_tree; struct page *page = eb->pages[0]; struct extent_state *cached_state = NULL; - struct btrfs_bio_ctrl bio_ctrl = { - .opf = REQ_OP_READ, - .mirror_num = mirror_num, - .parent_check = check, - }; int ret; ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)); @@ -4283,18 +4286,10 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, return 0; } - clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); - eb->read_mirror = 0; - atomic_set(&eb->io_pages, 1); - check_buffer_tree_ref(eb); - bio_ctrl.end_io_func = end_bio_extent_readpage; - btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len); - btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len); - submit_extent_page(&bio_ctrl, eb->start, page, eb->len, - eb->start - page_offset(page)); - submit_one_bio(&bio_ctrl); + + __read_extent_buffer_pages(eb, mirror_num, check); if (wait != WAIT_COMPLETE) { free_extent_state(cached_state); 
return 0; @@ -4315,11 +4310,6 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, int locked_pages = 0; int all_uptodate = 1; int num_pages; - struct btrfs_bio_ctrl bio_ctrl = { - .opf = REQ_OP_READ, - .mirror_num = mirror_num, - .parent_check = check, - }; if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) return 0; @@ -4369,24 +4359,7 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, goto unlock_exit; } - clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); - eb->read_mirror = 0; - atomic_set(&eb->io_pages, num_pages); - /* - * It is possible for release_folio to clear the TREE_REF bit before we - * set io_pages. See check_buffer_tree_ref for a more detailed comment. - */ - check_buffer_tree_ref(eb); - bio_ctrl.end_io_func = end_bio_extent_readpage; - for (i = 0; i < num_pages; i++) { - page = eb->pages[i]; - - ClearPageError(page); - submit_extent_page(&bio_ctrl, page_offset(page), page, - PAGE_SIZE, 0); - } - - submit_one_bio(&bio_ctrl); + __read_extent_buffer_pages(eb, mirror_num, check); if (wait != WAIT_COMPLETE) return 0; -- cgit v1.2.3 From e19493107675d48eb207b91d9a775c811f247e27 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:27 +0200 Subject: btrfs: remove the mirror_num argument to btrfs_submit_compressed_read Given that read recovery for data I/O is handled in the storage layer, the mirror_num argument to btrfs_submit_compressed_read is always 0, so remove it. Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 4 ++-- fs/btrfs/compression.h | 2 +- fs/btrfs/extent_io.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 14f5f25049a0..04cd5de4f00f 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -472,7 +472,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, * After the compressed pages are read, we copy the bytes into the * bio we were passed and then call the bio end_io calls */ -void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num) +void btrfs_submit_compressed_read(struct btrfs_bio *bbio) { struct btrfs_inode *inode = bbio->inode; struct btrfs_fs_info *fs_info = inode->root->fs_info; @@ -538,7 +538,7 @@ void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num) if (memstall) psi_memstall_leave(&pflags); - btrfs_submit_bio(&cb->bbio, mirror_num); + btrfs_submit_bio(&cb->bbio, 0); return; out_free_compressed_pages: diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 19ab2abeddc0..b462ab106f43 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -93,7 +93,7 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, unsigned int nr_pages, blk_opf_t write_flags, bool writeback); -void btrfs_submit_compressed_read(struct btrfs_bio *bbio, int mirror_num); +void btrfs_submit_compressed_read(struct btrfs_bio *bbio); unsigned int btrfs_compress_str2level(unsigned int type, const char *str); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 681e6774124f..ea99b2a3755e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -126,7 +126,7 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ && bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) - 
btrfs_submit_compressed_read(bbio, 0); + btrfs_submit_compressed_read(bbio); else btrfs_submit_bio(bbio, 0); -- cgit v1.2.3 From 046b562b20a5cfe205fb78c6f8a1a8b74f01d303 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:28 +0200 Subject: btrfs: use a separate end_io handler for read_extent_buffer Now that we always use a single bio to read an extent_buffer, the buffer can be passed to the end_io handler as private data. This allows implementing a much simplified dedicated end I/O handler for metadata reads. Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Signed-off-by: David Sterba --- fs/btrfs/disk-io.c | 105 +-------------------------------------------------- fs/btrfs/disk-io.h | 5 +-- fs/btrfs/extent_io.c | 80 ++++++++++++++++++--------------------- 3 files changed, 41 insertions(+), 149 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index d4f7578cda00..db04a957b445 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -427,8 +427,8 @@ static int check_tree_block_fsid(struct extent_buffer *eb) } /* Do basic extent buffer checks at read time */ -static int validate_extent_buffer(struct extent_buffer *eb, - struct btrfs_tree_parent_check *check) +int btrfs_validate_extent_buffer(struct extent_buffer *eb, + struct btrfs_tree_parent_check *check) { struct btrfs_fs_info *fs_info = eb->fs_info; u64 found_start; @@ -541,107 +541,6 @@ out: return ret; } -static int validate_subpage_buffer(struct page *page, u64 start, u64 end, - int mirror, struct btrfs_tree_parent_check *check) -{ - struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); - struct extent_buffer *eb; - bool reads_done; - int ret = 0; - - ASSERT(check); - - /* - * We don't allow bio merge for subpage metadata read, so we should - * only get one eb for each endio hook. - */ - ASSERT(end == start + fs_info->nodesize - 1); - ASSERT(PagePrivate(page)); - - eb = find_extent_buffer(fs_info, start); - /* - * When we are reading one tree block, eb must have been inserted into - * the radix tree. If not, something is wrong. - */ - ASSERT(eb); - - reads_done = atomic_dec_and_test(&eb->io_pages); - /* Subpage read must finish in page read */ - ASSERT(reads_done); - - eb->read_mirror = mirror; - if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) { - ret = -EIO; - goto err; - } - ret = validate_extent_buffer(eb, check); - if (ret < 0) - goto err; - - set_extent_buffer_uptodate(eb); - - free_extent_buffer(eb); - return ret; -err: - /* - * end_bio_extent_readpage decrements io_pages in case of error, - * make sure it has something to decrement. - */ - atomic_inc(&eb->io_pages); - clear_extent_buffer_uptodate(eb); - free_extent_buffer(eb); - return ret; -} - -int btrfs_validate_metadata_buffer(struct btrfs_bio *bbio, - struct page *page, u64 start, u64 end, - int mirror) -{ - struct extent_buffer *eb; - int ret = 0; - int reads_done; - - ASSERT(page->private); - - if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE) - return validate_subpage_buffer(page, start, end, mirror, - &bbio->parent_check); - - eb = (struct extent_buffer *)page->private; - - /* - * The pending IO might have been the only thing that kept this buffer - * in memory. 
Make sure we have a ref for all this other checks - */ - atomic_inc(&eb->refs); - - reads_done = atomic_dec_and_test(&eb->io_pages); - if (!reads_done) - goto err; - - eb->read_mirror = mirror; - if (test_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags)) { - ret = -EIO; - goto err; - } - ret = validate_extent_buffer(eb, &bbio->parent_check); - if (!ret) - set_extent_buffer_uptodate(eb); -err: - if (ret) { - /* - * our io error hook is going to dec the io pages - * again, we have to make sure it has something - * to decrement - */ - atomic_inc(&eb->io_pages); - clear_extent_buffer_uptodate(eb); - } - free_extent_buffer(eb); - - return ret; -} - #ifdef CONFIG_MIGRATION static int btree_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index a26ab3cbb449..b03767f4d7ed 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -82,9 +82,8 @@ void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info); void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info); void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); -int btrfs_validate_metadata_buffer(struct btrfs_bio *bbio, - struct page *page, u64 start, u64 end, - int mirror); +int btrfs_validate_extent_buffer(struct extent_buffer *eb, + struct btrfs_tree_parent_check *check); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info); #endif diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ea99b2a3755e..4581ee5a0835 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -663,35 +663,6 @@ static void begin_page_read(struct btrfs_fs_info *fs_info, struct page *page) btrfs_subpage_start_reader(fs_info, page, page_offset(page), PAGE_SIZE); } -/* - * Find extent buffer for a givne bytenr. - * - * This is for end_bio_extent_readpage(), thus we can't do any unsafe locking - * in endio context. - */ -static struct extent_buffer *find_extent_buffer_readpage( - struct btrfs_fs_info *fs_info, struct page *page, u64 bytenr) -{ - struct extent_buffer *eb; - - /* - * For regular sectorsize, we can use page->private to grab extent - * buffer - */ - if (fs_info->nodesize >= PAGE_SIZE) { - ASSERT(PagePrivate(page) && page->private); - return (struct extent_buffer *)page->private; - } - - /* For subpage case, we need to lookup buffer radix tree */ - rcu_read_lock(); - eb = radix_tree_lookup(&fs_info->buffer_radix, - bytenr >> fs_info->sectorsize_bits); - rcu_read_unlock(); - ASSERT(eb); - return eb; -} - /* * after a readpage IO is done, we need to: * clear the uptodate bits on error @@ -713,7 +684,6 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio) * larger than UINT_MAX, u32 here is enough. 
*/ u32 bio_offset = 0; - int mirror; struct bvec_iter_all iter_all; ASSERT(!bio_flagged(bio, BIO_CLONED)); @@ -753,11 +723,6 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio) end = start + bvec->bv_len - 1; len = bvec->bv_len; - mirror = bbio->mirror_num; - if (uptodate && !is_data_inode(inode) && - btrfs_validate_metadata_buffer(bbio, page, start, end, mirror)) - uptodate = false; - if (likely(uptodate)) { loff_t i_size = i_size_read(inode); pgoff_t end_index = i_size >> PAGE_SHIFT; @@ -778,13 +743,6 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio) zero_user_segment(page, zero_start, offset_in_page(end) + 1); } - } else if (!is_data_inode(inode)) { - struct extent_buffer *eb; - - eb = find_extent_buffer_readpage(fs_info, page, start); - set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); - eb->read_mirror = mirror; - atomic_dec(&eb->io_pages); } /* Update page status and unlock. */ @@ -4221,6 +4179,42 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb) } } +static void extent_buffer_read_end_io(struct btrfs_bio *bbio) +{ + struct extent_buffer *eb = bbio->private; + bool uptodate = !bbio->bio.bi_status; + struct bvec_iter_all iter_all; + struct bio_vec *bvec; + u32 bio_offset = 0; + + atomic_inc(&eb->refs); + eb->read_mirror = bbio->mirror_num; + + if (uptodate && + btrfs_validate_extent_buffer(eb, &bbio->parent_check) < 0) + uptodate = false; + + if (uptodate) { + set_extent_buffer_uptodate(eb); + } else { + clear_extent_buffer_uptodate(eb); + set_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); + } + + bio_for_each_segment_all(bvec, &bbio->bio, iter_all) { + atomic_dec(&eb->io_pages); + end_page_read(bvec->bv_page, uptodate, eb->start + bio_offset, + bvec->bv_len); + bio_offset += bvec->bv_len; + } + + unlock_extent(&bbio->inode->io_tree, eb->start, + eb->start + bio_offset - 1, NULL); + free_extent_buffer(eb); + + bio_put(&bbio->bio); +} + static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, struct btrfs_tree_parent_check *check) { @@ -4234,7 +4228,7 @@ static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, REQ_OP_READ | REQ_META, eb->fs_info, - end_bio_extent_readpage, NULL); + extent_buffer_read_end_io, eb); bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; bbio->inode = BTRFS_I(eb->fs_info->btree_inode); bbio->file_offset = eb->start; -- cgit v1.2.3 From 3d66b4b27d2b7f5e74071256acea38108f9dbeb5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:29 +0200 Subject: btrfs: do not try to unlock the extent for non-subpage metadata reads Only subpage metadata reads lock the extent. Don't try to unlock it and waste cycles in the extent tree lookup for PAGE_SIZE or larger metadata. 
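Condensed, the fix is a single guard at the end of the read end_io handler (see the hunk below); only the subpage submission path took the io_tree lock, so only that path pays for the unlock and its extent tree lookup:

    /* Only the subpage read path (nodesize < PAGE_SIZE) locked the
     * extent range at submission time, so only it must unlock here. */
    if (eb->fs_info->nodesize < PAGE_SIZE) {
            unlock_extent(&bbio->inode->io_tree, eb->start,
                          eb->start + bio_offset - 1, NULL);
    }
    free_extent_buffer(eb);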
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 4581ee5a0835..e35dfae57e50 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4208,8 +4208,10 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio) bio_offset += bvec->bv_len; } - unlock_extent(&bbio->inode->io_tree, eb->start, - eb->start + bio_offset - 1, NULL); + if (eb->fs_info->nodesize < PAGE_SIZE) { + unlock_extent(&bbio->inode->io_tree, eb->start, + eb->start + bio_offset - 1, NULL); + } free_extent_buffer(eb); bio_put(&bbio->bio); -- cgit v1.2.3 From 9fdd160160f002ac2604c5b8f90598febad44ae7 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:30 +0200 Subject: btrfs: return bool from lock_extent_buffer_for_io lock_extent_buffer_for_io never returns a negative error value, so switch the return value to a simple bool. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba [ keep noinline_for_stack ] Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 37 +++++++++++-------------------------- 1 file changed, 11 insertions(+), 26 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index e35dfae57e50..898c608c9b0b 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1629,18 +1629,17 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb) * * May try to flush write bio if we can't get the lock. * - * Return 0 if the extent buffer doesn't need to be submitted. - * (E.g. the extent buffer is not dirty) - * Return >0 is the extent buffer is submitted to bio. - * Return <0 if something went wrong, no page is locked. + * Return %false if the extent buffer doesn't need to be submitted (e.g. the + * extent buffer is not dirty) + * Return %true is the extent buffer is submitted to bio. 
*/ -static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb, +static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb, struct btrfs_bio_ctrl *bio_ctrl) { struct btrfs_fs_info *fs_info = eb->fs_info; int i, num_pages; int flush = 0; - int ret = 0; + bool ret = false; if (!btrfs_try_tree_write_lock(eb)) { submit_write_bio(bio_ctrl, 0); @@ -1651,7 +1650,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { btrfs_tree_unlock(eb); if (bio_ctrl->wbc->sync_mode != WB_SYNC_ALL) - return 0; + return false; if (!flush) { submit_write_bio(bio_ctrl, 0); flush = 1; @@ -1678,7 +1677,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, -eb->len, fs_info->dirty_metadata_batch); - ret = 1; + ret = true; } else { spin_unlock(&eb->refs_lock); } @@ -2012,7 +2011,6 @@ static int submit_eb_subpage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl) u64 page_start = page_offset(page); int bit_start = 0; int sectors_per_node = fs_info->nodesize >> fs_info->sectorsize_bits; - int ret; /* Lock and write each dirty extent buffers in the range */ while (bit_start < fs_info->subpage_info->bitmap_nr_bits) { @@ -2058,25 +2056,13 @@ static int submit_eb_subpage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl) if (!eb) continue; - ret = lock_extent_buffer_for_io(eb, bio_ctrl); - if (ret == 0) { - free_extent_buffer(eb); - continue; + if (lock_extent_buffer_for_io(eb, bio_ctrl)) { + write_one_subpage_eb(eb, bio_ctrl); + submitted++; } - if (ret < 0) { - free_extent_buffer(eb); - goto cleanup; - } - write_one_subpage_eb(eb, bio_ctrl); free_extent_buffer(eb); - submitted++; } return submitted; - -cleanup: - /* We hit error, end bio for the submitted extent buffers */ - submit_write_bio(bio_ctrl, ret); - return ret; } /* @@ -2155,8 +2141,7 @@ static int submit_eb_page(struct page *page, struct btrfs_bio_ctrl *bio_ctrl, *eb_context = eb; - ret = lock_extent_buffer_for_io(eb, bio_ctrl); - if (ret <= 0) { + if (!lock_extent_buffer_for_io(eb, bio_ctrl)) { btrfs_revert_meta_write_pointer(cache, eb); if (cache) btrfs_put_block_group(cache); -- cgit v1.2.3 From 50b21d7a066f9a702b1d54ca11fc577302e875cb Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:31 +0200 Subject: btrfs: submit a writeback bio per extent_buffer Stop trying to cluster writes of multiple extent_buffers into a single bio. There is no need for that as the blk_plug mechanism used all the way up in writeback_inodes_wb gives us the same I/O pattern even with multiple bios. Removing the clustering simplifies lock_extent_buffer_for_io a lot and will also allow passing the eb as private data to the end I/O handler. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 102 +++++++++++++++++++-------------------------------- 1 file changed, 37 insertions(+), 65 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 898c608c9b0b..40b612cf80a9 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1627,41 +1627,24 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb) /* * Lock extent buffer status and pages for writeback. * - * May try to flush write bio if we can't get the lock. 
- * * Return %false if the extent buffer doesn't need to be submitted (e.g. the * extent buffer is not dirty) * Return %true is the extent buffer is submitted to bio. */ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *eb, - struct btrfs_bio_ctrl *bio_ctrl) + struct writeback_control *wbc) { struct btrfs_fs_info *fs_info = eb->fs_info; - int i, num_pages; - int flush = 0; bool ret = false; + int i; - if (!btrfs_try_tree_write_lock(eb)) { - submit_write_bio(bio_ctrl, 0); - flush = 1; - btrfs_tree_lock(eb); - } - - if (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { + btrfs_tree_lock(eb); + while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { btrfs_tree_unlock(eb); - if (bio_ctrl->wbc->sync_mode != WB_SYNC_ALL) + if (wbc->sync_mode != WB_SYNC_ALL) return false; - if (!flush) { - submit_write_bio(bio_ctrl, 0); - flush = 1; - } - while (1) { - wait_on_extent_buffer_writeback(eb); - btrfs_tree_lock(eb); - if (!test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) - break; - btrfs_tree_unlock(eb); - } + wait_on_extent_buffer_writeback(eb); + btrfs_tree_lock(eb); } /* @@ -1693,19 +1676,8 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e if (!ret || fs_info->nodesize < PAGE_SIZE) return ret; - num_pages = num_extent_pages(eb); - for (i = 0; i < num_pages; i++) { - struct page *p = eb->pages[i]; - - if (!trylock_page(p)) { - if (!flush) { - submit_write_bio(bio_ctrl, 0); - flush = 1; - } - lock_page(p); - } - } - + for (i = 0; i < num_extent_pages(eb); i++) + lock_page(eb->pages[i]); return ret; } @@ -1936,11 +1908,16 @@ static void prepare_eb_write(struct extent_buffer *eb) * Page locking is only utilized at minimum to keep the VMM code happy. */ static void write_one_subpage_eb(struct extent_buffer *eb, - struct btrfs_bio_ctrl *bio_ctrl) + struct writeback_control *wbc) { struct btrfs_fs_info *fs_info = eb->fs_info; struct page *page = eb->pages[0]; bool no_dirty_ebs = false; + struct btrfs_bio_ctrl bio_ctrl = { + .wbc = wbc, + .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), + .end_io_func = end_bio_subpage_eb_writepage, + }; prepare_eb_write(eb); @@ -1954,40 +1931,43 @@ static void write_one_subpage_eb(struct extent_buffer *eb, if (no_dirty_ebs) clear_page_dirty_for_io(page); - bio_ctrl->end_io_func = end_bio_subpage_eb_writepage; - - submit_extent_page(bio_ctrl, eb->start, page, eb->len, + submit_extent_page(&bio_ctrl, eb->start, page, eb->len, eb->start - page_offset(page)); unlock_page(page); + submit_one_bio(&bio_ctrl); /* * Submission finished without problem, if no range of the page is * dirty anymore, we have submitted a page. Update nr_written in wbc. 
*/ if (no_dirty_ebs) - bio_ctrl->wbc->nr_to_write--; + wbc->nr_to_write--; } static noinline_for_stack void write_one_eb(struct extent_buffer *eb, - struct btrfs_bio_ctrl *bio_ctrl) + struct writeback_control *wbc) { u64 disk_bytenr = eb->start; int i, num_pages; + struct btrfs_bio_ctrl bio_ctrl = { + .wbc = wbc, + .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), + .end_io_func = end_bio_extent_buffer_writepage, + }; prepare_eb_write(eb); - bio_ctrl->end_io_func = end_bio_extent_buffer_writepage; - num_pages = num_extent_pages(eb); for (i = 0; i < num_pages; i++) { struct page *p = eb->pages[i]; clear_page_dirty_for_io(p); set_page_writeback(p); - submit_extent_page(bio_ctrl, disk_bytenr, p, PAGE_SIZE, 0); + submit_extent_page(&bio_ctrl, disk_bytenr, p, PAGE_SIZE, 0); disk_bytenr += PAGE_SIZE; - bio_ctrl->wbc->nr_to_write--; + wbc->nr_to_write--; unlock_page(p); } + submit_one_bio(&bio_ctrl); } /* @@ -2004,7 +1984,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, * Return >=0 for the number of submitted extent buffers. * Return <0 for fatal error. */ -static int submit_eb_subpage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl) +static int submit_eb_subpage(struct page *page, struct writeback_control *wbc) { struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); int submitted = 0; @@ -2056,8 +2036,8 @@ static int submit_eb_subpage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl) if (!eb) continue; - if (lock_extent_buffer_for_io(eb, bio_ctrl)) { - write_one_subpage_eb(eb, bio_ctrl); + if (lock_extent_buffer_for_io(eb, wbc)) { + write_one_subpage_eb(eb, wbc); submitted++; } free_extent_buffer(eb); @@ -2085,7 +2065,7 @@ static int submit_eb_subpage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl) * previous call. * Return <0 for fatal error. */ -static int submit_eb_page(struct page *page, struct btrfs_bio_ctrl *bio_ctrl, +static int submit_eb_page(struct page *page, struct writeback_control *wbc, struct extent_buffer **eb_context) { struct address_space *mapping = page->mapping; @@ -2097,7 +2077,7 @@ static int submit_eb_page(struct page *page, struct btrfs_bio_ctrl *bio_ctrl, return 0; if (btrfs_sb(page->mapping->host->i_sb)->nodesize < PAGE_SIZE) - return submit_eb_subpage(page, bio_ctrl); + return submit_eb_subpage(page, wbc); spin_lock(&mapping->private_lock); if (!PagePrivate(page)) { @@ -2130,8 +2110,7 @@ static int submit_eb_page(struct page *page, struct btrfs_bio_ctrl *bio_ctrl, * If for_sync, this hole will be filled with * trasnsaction commit. 
*/ - if (bio_ctrl->wbc->sync_mode == WB_SYNC_ALL && - !bio_ctrl->wbc->for_sync) + if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) ret = -EAGAIN; else ret = 0; @@ -2141,12 +2120,12 @@ static int submit_eb_page(struct page *page, struct btrfs_bio_ctrl *bio_ctrl, *eb_context = eb; - if (!lock_extent_buffer_for_io(eb, bio_ctrl)) { + if (!lock_extent_buffer_for_io(eb, wbc)) { btrfs_revert_meta_write_pointer(cache, eb); if (cache) btrfs_put_block_group(cache); free_extent_buffer(eb); - return ret; + return 0; } if (cache) { /* @@ -2155,7 +2134,7 @@ static int submit_eb_page(struct page *page, struct btrfs_bio_ctrl *bio_ctrl, btrfs_schedule_zone_finish_bg(cache, eb); btrfs_put_block_group(cache); } - write_one_eb(eb, bio_ctrl); + write_one_eb(eb, wbc); free_extent_buffer(eb); return 1; } @@ -2164,11 +2143,6 @@ int btree_write_cache_pages(struct address_space *mapping, struct writeback_control *wbc) { struct extent_buffer *eb_context = NULL; - struct btrfs_bio_ctrl bio_ctrl = { - .wbc = wbc, - .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), - .extent_locked = 0, - }; struct btrfs_fs_info *fs_info = BTRFS_I(mapping->host)->root->fs_info; int ret = 0; int done = 0; @@ -2210,7 +2184,7 @@ retry: for (i = 0; i < nr_folios; i++) { struct folio *folio = fbatch.folios[i]; - ret = submit_eb_page(&folio->page, &bio_ctrl, &eb_context); + ret = submit_eb_page(&folio->page, wbc, &eb_context); if (ret == 0) continue; if (ret < 0) { @@ -2271,8 +2245,6 @@ retry: ret = 0; if (!ret && BTRFS_FS_ERROR(fs_info)) ret = -EROFS; - submit_write_bio(&bio_ctrl, ret); - btrfs_zoned_meta_io_unlock(fs_info); return ret; } -- cgit v1.2.3 From 81a79b6ae4519a97c5924d68e1fb77d283cb051d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:32 +0200 Subject: btrfs: move page locking from lock_extent_buffer_for_io to write_one_eb Locking the pages in lock_extent_buffer_for_io only for the non-subpage case is very confusing. Move it to write_one_eb to mirror the subpage case and simplify the code. Now lock_extent_buffer_for_io does not leave all the pages locked and each is individually locked/unlocked in write_one_eb. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 15 +-------------- 1 file changed, 1 insertion(+), 14 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 40b612cf80a9..6284dfd43c91 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1636,7 +1636,6 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e { struct btrfs_fs_info *fs_info = eb->fs_info; bool ret = false; - int i; btrfs_tree_lock(eb); while (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)) { @@ -1664,20 +1663,7 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e } else { spin_unlock(&eb->refs_lock); } - btrfs_tree_unlock(eb); - - /* - * Either we don't need to submit any tree block, or we're submitting - * subpage eb. - * Subpage metadata doesn't use page locking at all, so we can skip - * the page locking. 
- */ - if (!ret || fs_info->nodesize < PAGE_SIZE) - return ret; - - for (i = 0; i < num_extent_pages(eb); i++) - lock_page(eb->pages[i]); return ret; } @@ -1960,6 +1946,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, for (i = 0; i < num_pages; i++) { struct page *p = eb->pages[i]; + lock_page(p); clear_page_dirty_for_io(p); set_page_writeback(p); submit_extent_page(&bio_ctrl, disk_bytenr, p, PAGE_SIZE, 0); -- cgit v1.2.3 From b51e6b4bda5bddbd002ace239eb5eadd4d729039 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:33 +0200 Subject: btrfs: don't use btrfs_bio_ctrl for extent buffer writing The btrfs_bio_ctrl machinery is overkill for writing extent_buffers as we always operate on PAGE_SIZE chunks (or one smaller one for the subpage case) that are contiguous and are guaranteed to fit into a single bio. Replace it with open coded btrfs_bio_alloc, __bio_add_page and btrfs_submit_bio calls. Reviewed-by: Johannes Thumshirn Reviewed-by: Qu Wenruo Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 6284dfd43c91..3d60dde197f1 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -121,9 +121,6 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) /* Caller should ensure the bio has at least some range added */ ASSERT(bbio->bio.bi_iter.bi_size); - if (!is_data_inode(&bbio->inode->vfs_inode)) - bbio->bio.bi_opf |= REQ_META; - if (btrfs_op(&bbio->bio) == BTRFS_MAP_READ && bio_ctrl->compress_type != BTRFS_COMPRESS_NONE) btrfs_submit_compressed_read(bbio); @@ -1899,11 +1896,7 @@ static void write_one_subpage_eb(struct extent_buffer *eb, struct btrfs_fs_info *fs_info = eb->fs_info; struct page *page = eb->pages[0]; bool no_dirty_ebs = false; - struct btrfs_bio_ctrl bio_ctrl = { - .wbc = wbc, - .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), - .end_io_func = end_bio_subpage_eb_writepage, - }; + struct btrfs_bio *bbio; prepare_eb_write(eb); @@ -1917,10 +1910,17 @@ static void write_one_subpage_eb(struct extent_buffer *eb, if (no_dirty_ebs) clear_page_dirty_for_io(page); - submit_extent_page(&bio_ctrl, eb->start, page, eb->len, - eb->start - page_offset(page)); + bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, + REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc), + eb->fs_info, end_bio_subpage_eb_writepage, NULL); + bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; + bbio->inode = BTRFS_I(eb->fs_info->btree_inode); + bbio->file_offset = eb->start; + __bio_add_page(&bbio->bio, page, eb->len, eb->start - page_offset(page)); + wbc_account_cgroup_owner(wbc, page, eb->len); unlock_page(page); - submit_one_bio(&bio_ctrl); + btrfs_submit_bio(bbio, 0); + /* * Submission finished without problem, if no range of the page is * dirty anymore, we have submitted a page. Update nr_written in wbc. 
@@ -1932,16 +1932,21 @@ static void write_one_subpage_eb(struct extent_buffer *eb, static noinline_for_stack void write_one_eb(struct extent_buffer *eb, struct writeback_control *wbc) { - u64 disk_bytenr = eb->start; + struct btrfs_bio *bbio; int i, num_pages; - struct btrfs_bio_ctrl bio_ctrl = { - .wbc = wbc, - .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), - .end_io_func = end_bio_extent_buffer_writepage, - }; prepare_eb_write(eb); + bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, + REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc), + eb->fs_info, end_bio_extent_buffer_writepage, + NULL); + bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; + bio_set_dev(&bbio->bio, eb->fs_info->fs_devices->latest_dev->bdev); + wbc_init_bio(wbc, &bbio->bio); + bbio->inode = BTRFS_I(eb->fs_info->btree_inode); + bbio->file_offset = eb->start; + num_pages = num_extent_pages(eb); for (i = 0; i < num_pages; i++) { struct page *p = eb->pages[i]; @@ -1949,12 +1954,12 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, lock_page(p); clear_page_dirty_for_io(p); set_page_writeback(p); - submit_extent_page(&bio_ctrl, disk_bytenr, p, PAGE_SIZE, 0); - disk_bytenr += PAGE_SIZE; + __bio_add_page(&bbio->bio, p, PAGE_SIZE, 0); + wbc_account_cgroup_owner(wbc, p, PAGE_SIZE); wbc->nr_to_write--; unlock_page(p); } - submit_one_bio(&bio_ctrl); + btrfs_submit_bio(bbio, 0); } /* -- cgit v1.2.3 From cd88a4fdbf1e3d55d852dd67c38155ee3ee3469f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:34 +0200 Subject: btrfs: use a separate end_io handler for extent_buffer writing Now that we always use a single bio to write an extent_buffer, the buffer can be passed to the end_io handler as private data. This allows to simplify the metadata write end I/O handler, and merge the subpage end_io handler into the main one. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 126 +++++++++++---------------------------------------- 1 file changed, 26 insertions(+), 100 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3d60dde197f1..eebcef557bd8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1614,13 +1614,6 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb) TASK_UNINTERRUPTIBLE); } -static void end_extent_buffer_writeback(struct extent_buffer *eb) -{ - clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); - smp_mb__after_atomic(); - wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); -} - /* * Lock extent buffer status and pages for writeback. * @@ -1664,13 +1657,11 @@ static noinline_for_stack bool lock_extent_buffer_for_io(struct extent_buffer *e return ret; } -static void set_btree_ioerr(struct page *page, struct extent_buffer *eb) +static void set_btree_ioerr(struct extent_buffer *eb) { struct btrfs_fs_info *fs_info = eb->fs_info; - btrfs_page_set_error(fs_info, page, eb->start, eb->len); - if (test_and_set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) - return; + set_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); /* * A read may stumble upon this buffer later, make sure that it gets an @@ -1684,7 +1675,7 @@ static void set_btree_ioerr(struct page *page, struct extent_buffer *eb) * return a 0 because we are readonly if we don't modify the err seq for * the superblock. 
*/ - mapping_set_error(page->mapping, -EIO); + mapping_set_error(eb->fs_info->btree_inode->i_mapping, -EIO); /* * If writeback for a btree extent that doesn't belong to a log tree @@ -1759,102 +1750,38 @@ static struct extent_buffer *find_extent_buffer_nolock( return NULL; } -/* - * The endio function for subpage extent buffer write. - * - * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback() - * after all extent buffers in the page has finished their writeback. - */ -static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio) +static void extent_buffer_write_end_io(struct btrfs_bio *bbio) { - struct bio *bio = &bbio->bio; - struct btrfs_fs_info *fs_info; - struct bio_vec *bvec; + struct extent_buffer *eb = bbio->private; + struct btrfs_fs_info *fs_info = eb->fs_info; + bool uptodate = !bbio->bio.bi_status; struct bvec_iter_all iter_all; + struct bio_vec *bvec; + u32 bio_offset = 0; - fs_info = btrfs_sb(bio_first_page_all(bio)->mapping->host->i_sb); - ASSERT(fs_info->nodesize < PAGE_SIZE); + if (!uptodate) + set_btree_ioerr(eb); - ASSERT(!bio_flagged(bio, BIO_CLONED)); - bio_for_each_segment_all(bvec, bio, iter_all) { + bio_for_each_segment_all(bvec, &bbio->bio, iter_all) { + u64 start = eb->start + bio_offset; struct page *page = bvec->bv_page; - u64 bvec_start = page_offset(page) + bvec->bv_offset; - u64 bvec_end = bvec_start + bvec->bv_len - 1; - u64 cur_bytenr = bvec_start; - - ASSERT(IS_ALIGNED(bvec->bv_len, fs_info->nodesize)); + u32 len = bvec->bv_len; - /* Iterate through all extent buffers in the range */ - while (cur_bytenr <= bvec_end) { - struct extent_buffer *eb; - int done; - - /* - * Here we can't use find_extent_buffer(), as it may - * try to lock eb->refs_lock, which is not safe in endio - * context. - */ - eb = find_extent_buffer_nolock(fs_info, cur_bytenr); - ASSERT(eb); - - cur_bytenr = eb->start + eb->len; - - ASSERT(test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags)); - done = atomic_dec_and_test(&eb->io_pages); - ASSERT(done); - - if (bio->bi_status || - test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { - btrfs_page_clear_uptodate(fs_info, page, - eb->start, eb->len); - set_btree_ioerr(page, eb); - } + atomic_dec(&eb->io_pages); - btrfs_subpage_clear_writeback(fs_info, page, eb->start, - eb->len); - end_extent_buffer_writeback(eb); - /* - * free_extent_buffer() will grab spinlock which is not - * safe in endio context. Thus here we manually dec - * the ref. 
- */ - atomic_dec(&eb->refs); + if (!uptodate) { + btrfs_page_clear_uptodate(fs_info, page, start, len); + btrfs_page_set_error(fs_info, page, start, len); } + btrfs_page_clear_writeback(fs_info, page, start, len); + bio_offset += len; } - bio_put(bio); -} - -static void end_bio_extent_buffer_writepage(struct btrfs_bio *bbio) -{ - struct bio *bio = &bbio->bio; - struct bio_vec *bvec; - struct extent_buffer *eb; - int done; - struct bvec_iter_all iter_all; - - ASSERT(!bio_flagged(bio, BIO_CLONED)); - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - - eb = (struct extent_buffer *)page->private; - BUG_ON(!eb); - done = atomic_dec_and_test(&eb->io_pages); - - if (bio->bi_status || - test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { - ClearPageUptodate(page); - set_btree_ioerr(page, eb); - } - - end_page_writeback(page); - if (!done) - continue; - - end_extent_buffer_writeback(eb); - } + clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags); + smp_mb__after_atomic(); + wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK); - bio_put(bio); + bio_put(&bbio->bio); } static void prepare_eb_write(struct extent_buffer *eb) @@ -1912,7 +1839,7 @@ static void write_one_subpage_eb(struct extent_buffer *eb, bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc), - eb->fs_info, end_bio_subpage_eb_writepage, NULL); + eb->fs_info, extent_buffer_write_end_io, eb); bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; bbio->inode = BTRFS_I(eb->fs_info->btree_inode); bbio->file_offset = eb->start; @@ -1939,8 +1866,7 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc), - eb->fs_info, end_bio_extent_buffer_writepage, - NULL); + eb->fs_info, extent_buffer_write_end_io, eb); bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; bio_set_dev(&bbio->bio, eb->fs_info->fs_devices->latest_dev->bdev); wbc_init_bio(wbc, &bbio->bio); -- cgit v1.2.3 From 113fa05c2fa1a9ee45a31909e6d78666e426adca Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:36 +0200 Subject: btrfs: remove the io_pages field in struct extent_buffer No need to track the number of pages under I/O now that each extent_buffer is read and written using a single bio. For the read side we need to grab an extra reference for the duration of the I/O to prevent eviction, though. 
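Condensed from the hunks below, the read-side lifetime rule that replaces the io_pages counter is:

    /* Submission: pin the buffer once for the single read bio instead
     * of counting per-page I/O in eb->io_pages. */
    check_buffer_tree_ref(eb);
    atomic_inc(&eb->refs);
    btrfs_submit_bio(bbio, mirror_num);

    /* Completion (extent_buffer_read_end_io): drop the submission-time
     * pin after every page has been ended. */
    free_extent_buffer(eb);
    bio_put(&bbio->bio);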
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 17 +++++------------ fs/btrfs/extent_io.h | 1 - 2 files changed, 5 insertions(+), 13 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index eebcef557bd8..d8e6c8d5ed98 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1767,8 +1767,6 @@ static void extent_buffer_write_end_io(struct btrfs_bio *bbio) struct page *page = bvec->bv_page; u32 len = bvec->bv_len; - atomic_dec(&eb->io_pages); - if (!uptodate) { btrfs_page_clear_uptodate(fs_info, page, start, len); btrfs_page_set_error(fs_info, page, start, len); @@ -1791,7 +1789,6 @@ static void prepare_eb_write(struct extent_buffer *eb) unsigned long end; clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags); - atomic_set(&eb->io_pages, num_extent_pages(eb)); /* Set btree blocks beyond nritems with 0 to avoid stale content */ nritems = btrfs_header_nritems(eb); @@ -3235,8 +3232,7 @@ static void __free_extent_buffer(struct extent_buffer *eb) static int extent_buffer_under_io(const struct extent_buffer *eb) { - return (atomic_read(&eb->io_pages) || - test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || + return (test_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags) || test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); } @@ -3372,7 +3368,6 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, spin_lock_init(&eb->refs_lock); atomic_set(&eb->refs, 1); - atomic_set(&eb->io_pages, 0); ASSERT(len <= BTRFS_MAX_METADATA_BLOCKSIZE); @@ -3489,9 +3484,9 @@ static void check_buffer_tree_ref(struct extent_buffer *eb) * adequately protected by the refcount, but the TREE_REF bit and * its corresponding reference are not. To protect against this * class of races, we call check_buffer_tree_ref from the codepaths - * which trigger io after they set eb->io_pages. Note that once io is - * initiated, TREE_REF can no longer be cleared, so that is the - * moment at which any such race is best fixed. + * which trigger io. Note that once io is initiated, TREE_REF can no + * longer be cleared, so that is the moment at which any such race is + * best fixed. 
*/ refs = atomic_read(&eb->refs); if (refs >= 2 && test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) @@ -4062,7 +4057,6 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio) struct bio_vec *bvec; u32 bio_offset = 0; - atomic_inc(&eb->refs); eb->read_mirror = bbio->mirror_num; if (uptodate && @@ -4077,7 +4071,6 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio) } bio_for_each_segment_all(bvec, &bbio->bio, iter_all) { - atomic_dec(&eb->io_pages); end_page_read(bvec->bv_page, uptodate, eb->start + bio_offset, bvec->bv_len); bio_offset += bvec->bv_len; @@ -4100,8 +4093,8 @@ static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); eb->read_mirror = 0; - atomic_set(&eb->io_pages, num_pages); check_buffer_tree_ref(eb); + atomic_inc(&eb->refs); bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, REQ_OP_READ | REQ_META, eb->fs_info, diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e11ee09ab26b..217c433bb999 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -79,7 +79,6 @@ struct extent_buffer { struct btrfs_fs_info *fs_info; spinlock_t refs_lock; atomic_t refs; - atomic_t io_pages; int read_mirror; struct rcu_head rcu_head; pid_t lock_owner; -- cgit v1.2.3 From 011134f444dcc4192b9945b963d4b8340db3667d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:37 +0200 Subject: btrfs: stop using PageError for extent_buffers PageError is only used to limit the uptodate check in assert_eb_page_uptodate. But we have a much more useful flag indicating the exact condition we care about with the EXTENT_BUFFER_WRITE_ERR flag, so use that instead and help the kernel toward eventually removing PageError. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d8e6c8d5ed98..16e439fb3d80 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1767,10 +1767,8 @@ static void extent_buffer_write_end_io(struct btrfs_bio *bbio) struct page *page = bvec->bv_page; u32 len = bvec->bv_len; - if (!uptodate) { + if (!uptodate) btrfs_page_clear_uptodate(fs_info, page, start, len); - btrfs_page_set_error(fs_info, page, start, len); - } btrfs_page_clear_writeback(fs_info, page, start, len); bio_offset += len; } @@ -3948,7 +3946,6 @@ void btrfs_clear_buffer_dirty(struct btrfs_trans_handle *trans, continue; lock_page(page); btree_clear_page_dirty(page); - ClearPageError(page); unlock_page(page); } WARN_ON(atomic_read(&eb->refs) == 0); @@ -4107,10 +4104,8 @@ static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, __bio_add_page(&bbio->bio, eb->pages[0], eb->len, eb->start - page_offset(eb->pages[0])); } else { - for (i = 0; i < num_pages; i++) { - ClearPageError(eb->pages[i]); + for (i = 0; i < num_pages; i++) __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0); - } } btrfs_submit_bio(bbio, mirror_num); } @@ -4150,7 +4145,6 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, return 0; } - btrfs_subpage_clear_error(fs_info, page, eb->start, eb->len); btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len); __read_extent_buffer_pages(eb, mirror_num, check); @@ -4392,18 +4386,16 @@ static void assert_eb_page_uptodate(const struct
extent_buffer *eb, * looked up. We don't want to complain in this case, as the page was * valid before, we just didn't write it out. Instead we want to catch * the case where we didn't actually read the block properly, which - * would have !PageUptodate && !PageError, as we clear PageError before - * reading. + * would have !PageUptodate and !EXTENT_BUFFER_WRITE_ERR. */ - if (fs_info->nodesize < PAGE_SIZE) { - bool uptodate, error; + if (test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) + return; - uptodate = btrfs_subpage_test_uptodate(fs_info, page, - eb->start, eb->len); - error = btrfs_subpage_test_error(fs_info, page, eb->start, eb->len); - WARN_ON(!uptodate && !error); + if (fs_info->nodesize < PAGE_SIZE) { + WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page, + eb->start, eb->len)); } else { - WARN_ON(!PageUptodate(page) && !PageError(page)); + WARN_ON(!PageUptodate(page)); } } -- cgit v1.2.3 From f3d315eb9372868bce92e7b24f2b92e061fe6fcd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:38 +0200 Subject: btrfs: don't check for uptodate pages in read_extent_buffer_pages The only place that reads in pages and thus marks them uptodate for the btree inode is read_extent_buffer_pages, which means that either pages are already uptodate from an old buffer when creating a new one in alloc_extent_buffer, or they will be updated by a call to read_extent_buffer_pages. This means the checks for uptodate pages in read_extent_buffer_pages and read_extent_buffer_subpage are superfluous and can be removed. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 21 +-------------------- 1 file changed, 1 insertion(+), 20 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 16e439fb3d80..40f06392a7cf 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4136,10 +4136,7 @@ static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, return ret; } - if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags) || - PageUptodate(page) || - btrfs_subpage_test_uptodate(fs_info, page, eb->start, eb->len)) { - set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); + if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) { unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, &cached_state); return 0; @@ -4166,7 +4163,6 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, int i; struct page *page; int locked_pages = 0; - int all_uptodate = 1; int num_pages; if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) @@ -4201,21 +4197,6 @@ int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, } locked_pages++; } - /* - * We need to firstly lock all pages to make sure that - * the uptodate bit of our pages won't be affected by - * clear_extent_buffer_uptodate().
- */ - for (i = 0; i < num_pages; i++) { - page = eb->pages[i]; - if (!PageUptodate(page)) - all_uptodate = 0; - } - - if (all_uptodate) { - set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); - goto unlock_exit; - } __read_extent_buffer_pages(eb, mirror_num, check); -- cgit v1.2.3 From d7172f52e9933b6ec9305e7fe6e829e3939dba04 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:40 +0200 Subject: btrfs: use per-buffer locking for extent_buffer reading Instead of locking and unlocking every page or the extent, just add a new EXTENT_BUFFER_READING bit that mirrors EXTENT_BUFFER_WRITEBACK for synchronizing threads trying to read an extent_buffer and to wait for I/O completion. Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 150 ++++++++++++--------------------------------------- fs/btrfs/extent_io.h | 2 + 2 files changed, 37 insertions(+), 115 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 40f06392a7cf..650cb68dcfb6 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4049,6 +4049,7 @@ void set_extent_buffer_uptodate(struct extent_buffer *eb) static void extent_buffer_read_end_io(struct btrfs_bio *bbio) { struct extent_buffer *eb = bbio->private; + struct btrfs_fs_info *fs_info = eb->fs_info; bool uptodate = !bbio->bio.bi_status; struct bvec_iter_all iter_all; struct bio_vec *bvec; @@ -4068,26 +4069,47 @@ static void extent_buffer_read_end_io(struct btrfs_bio *bbio) } bio_for_each_segment_all(bvec, &bbio->bio, iter_all) { - end_page_read(bvec->bv_page, uptodate, eb->start + bio_offset, - bvec->bv_len); - bio_offset += bvec->bv_len; - } + u64 start = eb->start + bio_offset; + struct page *page = bvec->bv_page; + u32 len = bvec->bv_len; - if (eb->fs_info->nodesize < PAGE_SIZE) { - unlock_extent(&bbio->inode->io_tree, eb->start, - eb->start + bio_offset - 1, NULL); + if (uptodate) + btrfs_page_set_uptodate(fs_info, page, start, len); + else + btrfs_page_clear_uptodate(fs_info, page, start, len); + + bio_offset += len; } + + clear_bit(EXTENT_BUFFER_READING, &eb->bflags); + smp_mb__after_atomic(); + wake_up_bit(&eb->bflags, EXTENT_BUFFER_READING); free_extent_buffer(eb); bio_put(&bbio->bio); } -static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, - struct btrfs_tree_parent_check *check) +int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, + struct btrfs_tree_parent_check *check) { int num_pages = num_extent_pages(eb), i; struct btrfs_bio *bbio; + if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) + return 0; + + /* + * We could have had EXTENT_BUFFER_UPTODATE cleared by the write + * operation, which could potentially still be in flight. In this case + * we simply want to return an error. + */ + if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) + return -EIO; + + /* Someone else is already reading the buffer, just wait for it. 
*/ + if (test_and_set_bit(EXTENT_BUFFER_READING, &eb->bflags)) + goto done; + clear_bit(EXTENT_BUFFER_READ_ERR, &eb->bflags); eb->read_mirror = 0; check_buffer_tree_ref(eb); @@ -4108,117 +4130,15 @@ static void __read_extent_buffer_pages(struct extent_buffer *eb, int mirror_num, __bio_add_page(&bbio->bio, eb->pages[i], PAGE_SIZE, 0); } btrfs_submit_bio(bbio, mirror_num); -} - -static int read_extent_buffer_subpage(struct extent_buffer *eb, int wait, - int mirror_num, - struct btrfs_tree_parent_check *check) -{ - struct btrfs_fs_info *fs_info = eb->fs_info; - struct extent_io_tree *io_tree; - struct page *page = eb->pages[0]; - struct extent_state *cached_state = NULL; - int ret; - - ASSERT(!test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags)); - ASSERT(PagePrivate(page)); - ASSERT(check); - io_tree = &BTRFS_I(fs_info->btree_inode)->io_tree; - - if (wait == WAIT_NONE) { - if (!try_lock_extent(io_tree, eb->start, eb->start + eb->len - 1, - &cached_state)) - return -EAGAIN; - } else { - ret = lock_extent(io_tree, eb->start, eb->start + eb->len - 1, - &cached_state); - if (ret < 0) - return ret; - } - - if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) { - unlock_extent(io_tree, eb->start, eb->start + eb->len - 1, - &cached_state); - return 0; - } - - btrfs_subpage_start_reader(fs_info, page, eb->start, eb->len); - __read_extent_buffer_pages(eb, mirror_num, check); - if (wait != WAIT_COMPLETE) { - free_extent_state(cached_state); - return 0; - } - - wait_extent_bit(io_tree, eb->start, eb->start + eb->len - 1, - EXTENT_LOCKED, &cached_state); - if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) - return -EIO; - return 0; -} - -int read_extent_buffer_pages(struct extent_buffer *eb, int wait, int mirror_num, - struct btrfs_tree_parent_check *check) -{ - int i; - struct page *page; - int locked_pages = 0; - int num_pages; - - if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) - return 0; - - /* - * We could have had EXTENT_BUFFER_UPTODATE cleared by the write - * operation, which could potentially still be in flight. In this case - * we simply want to return an error. - */ - if (unlikely(test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags))) - return -EIO; - - if (eb->fs_info->nodesize < PAGE_SIZE) - return read_extent_buffer_subpage(eb, wait, mirror_num, check); - - num_pages = num_extent_pages(eb); - for (i = 0; i < num_pages; i++) { - page = eb->pages[i]; - if (wait == WAIT_NONE) { - /* - * WAIT_NONE is only utilized by readahead. If we can't - * acquire the lock atomically it means either the eb - * is being read out or under modification. - * Either way the eb will be or has been cached, - * readahead can exit safely. 
- */ - if (!trylock_page(page)) - goto unlock_exit; - } else { - lock_page(page); - } - locked_pages++; - } - - __read_extent_buffer_pages(eb, mirror_num, check); - - if (wait != WAIT_COMPLETE) - return 0; - - for (i = 0; i < num_pages; i++) { - page = eb->pages[i]; - wait_on_page_locked(page); - if (!PageUptodate(page)) +done: + if (wait == WAIT_COMPLETE) { + wait_on_bit_io(&eb->bflags, EXTENT_BUFFER_READING, TASK_UNINTERRUPTIBLE); + if (!test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags)) return -EIO; } return 0; - -unlock_exit: - while (locked_pages > 0) { - locked_pages--; - page = eb->pages[locked_pages]; - unlock_page(page); - } - return 0; } static bool report_eb_range(const struct extent_buffer *eb, unsigned long start, diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 217c433bb999..2d91ca91dca5 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -29,6 +29,8 @@ enum { /* write IO error */ EXTENT_BUFFER_WRITE_ERR, EXTENT_BUFFER_NO_CHECK, + /* Indicate that extent buffer pages a being read */ + EXTENT_BUFFER_READING, }; /* these are flags for __process_pages_contig */ -- cgit v1.2.3 From 46672a44b023461d13f063bc95bf832f8124177f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 3 May 2023 17:24:41 +0200 Subject: btrfs: merge write_one_subpage_eb into write_one_eb Most of the code in write_one_subpage_eb and write_one_eb is shared, so merge the two functions into one. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 80 ++++++++++++++++------------------------------------ 1 file changed, 25 insertions(+), 55 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 650cb68dcfb6..b38fbdd70a6c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1808,54 +1808,11 @@ static void prepare_eb_write(struct extent_buffer *eb) } } -/* - * Unlike the work in write_one_eb(), we rely completely on extent locking. - * Page locking is only utilized at minimum to keep the VMM code happy. - */ -static void write_one_subpage_eb(struct extent_buffer *eb, - struct writeback_control *wbc) -{ - struct btrfs_fs_info *fs_info = eb->fs_info; - struct page *page = eb->pages[0]; - bool no_dirty_ebs = false; - struct btrfs_bio *bbio; - - prepare_eb_write(eb); - - /* clear_page_dirty_for_io() in subpage helper needs page locked */ - lock_page(page); - btrfs_subpage_set_writeback(fs_info, page, eb->start, eb->len); - - /* Check if this is the last dirty bit to update nr_written */ - no_dirty_ebs = btrfs_subpage_clear_and_test_dirty(fs_info, page, - eb->start, eb->len); - if (no_dirty_ebs) - clear_page_dirty_for_io(page); - - bbio = btrfs_bio_alloc(INLINE_EXTENT_BUFFER_PAGES, - REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc), - eb->fs_info, extent_buffer_write_end_io, eb); - bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; - bbio->inode = BTRFS_I(eb->fs_info->btree_inode); - bbio->file_offset = eb->start; - __bio_add_page(&bbio->bio, page, eb->len, eb->start - page_offset(page)); - wbc_account_cgroup_owner(wbc, page, eb->len); - unlock_page(page); - btrfs_submit_bio(bbio, 0); - - /* - * Submission finished without problem, if no range of the page is - * dirty anymore, we have submitted a page. Update nr_written in wbc. 
- */ - if (no_dirty_ebs) - wbc->nr_to_write--; -} - static noinline_for_stack void write_one_eb(struct extent_buffer *eb, struct writeback_control *wbc) { + struct btrfs_fs_info *fs_info = eb->fs_info; struct btrfs_bio *bbio; - int i, num_pages; prepare_eb_write(eb); @@ -1863,22 +1820,35 @@ static noinline_for_stack void write_one_eb(struct extent_buffer *eb, REQ_OP_WRITE | REQ_META | wbc_to_write_flags(wbc), eb->fs_info, extent_buffer_write_end_io, eb); bbio->bio.bi_iter.bi_sector = eb->start >> SECTOR_SHIFT; - bio_set_dev(&bbio->bio, eb->fs_info->fs_devices->latest_dev->bdev); + bio_set_dev(&bbio->bio, fs_info->fs_devices->latest_dev->bdev); wbc_init_bio(wbc, &bbio->bio); bbio->inode = BTRFS_I(eb->fs_info->btree_inode); bbio->file_offset = eb->start; - - num_pages = num_extent_pages(eb); - for (i = 0; i < num_pages; i++) { - struct page *p = eb->pages[i]; + if (fs_info->nodesize < PAGE_SIZE) { + struct page *p = eb->pages[0]; lock_page(p); - clear_page_dirty_for_io(p); - set_page_writeback(p); - __bio_add_page(&bbio->bio, p, PAGE_SIZE, 0); - wbc_account_cgroup_owner(wbc, p, PAGE_SIZE); - wbc->nr_to_write--; + btrfs_subpage_set_writeback(fs_info, p, eb->start, eb->len); + if (btrfs_subpage_clear_and_test_dirty(fs_info, p, eb->start, + eb->len)) { + clear_page_dirty_for_io(p); + wbc->nr_to_write--; + } + __bio_add_page(&bbio->bio, p, eb->len, eb->start - page_offset(p)); + wbc_account_cgroup_owner(wbc, p, eb->len); unlock_page(p); + } else { + for (int i = 0; i < num_extent_pages(eb); i++) { + struct page *p = eb->pages[i]; + + lock_page(p); + clear_page_dirty_for_io(p); + set_page_writeback(p); + __bio_add_page(&bbio->bio, p, PAGE_SIZE, 0); + wbc_account_cgroup_owner(wbc, p, PAGE_SIZE); + wbc->nr_to_write--; + unlock_page(p); + } } btrfs_submit_bio(bbio, 0); } @@ -1950,7 +1920,7 @@ static int submit_eb_subpage(struct page *page, struct writeback_control *wbc) continue; if (lock_extent_buffer_for_io(eb, wbc)) { - write_one_subpage_eb(eb, wbc); + write_one_eb(eb, wbc); submitted++; } free_extent_buffer(eb); -- cgit v1.2.3 From 1d12680044301760485b7f056812cd0d33dca2c6 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Thu, 25 May 2023 01:04:39 +0200 Subject: btrfs: drop gfp from parameter extent state helpers Now that all extent state bit helpers effectively take the GFP_NOFS mask (and GFP_NOWAIT is encoded in the bits) we can remove the parameter. This reduces stack consumption in many functions and simplifies a lot of code. 
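As a hypothetical example of the conversion (not a call site taken from this series), callers change from

	/* before: the allocation mask was spelled out at every call site */
	set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL, GFP_NOFS);

to

	/* after: GFP_NOFS is the default; nowait callers encode it in the bits */
	set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL);
	set_extent_bit(tree, start, end, EXTENT_DIRTY | EXTENT_NOWAIT, NULL);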
Net effect on the module on a release build:

   text    data     bss     dec     hex filename
1250432   20985   16088 1287505  13a551 pre/btrfs.ko
1247074   20985   16088 1284147  139833 post/btrfs.ko

DELTA: -3358

Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 2 +- fs/btrfs/defrag.c | 3 +-- fs/btrfs/dev-replace.c | 2 +- fs/btrfs/extent-io-tree.c | 25 +++++++++++++------------ fs/btrfs/extent-io-tree.h | 12 +++++------- fs/btrfs/extent-tree.c | 14 ++++++-------- fs/btrfs/extent_io.c | 3 +-- fs/btrfs/extent_map.c | 4 ++-- fs/btrfs/file-item.c | 4 ++-- fs/btrfs/inode.c | 7 +++---- fs/btrfs/relocation.c | 5 ++--- fs/btrfs/tests/extent-io-tests.c | 13 ++++++------- fs/btrfs/zoned.c | 2 +- 13 files changed, 44 insertions(+), 52 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 202e2aa949c5..618ba7670e66 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -3523,7 +3523,7 @@ int btrfs_update_block_group(struct btrfs_trans_handle *trans, set_extent_bit(&trans->transaction->pinned_extents, bytenr, bytenr + num_bytes - 1, - EXTENT_DIRTY, NULL, GFP_NOFS); + EXTENT_DIRTY, NULL); } spin_lock(&trans->transaction->dirty_bgs_lock); diff --git a/fs/btrfs/defrag.c b/fs/btrfs/defrag.c index 4e7a1e0a0441..f2ff4cbe8656 100644 --- a/fs/btrfs/defrag.c +++ b/fs/btrfs/defrag.c @@ -1041,8 +1041,7 @@ static int defrag_one_locked_target(struct btrfs_inode *inode, EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, cached_state); set_extent_bit(&inode->io_tree, start, start + len - 1, - EXTENT_DELALLOC | EXTENT_DEFRAG, - cached_state, GFP_NOFS); + EXTENT_DELALLOC | EXTENT_DEFRAG, cached_state); /* Update the page status */ for (i = start_index - first_index; i <= last_index - first_index; i++) { diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 3a0fc57d5db9..dc3f30c79320 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -796,7 +796,7 @@ static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev, &found_start, &found_end, CHUNK_ALLOCATED, &cached_state)) { ret = set_extent_bit(&tgtdev->alloc_state, found_start, - found_end, CHUNK_ALLOCATED, NULL, GFP_NOFS); + found_end, CHUNK_ALLOCATED, NULL); if (ret) break; start = found_end + 1; diff --git a/fs/btrfs/extent-io-tree.c b/fs/btrfs/extent-io-tree.c index 83e40c02f62e..a2315a4b8b75 100644 --- a/fs/btrfs/extent-io-tree.c +++ b/fs/btrfs/extent-io-tree.c @@ -556,7 +556,7 @@ static void set_gfp_mask_from_bits(u32 *bits, gfp_t *mask) */ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached_state, - gfp_t mask, struct extent_changeset *changeset) + struct extent_changeset *changeset) { struct extent_state *state; struct extent_state *cached; @@ -566,6 +566,7 @@ int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int clear = 0; int wake; int delete = (bits & EXTENT_CLEAR_ALL_BITS); + gfp_t mask; set_gfp_mask_from_bits(&bits, &mask); btrfs_debug_check_extent_io_range(tree, start, end); @@ -964,7 +965,8 @@ out: /* * Set some bits on a range in the tree. This may require allocations or - * sleeping, so the gfp mask is used to indicate what is allowed. + * sleeping. By default all allocations use GFP_NOFS, use EXTENT_NOWAIT for + * GFP_NOWAIT. * * If any of the exclusive bits are set, this will fail with -EEXIST if some * part of the range already has the desired bits set.
The extent_state of the @@ -979,7 +981,7 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, u64 *failed_start, struct extent_state **failed_state, struct extent_state **cached_state, - struct extent_changeset *changeset, gfp_t mask) + struct extent_changeset *changeset) { struct extent_state *state; struct extent_state *prealloc = NULL; @@ -989,6 +991,7 @@ static int __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u64 last_start; u64 last_end; u32 exclusive_bits = (bits & EXTENT_LOCKED); + gfp_t mask; set_gfp_mask_from_bits(&bits, &mask); btrfs_debug_check_extent_io_range(tree, start, end); @@ -1200,10 +1203,10 @@ out: } int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_state **cached_state, gfp_t mask) + u32 bits, struct extent_state **cached_state) { return __set_extent_bit(tree, start, end, bits, NULL, NULL, - cached_state, NULL, mask); + cached_state, NULL); } /* @@ -1699,8 +1702,7 @@ int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, */ ASSERT(!(bits & EXTENT_LOCKED)); - return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, - changeset, GFP_NOFS); + return __set_extent_bit(tree, start, end, bits, NULL, NULL, NULL, changeset); } int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, @@ -1712,8 +1714,7 @@ int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, */ ASSERT(!(bits & EXTENT_LOCKED)); - return __clear_extent_bit(tree, start, end, bits, NULL, GFP_NOFS, - changeset); + return __clear_extent_bit(tree, start, end, bits, NULL, changeset); } int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, @@ -1723,7 +1724,7 @@ int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u64 failed_start; err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, - NULL, cached, NULL, GFP_NOFS); + NULL, cached, NULL); if (err == -EEXIST) { if (failed_start > start) clear_extent_bit(tree, start, failed_start - 1, @@ -1745,7 +1746,7 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u64 failed_start; err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, - &failed_state, cached_state, NULL, GFP_NOFS); + &failed_state, cached_state, NULL); while (err == -EEXIST) { if (failed_start != start) clear_extent_bit(tree, start, failed_start - 1, @@ -1755,7 +1756,7 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, &failed_state); err = __set_extent_bit(tree, start, end, EXTENT_LOCKED, &failed_start, &failed_state, - cached_state, NULL, GFP_NOFS); + cached_state, NULL); } return err; } diff --git a/fs/btrfs/extent-io-tree.h b/fs/btrfs/extent-io-tree.h index d7f5afeb5ce7..fbd3b275ab1c 100644 --- a/fs/btrfs/extent-io-tree.h +++ b/fs/btrfs/extent-io-tree.h @@ -136,22 +136,20 @@ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_changeset *changeset); int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_state **cached, gfp_t mask, + u32 bits, struct extent_state **cached, struct extent_changeset *changeset); static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached) { - return __clear_extent_bit(tree, start, end, bits, cached, - GFP_NOFS, NULL); + return __clear_extent_bit(tree, start, end, bits, cached, NULL); } static 
inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { - return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, - GFP_NOFS, NULL); + return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL); } static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, @@ -163,13 +161,13 @@ static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_changeset *changeset); int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - u32 bits, struct extent_state **cached_state, gfp_t mask); + u32 bits, struct extent_state **cached_state); static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state) { return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, - cached_state, GFP_NOFS, NULL); + cached_state, NULL); } static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 6e319100e3a3..71aaf28fafc9 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -74,7 +74,7 @@ int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info, { u64 end = start + num_bytes - 1; set_extent_bit(&fs_info->excluded_extents, start, end, - EXTENT_UPTODATE, NULL, GFP_NOFS); + EXTENT_UPTODATE, NULL); return 0; } @@ -2508,7 +2508,7 @@ static int pin_down_extent(struct btrfs_trans_handle *trans, spin_unlock(&cache->space_info->lock); set_extent_bit(&trans->transaction->pinned_extents, bytenr, - bytenr + num_bytes - 1, EXTENT_DIRTY, NULL, GFP_NOFS); + bytenr + num_bytes - 1, EXTENT_DIRTY, NULL); return 0; } @@ -4831,16 +4831,15 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, if (buf->log_index == 0) set_extent_bit(&root->dirty_log_pages, buf->start, buf->start + buf->len - 1, - EXTENT_DIRTY, NULL, GFP_NOFS); + EXTENT_DIRTY, NULL); else set_extent_bit(&root->dirty_log_pages, buf->start, buf->start + buf->len - 1, - EXTENT_NEW, NULL, GFP_NOFS); + EXTENT_NEW, NULL); } else { buf->log_index = -1; set_extent_bit(&trans->transaction->dirty_pages, buf->start, - buf->start + buf->len - 1, EXTENT_DIRTY, NULL, - GFP_NOFS); + buf->start + buf->len - 1, EXTENT_DIRTY, NULL); } /* this returns a buffer locked for blocking */ return buf; @@ -5981,8 +5980,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device, u64 *trimmed) &bytes); if (!ret) set_extent_bit(&device->alloc_state, start, - start + bytes - 1, - CHUNK_TRIMMED, NULL, GFP_NOFS); + start + bytes - 1, CHUNK_TRIMMED, NULL); mutex_unlock(&fs_info->chunk_mutex); if (ret) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b38fbdd70a6c..3c7773995d63 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2456,8 +2456,7 @@ static int try_release_extent_state(struct extent_io_tree *tree, * The delalloc new bit will be cleared by ordered extent * completion. */ - ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, - mask, NULL); + ret = __clear_extent_bit(tree, start, end, clear_bits, NULL, NULL); /* if clear_extent_bit failed for enomem reasons, * we can't allow the release to continue. 
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index 4c8c87524d62..4d86d4739217 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c @@ -366,7 +366,7 @@ static void extent_map_device_set_bits(struct extent_map *em, unsigned bits) set_extent_bit(&device->alloc_state, stripe->physical, stripe->physical + stripe_size - 1, - bits | EXTENT_NOWAIT, NULL, GFP_NOWAIT); + bits | EXTENT_NOWAIT, NULL); } } @@ -383,7 +383,7 @@ static void extent_map_device_clear_bits(struct extent_map *em, unsigned bits) __clear_extent_bit(&device->alloc_state, stripe->physical, stripe->physical + stripe_size - 1, bits | EXTENT_NOWAIT, - NULL, GFP_NOWAIT, NULL); + NULL, NULL); } } diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 1e364a7b74aa..594b69d94241 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -95,7 +95,7 @@ int btrfs_inode_set_file_extent_range(struct btrfs_inode *inode, u64 start, if (btrfs_fs_incompat(inode->root->fs_info, NO_HOLES)) return 0; return set_extent_bit(&inode->file_extent_tree, start, start + len - 1, - EXTENT_DIRTY, NULL, GFP_NOFS); + EXTENT_DIRTY, NULL); } /* @@ -440,7 +440,7 @@ blk_status_t btrfs_lookup_bio_sums(struct btrfs_bio *bbio) set_extent_bit(&inode->io_tree, file_offset, file_offset + sectorsize - 1, - EXTENT_NODATASUM, NULL, GFP_NOFS); + EXTENT_NODATASUM, NULL); } else { btrfs_warn_rl(fs_info, "csum hole found for disk bytenr range [%llu, %llu)", diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8dd979b2f084..40b5f2df9ecc 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2888,8 +2888,7 @@ static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode, ret = set_extent_bit(&inode->io_tree, search_start, search_start + em_len - 1, - EXTENT_DELALLOC_NEW, cached_state, - GFP_NOFS); + EXTENT_DELALLOC_NEW, cached_state); next: search_start = extent_map_end(em); free_extent_map(em); @@ -2923,7 +2922,7 @@ int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end, } return set_extent_bit(&inode->io_tree, start, end, - EXTENT_DELALLOC | extra_bits, cached_state, GFP_NOFS); + EXTENT_DELALLOC | extra_bits, cached_state); } /* see btrfs_writepage_start_hook for details on why this is required */ @@ -5000,7 +4999,7 @@ again: if (only_release_metadata) set_extent_bit(&inode->io_tree, block_start, block_end, - EXTENT_NORESERVE, NULL, GFP_NOFS); + EXTENT_NORESERVE, NULL); out_unlock: if (ret) { diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 1ed8b132fe2a..0bda67ad349e 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -175,8 +175,7 @@ static void mark_block_processed(struct reloc_control *rc, rc->block_group->length)) { blocksize = rc->extent_root->fs_info->nodesize; set_extent_bit(&rc->processed_blocks, node->bytenr, - node->bytenr + blocksize - 1, EXTENT_DIRTY, - NULL, GFP_NOFS); + node->bytenr + blocksize - 1, EXTENT_DIRTY, NULL); } node->processed = 1; } @@ -3054,7 +3053,7 @@ static int relocate_one_page(struct inode *inode, struct file_ra_state *ra, set_extent_bit(&BTRFS_I(inode)->io_tree, boundary_start, boundary_end, - EXTENT_BOUNDARY, NULL, GFP_NOFS); + EXTENT_BOUNDARY, NULL); } unlock_extent(&BTRFS_I(inode)->io_tree, clamped_start, clamped_end, &cached_state); diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index acaaddf7181a..f6bc6d738555 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -159,7 +159,7 @@ static int test_find_delalloc(u32 sectorsize) * |--- delalloc ---| * |--- search ---| */ - 
set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL, GFP_NOFS); + set_extent_bit(tmp, 0, sectorsize - 1, EXTENT_DELALLOC, NULL); start = 0; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, locked_page, &start, @@ -190,7 +190,7 @@ static int test_find_delalloc(u32 sectorsize) test_err("couldn't find the locked page"); goto out_bits; } - set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL, GFP_NOFS); + set_extent_bit(tmp, sectorsize, max_bytes - 1, EXTENT_DELALLOC, NULL); start = test_start; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, locked_page, &start, @@ -245,7 +245,7 @@ static int test_find_delalloc(u32 sectorsize) * * We are re-using our test_start from above since it works out well. */ - set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL, GFP_NOFS); + set_extent_bit(tmp, max_bytes, total_dirty - 1, EXTENT_DELALLOC, NULL); start = test_start; end = start + PAGE_SIZE - 1; found = find_lock_delalloc_range(inode, locked_page, &start, @@ -504,7 +504,7 @@ static int test_find_first_clear_extent_bit(void) * 4M-32M */ set_extent_bit(&tree, SZ_1M, SZ_4M - 1, - CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL, GFP_NOFS); + CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); find_first_clear_extent_bit(&tree, SZ_512K, &start, &end, CHUNK_TRIMMED | CHUNK_ALLOCATED); @@ -517,7 +517,7 @@ static int test_find_first_clear_extent_bit(void) /* Now add 32M-64M so that we have a hole between 4M-32M */ set_extent_bit(&tree, SZ_32M, SZ_64M - 1, - CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL, GFP_NOFS); + CHUNK_TRIMMED | CHUNK_ALLOCATED, NULL); /* * Request first hole starting at 12M, we should get 4M-32M @@ -548,8 +548,7 @@ static int test_find_first_clear_extent_bit(void) * Set 64M-72M with CHUNK_ALLOC flag, then search for CHUNK_TRIMMED flag * being unset in this range, we should get the entry in range 64M-72M */ - set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL, - GFP_NOFS); + set_extent_bit(&tree, SZ_64M, SZ_64M + SZ_8M - 1, CHUNK_ALLOCATED, NULL); find_first_clear_extent_bit(&tree, SZ_64M + SZ_1M, &start, &end, CHUNK_TRIMMED); diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c index fb90e2b20614..98d6b8cc3874 100644 --- a/fs/btrfs/zoned.c +++ b/fs/btrfs/zoned.c @@ -1612,7 +1612,7 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans, set_bit(EXTENT_BUFFER_NO_CHECK, &eb->bflags); set_extent_buffer_dirty(eb); set_extent_bit(&trans->dirty_pages, eb->start, eb->start + eb->len - 1, - EXTENT_DIRTY | EXTENT_NOWAIT, NULL, GFP_NOWAIT); + EXTENT_DIRTY | EXTENT_NOWAIT, NULL); } bool btrfs_use_zone_append(struct btrfs_bio *bbio) -- cgit v1.2.3 From 75258f20fb70ce9e1fa4fa0cb27c0bdee05b2701 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 26 May 2023 20:30:53 +0800 Subject: btrfs: subpage: dump extra subpage bitmaps for debug There is a bug report that assert_eb_page_uptodate() gets triggered for free space tree metadata. Without a proper dump of the subpage bitmaps it's much harder to debug. Thus this patch dumps all the subpage bitmaps (split into their own bitmaps) for easier debugging.
The output would look like this: (Dumped after a tree block got read from disk) page:000000006e34bf49 refcount:4 mapcount:0 mapping:0000000067661ac4 index:0x1d1 pfn:0x110e9 memcg:ffff0000d7d62000 aops:btree_aops [btrfs] ino:1 flags: 0x8000000000002002(referenced|private|zone=2) page_type: 0xffffffff() raw: 8000000000002002 0000000000000000 dead000000000122 ffff00000188bed0 raw: 00000000000001d1 ffff0000c7992700 00000004ffffffff ffff0000d7d62000 page dumped because: btrfs subpage dump BTRFS warning (device dm-1): start=30490624 len=16384 page=30474240 bitmaps: uptodate=4-7 error= dirty= writeback= ordered= checked= Reviewed-by: Christoph Hellwig Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 5 +++-- fs/btrfs/subpage.c | 42 ++++++++++++++++++++++++++++++++++++++++++ fs/btrfs/subpage.h | 2 ++ 3 files changed, 47 insertions(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 3c7773995d63..87ee376b3cc8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4262,8 +4262,9 @@ static void assert_eb_page_uptodate(const struct extent_buffer *eb, return; if (fs_info->nodesize < PAGE_SIZE) { - WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page, - eb->start, eb->len)); + if (WARN_ON(!btrfs_subpage_test_uptodate(fs_info, page, + eb->start, eb->len))) + btrfs_subpage_dump_bitmap(fs_info, page, eb->start, eb->len); } else { WARN_ON(!PageUptodate(page)); } diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index 045117ca0ddc..74bc43040531 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -745,3 +745,45 @@ void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page, /* Have writers, use proper subpage helper to end it */ btrfs_page_end_writer_lock(fs_info, page, start, len); } + +#define GET_SUBPAGE_BITMAP(subpage, subpage_info, name, dst) \ + bitmap_cut(dst, subpage->bitmaps, 0, \ + subpage_info->name##_offset, subpage_info->bitmap_nr_bits) + +void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, + struct page *page, u64 start, u32 len) +{ + struct btrfs_subpage_info *subpage_info = fs_info->subpage_info; + struct btrfs_subpage *subpage; + unsigned long uptodate_bitmap; + unsigned long error_bitmap; + unsigned long dirty_bitmap; + unsigned long writeback_bitmap; + unsigned long ordered_bitmap; + unsigned long checked_bitmap; + unsigned long flags; + + ASSERT(PagePrivate(page) && page->private); + ASSERT(subpage_info); + subpage = (struct btrfs_subpage *)page->private; + + spin_lock_irqsave(&subpage->lock, flags); + GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap); + GET_SUBPAGE_BITMAP(subpage, subpage_info, error, &error_bitmap); + GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap); + GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap); + GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap); + GET_SUBPAGE_BITMAP(subpage, subpage_info, checked, &checked_bitmap); + spin_unlock_irqrestore(&subpage->lock, flags); + + dump_page(page, "btrfs subpage dump"); + btrfs_warn(fs_info, +"start=%llu len=%u page=%llu, bitmaps uptodate=%*pbl error=%*pbl dirty=%*pbl writeback=%*pbl ordered=%*pbl checked=%*pbl", + start, len, page_offset(page), + subpage_info->bitmap_nr_bits, &uptodate_bitmap, + subpage_info->bitmap_nr_bits, &error_bitmap, + subpage_info->bitmap_nr_bits, &dirty_bitmap, + subpage_info->bitmap_nr_bits, &writeback_bitmap, + subpage_info->bitmap_nr_bits, 
&ordered_bitmap, + subpage_info->bitmap_nr_bits, &checked_bitmap); +} diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h index 0e80ad336904..5905caea8409 100644 --- a/fs/btrfs/subpage.h +++ b/fs/btrfs/subpage.h @@ -154,5 +154,7 @@ void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info, struct page *page); void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page, u64 start, u32 len); +void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, + struct page *page, u64 start, u32 len); #endif -- cgit v1.2.3 From 5a96341927b097601e1fefc292f0a6e5273e7554 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 30 May 2023 09:45:27 +0800 Subject: btrfs: subpage: make alloc_extent_buffer() handle previously uptodate range efficiently Currently alloc_extent_buffer() would make the extent buffer uptodate if the corresponding pages are also uptodate. But this check only looks at PageUptodate, which is fine for regular cases, but not for subpage cases, as we can have multiple extent buffers in the same page. So here we use btrfs_page_test_uptodate() instead. The old code doesn't cause any problem, but is not efficient, as it would cause an extra metadata read even if the range is already uptodate. Reviewed-by: Christoph Hellwig Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 87ee376b3cc8..f578813be749 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3712,7 +3712,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, WARN_ON(btrfs_page_test_dirty(fs_info, p, eb->start, eb->len)); eb->pages[i] = p; - if (!PageUptodate(p)) + if (!btrfs_page_test_uptodate(fs_info, p, eb->start, eb->len)) uptodate = 0; /* -- cgit v1.2.3 From 31dd8c81ddfa142dab53a0837eed88e8febe7b1e Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 30 May 2023 09:45:28 +0800 Subject: btrfs: use the same uptodate variable for end_bio_extent_readpage() In function end_bio_extent_readpage() we call endio_readpage_release_extent() to unlock the extent io tree. However we pass PageUptodate(page) as the @uptodate parameter, while the preceding end_page_read() call uses a dedicated @uptodate local variable. This is not a big deal: even in subpage cases, when the bio only covers part of the page, @uptodate is always false and the subpage ranges can still be merged. But for the sake of consistency, always use the @uptodate variable when possible. Reviewed-by: Christoph Hellwig Reviewed-by: Anand Jain Signed-off-by: Qu Wenruo Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f578813be749..d5b3b15f7ab2 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -745,7 +745,7 @@ static void end_bio_extent_readpage(struct btrfs_bio *bbio) /* Update page status and unlock.
*/ end_page_read(page, uptodate, start, len); endio_readpage_release_extent(&processed, BTRFS_I(inode), - start, end, PageUptodate(page)); + start, end, uptodate); ASSERT(bio_offset + len > bio_offset); bio_offset += len; -- cgit v1.2.3 From 36614a3beba33a05ad78d4dcb9aa1d00e8a7d01f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:50 +0200 Subject: btrfs: fix range_end calculation in extent_write_locked_range The range_end field in struct writeback_control is inclusive, just like the end parameter passed to extent_write_locked_range. Passing an exclusive end value could cause extra writeout, which is harmless but suboptimal. Fixes: 771ed689d2cd ("Btrfs: Optimize compressed writeback and reads") CC: stable@vger.kernel.org # 5.9+ Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d5b3b15f7ab2..0726c82db309 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2310,7 +2310,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end) struct writeback_control wbc_writepages = { .sync_mode = WB_SYNC_ALL, .range_start = start, - .range_end = end + 1, + .range_end = end, .no_cgroup_owner = 1, }; struct btrfs_bio_ctrl bio_ctrl = { -- cgit v1.2.3 From ed9ee98ecb4fdbdfe043ee3eec0a65c0745d8669 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:51 +0200 Subject: btrfs: factor out a btrfs_verify_page helper Split all the conditionals for the fsverity calls in end_page_read into a btrfs_verify_page helper to keep the code readable and make additional refactoring easier. Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 0726c82db309..8e42ce48b52e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -481,6 +481,15 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, start, end, page_ops, NULL); } +static bool btrfs_verify_page(struct page *page, u64 start) +{ + if (!fsverity_active(page->mapping->host) || + PageError(page) || PageUptodate(page) || + start >= i_size_read(page->mapping->host)) + return true; + return fsverity_verify_page(page); +} + static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len) { struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb); @@ -489,11 +498,7 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len) start + len <= page_offset(page) + PAGE_SIZE); if (uptodate) { - if (fsverity_active(page->mapping->host) && - !PageError(page) && - !PageUptodate(page) && - start < i_size_read(page->mapping->host) && - !fsverity_verify_page(page)) { + if (!btrfs_verify_page(page, start)) { btrfs_page_set_error(fs_info, page, start, len); } else { btrfs_page_set_uptodate(fs_info, page, start, len); -- cgit v1.2.3 From 2c14f0ffdd30bd3d321ad5fe76fcf701746e1df6 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:52 +0200 Subject: btrfs: fix fsverity read error handling in end_page_read Also clear the uptodate bit to make sure the page isn't seen as uptodate in the page cache if fsverity verification fails.
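Put differently, the resulting logic (quoted from the hunk below) makes success require both a clean bio and a passing verity check, with every failure path clearing the uptodate state:

	if (uptodate && btrfs_verify_page(page, start)) {
		btrfs_page_set_uptodate(fs_info, page, start, len);
	} else {
		btrfs_page_clear_uptodate(fs_info, page, start, len);
		btrfs_page_set_error(fs_info, page, start, len);
	}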
Fixes: 146054090b08 ("btrfs: initial fsverity support") CC: stable@vger.kernel.org # 5.15+ Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 8e42ce48b52e..a943a6622489 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -497,12 +497,8 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len) ASSERT(page_offset(page) <= start && start + len <= page_offset(page) + PAGE_SIZE); - if (uptodate) { - if (!btrfs_verify_page(page, start)) { - btrfs_page_set_error(fs_info, page, start, len); - } else { - btrfs_page_set_uptodate(fs_info, page, start, len); - } + if (uptodate && btrfs_verify_page(page, start)) { + btrfs_page_set_uptodate(fs_info, page, start, len); } else { btrfs_page_clear_uptodate(fs_info, page, start, len); btrfs_page_set_error(fs_info, page, start, len); -- cgit v1.2.3 From 57201dddd6f820de9ff94472da9b9cda88973c8f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:53 +0200 Subject: btrfs: don't check PageError in btrfs_verify_page btrfs_verify_page is called from the readpage completion handler, which is only used to read pages, or parts of pages that aren't uptodate yet. The only case where PageError could be set on a page in btrfs is if we had a previous writeback error, but in that case we won't have called readpage on it, as it has previously been marked uptodate. Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a943a6622489..178e8230c28a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -484,7 +484,7 @@ void extent_clear_unlock_delalloc(struct btrfs_inode *inode, u64 start, u64 end, static bool btrfs_verify_page(struct page *page, u64 start) { if (!fsverity_active(page->mapping->host) || - PageError(page) || PageUptodate(page) || + PageUptodate(page) || start >= i_size_read(page->mapping->host)) return true; return fsverity_verify_page(page); -- cgit v1.2.3 From 3e92499e3b004baffb479d61e191b41b604ece9a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:56 +0200 Subject: btrfs: don't check PageError in __extent_writepage __extent_writepage currently sets PageError whenever any error happens, and also checks PageError to decide whether to call the error handling. This leads to very unclear responsibility for cleaning up on errors. In the VM and generic writeback helpers the basic idea is that once I/O is fired off all error handling responsibility is delegated to the end I/O handler. But if that end I/O handler sets the PageError bit, and the submitter checks it, the bit could in some cases leak into the submission context for fast enough I/O. Fix this by simply not checking PageError and just using the local ret variable to check for submission errors. This also fundamentally solves the long-standing problem documented in a comment in __extent_writepage by never leaking the error bit into the submission context.
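Schematically (simplified from the hunk below), the submission path now keys its cleanup off its own return value instead of a page bit that the end I/O handler may also touch:

	/* before: reads state that a fast end I/O handler may have set */
	if (PageError(page))
		end_extent_writepage(page, ret, page_start, page_end);

	/* after: only submission-side failures are handled here */
	if (ret)
		end_extent_writepage(page, ret, page_start, page_end);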
Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 33 +-------------------------------- 1 file changed, 1 insertion(+), 32 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 178e8230c28a..33e5f0a31c21 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1557,38 +1557,7 @@ done: set_page_writeback(page); end_page_writeback(page); } - /* - * Here we used to have a check for PageError() and then set @ret and - * call end_extent_writepage(). - * - * But in fact setting @ret here will cause different error paths - * between subpage and regular sectorsize. - * - * For regular page size, we never submit current page, but only add - * current page to current bio. - * The bio submission can only happen in next page. - * Thus if we hit the PageError() branch, @ret is already set to - * non-zero value and will not get updated for regular sectorsize. - * - * But for subpage case, it's possible we submit part of current page, - * thus can get PageError() set by submitted bio of the same page, - * while our @ret is still 0. - * - * So here we unify the behavior and don't set @ret. - * Error can still be properly passed to higher layer as page will - * be set error, here we just don't handle the IO failure. - * - * NOTE: This is just a hotfix for subpage. - * The root fix will be properly ending ordered extent when we hit - * an error during writeback. - * - * But that needs a bigger refactoring, as we not only need to grab the - * submitted OE, but also need to know exactly at which bytenr we hit - * the error. - * Currently the full page based __extent_writepage_io() is not - * capable of that. - */ - if (PageError(page)) + if (ret) end_extent_writepage(page, ret, page_start, page_end); if (bio_ctrl->extent_locked) { struct writeback_control *wbc = bio_ctrl->wbc; -- cgit v1.2.3 From 2b2553f12355577d8de3eaedfa25fce3368d652c Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:57 +0200 Subject: btrfs: stop setting PageError in the data I/O path PageError is not used by the VFS/MM and is deprecated because it uses up a page bit and has no coherent rules. Instead read errors are usually propagated by not setting or clearing the uptodate bit, and write errors are propagated through the address_space. Btrfs now only sets the flag and never clears it for data pages, so just remove all places setting it, and the subpage error bit. Note that the error propagation for superblock writes that work on the block device mapping still uses PageError for now, but that will be addressed in a separate series.
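The two propagation channels mentioned above look roughly as follows (an illustrative sketch using helpers that appear elsewhere in this series, not a hunk from this patch):

	/* read error: leave the range !uptodate so readers see the failure */
	btrfs_page_clear_uptodate(fs_info, page, start, len);

	/* write error: record it on the address_space for a later fsync() */
	mapping_set_error(page->mapping, -EIO);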
Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/compression.c | 2 -- fs/btrfs/extent_io.c | 24 +++++------------------- fs/btrfs/inode.c | 3 --- fs/btrfs/subpage.c | 35 ----------------------------------- fs/btrfs/subpage.h | 10 ++++------ 5 files changed, 9 insertions(+), 65 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 04cd5de4f00f..bb2d1a4ceca1 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -211,8 +211,6 @@ static noinline void end_compressed_writeback(const struct compressed_bio *cb) for (i = 0; i < ret; i++) { struct folio *folio = fbatch.folios[i]; - if (errno) - folio_set_error(folio); btrfs_page_clamp_clear_writeback(fs_info, &folio->page, cb->start, cb->len); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 33e5f0a31c21..b7f26a4bb663 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -223,8 +223,6 @@ static int process_one_page(struct btrfs_fs_info *fs_info, if (page_ops & PAGE_SET_ORDERED) btrfs_page_clamp_set_ordered(fs_info, page, start, len); - if (page_ops & PAGE_SET_ERROR) - btrfs_page_clamp_set_error(fs_info, page, start, len); if (page_ops & PAGE_START_WRITEBACK) { btrfs_page_clamp_clear_dirty(fs_info, page, start, len); btrfs_page_clamp_set_writeback(fs_info, page, start, len); @@ -497,12 +495,10 @@ static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len) ASSERT(page_offset(page) <= start && start + len <= page_offset(page) + PAGE_SIZE); - if (uptodate && btrfs_verify_page(page, start)) { + if (uptodate && btrfs_verify_page(page, start)) btrfs_page_set_uptodate(fs_info, page, start, len); - } else { + else btrfs_page_clear_uptodate(fs_info, page, start, len); - btrfs_page_set_error(fs_info, page, start, len); - } if (!btrfs_is_subpage(fs_info, page)) unlock_page(page); @@ -530,7 +526,6 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end) len = end + 1 - start; btrfs_page_clear_uptodate(fs_info, page, start, len); - btrfs_page_set_error(fs_info, page, start, len); ret = err < 0 ? 
err : -EIO; mapping_set_error(page->mapping, ret); } @@ -1059,7 +1054,6 @@ static int btrfs_do_readpage(struct page *page, struct extent_map **em_cached, ret = set_page_extent_mapped(page); if (ret < 0) { unlock_extent(tree, start, end, NULL); - btrfs_page_set_error(fs_info, page, start, PAGE_SIZE); unlock_page(page); return ret; } @@ -1263,11 +1257,9 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode, } ret = btrfs_run_delalloc_range(inode, page, delalloc_start, delalloc_end, &page_started, &nr_written, wbc); - if (ret) { - btrfs_page_set_error(inode->root->fs_info, page, - page_offset(page), PAGE_SIZE); + if (ret) return ret; - } + /* * delalloc_end is already one less than the total length, so * we don't subtract one from PAGE_SIZE @@ -1420,7 +1412,6 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, em = btrfs_get_extent(inode, NULL, 0, cur, end - cur + 1); if (IS_ERR(em)) { - btrfs_page_set_error(fs_info, page, cur, end - cur + 1); ret = PTR_ERR_OR_ZERO(em); goto out_error; } @@ -1519,9 +1510,6 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl WARN_ON(!PageLocked(page)); - btrfs_page_clear_error(btrfs_sb(inode->i_sb), page, - page_offset(page), PAGE_SIZE); - pg_offset = offset_in_page(i_size); if (page->index > end_index || (page->index == end_index && !pg_offset)) { @@ -1534,10 +1522,8 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl memzero_page(page, pg_offset, PAGE_SIZE - pg_offset); ret = set_page_extent_mapped(page); - if (ret < 0) { - SetPageError(page); + if (ret < 0) goto done; - } if (!bio_ctrl->extent_locked) { ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc); diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 4633ccacefac..14e9aa91293e 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1153,8 +1153,6 @@ static int submit_uncompressed_range(struct btrfs_inode *inode, const u64 page_start = page_offset(locked_page); const u64 page_end = page_start + PAGE_SIZE - 1; - btrfs_page_set_error(inode->root->fs_info, locked_page, - page_start, PAGE_SIZE); set_page_writeback(locked_page); end_page_writeback(locked_page); end_extent_writepage(locked_page, ret, page_start, page_end); @@ -2942,7 +2940,6 @@ out_page: mapping_set_error(page->mapping, ret); end_extent_writepage(page, ret, page_start, page_end); clear_page_dirty_for_io(page); - SetPageError(page); } btrfs_page_clear_checked(inode->root->fs_info, page, page_start, PAGE_SIZE); unlock_page(page); diff --git a/fs/btrfs/subpage.c b/fs/btrfs/subpage.c index 74bc43040531..1b999c6e4193 100644 --- a/fs/btrfs/subpage.c +++ b/fs/btrfs/subpage.c @@ -100,9 +100,6 @@ void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sector subpage_info->uptodate_offset = cur; cur += nr_bits; - subpage_info->error_offset = cur; - cur += nr_bits; - subpage_info->dirty_offset = cur; cur += nr_bits; @@ -416,35 +413,6 @@ void btrfs_subpage_clear_uptodate(const struct btrfs_fs_info *fs_info, spin_unlock_irqrestore(&subpage->lock, flags); } -void btrfs_subpage_set_error(const struct btrfs_fs_info *fs_info, - struct page *page, u64 start, u32 len) -{ - struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; - unsigned int start_bit = subpage_calc_start_bit(fs_info, page, - error, start, len); - unsigned long flags; - - spin_lock_irqsave(&subpage->lock, flags); - bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); - SetPageError(page); - 
spin_unlock_irqrestore(&subpage->lock, flags); -} - -void btrfs_subpage_clear_error(const struct btrfs_fs_info *fs_info, - struct page *page, u64 start, u32 len) -{ - struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private; - unsigned int start_bit = subpage_calc_start_bit(fs_info, page, - error, start, len); - unsigned long flags; - - spin_lock_irqsave(&subpage->lock, flags); - bitmap_clear(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits); - if (subpage_test_bitmap_all_zero(fs_info, subpage, error)) - ClearPageError(page); - spin_unlock_irqrestore(&subpage->lock, flags); -} - void btrfs_subpage_set_dirty(const struct btrfs_fs_info *fs_info, struct page *page, u64 start, u32 len) { @@ -606,7 +574,6 @@ bool btrfs_subpage_test_##name(const struct btrfs_fs_info *fs_info, \ return ret; \ } IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(uptodate); -IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(error); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(dirty); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(writeback); IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(ordered); @@ -674,7 +641,6 @@ bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \ } IMPLEMENT_BTRFS_PAGE_OPS(uptodate, SetPageUptodate, ClearPageUptodate, PageUptodate); -IMPLEMENT_BTRFS_PAGE_OPS(error, SetPageError, ClearPageError, PageError); IMPLEMENT_BTRFS_PAGE_OPS(dirty, set_page_dirty, clear_page_dirty_for_io, PageDirty); IMPLEMENT_BTRFS_PAGE_OPS(writeback, set_page_writeback, end_page_writeback, @@ -769,7 +735,6 @@ void __cold btrfs_subpage_dump_bitmap(const struct btrfs_fs_info *fs_info, spin_lock_irqsave(&subpage->lock, flags); GET_SUBPAGE_BITMAP(subpage, subpage_info, uptodate, &uptodate_bitmap); - GET_SUBPAGE_BITMAP(subpage, subpage_info, error, &error_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, dirty, &dirty_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, writeback, &writeback_bitmap); GET_SUBPAGE_BITMAP(subpage, subpage_info, ordered, &ordered_bitmap); diff --git a/fs/btrfs/subpage.h b/fs/btrfs/subpage.h index 5905caea8409..5cbf67ccbdeb 100644 --- a/fs/btrfs/subpage.h +++ b/fs/btrfs/subpage.h @@ -8,17 +8,17 @@ /* * Extra info for subpapge bitmap. * - * For subpage we pack all uptodate/error/dirty/writeback/ordered bitmaps into + * For subpage we pack all uptodate/dirty/writeback/ordered bitmaps into * one larger bitmap. * * This structure records how they are organized in the bitmap: * - * /- uptodate_offset /- error_offset /- dirty_offset + * /- uptodate_offset /- dirty_offset /- ordered_offset * | | | * v v v - * |u|u|u|u|........|u|u|e|e|.......|e|e| ... |o|o| + * |u|u|u|u|........|u|u|d|d|.......|d|d|o|o|.......|o|o| * |<- bitmap_nr_bits ->| - * |<--------------- total_nr_bits ---------------->| + * |<----------------- total_nr_bits ------------------>| */ struct btrfs_subpage_info { /* Number of bits for each bitmap */ @@ -32,7 +32,6 @@ struct btrfs_subpage_info { * @bitmap_size, which is calculated from PAGE_SIZE / sectorsize. 
*/ unsigned int uptodate_offset; - unsigned int error_offset; unsigned int dirty_offset; unsigned int writeback_offset; unsigned int ordered_offset; @@ -141,7 +140,6 @@ bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \ struct page *page, u64 start, u32 len); DECLARE_BTRFS_SUBPAGE_OPS(uptodate); -DECLARE_BTRFS_SUBPAGE_OPS(error); DECLARE_BTRFS_SUBPAGE_OPS(dirty); DECLARE_BTRFS_SUBPAGE_OPS(writeback); DECLARE_BTRFS_SUBPAGE_OPS(ordered); -- cgit v1.2.3 From a994310aa26fcb6bd7529714ea713b6792630f67 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:58 +0200 Subject: btrfs: remove PAGE_SET_ERROR Now that the btrfs writeback code has stopped using PageError, using PAGE_SET_ERROR to just set the per-address_space error flag is confusing. Open code the mapping_set_error calls in the callers and remove the PAGE_SET_ERROR flag. Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 3 --- fs/btrfs/extent_io.h | 1 - fs/btrfs/inode.c | 11 ++++++----- 3 files changed, 6 insertions(+), 9 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b7f26a4bb663..09a9973c27cc 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -268,9 +268,6 @@ static int __process_pages_contig(struct address_space *mapping, ASSERT(processed_end && *processed_end == start); } - if ((page_ops & PAGE_SET_ERROR) && start_index <= end_index) - mapping_set_error(mapping, -EIO); - folio_batch_init(&fbatch); while (index <= end_index) { int found_folios; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 2d91ca91dca5..6723bf3483d9 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -40,7 +40,6 @@ enum { ENUM_BIT(PAGE_START_WRITEBACK), ENUM_BIT(PAGE_END_WRITEBACK), ENUM_BIT(PAGE_SET_ORDERED), - ENUM_BIT(PAGE_SET_ERROR), ENUM_BIT(PAGE_LOCK), }; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 14e9aa91293e..a40a6002a198 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -835,6 +835,7 @@ static noinline int compress_file_range(struct async_chunk *async_chunk) { struct btrfs_inode *inode = async_chunk->inode; struct btrfs_fs_info *fs_info = inode->root->fs_info; + struct address_space *mapping = inode->vfs_inode.i_mapping; u64 blocksize = fs_info->sectorsize; u64 start = async_chunk->start; u64 end = async_chunk->end; @@ -949,7 +950,7 @@ again: /* Compression level is applied here and only here */ ret = btrfs_compress_pages( compress_type | (fs_info->compress_level << 4), - inode->vfs_inode.i_mapping, start, + mapping, start, pages, &nr_pages, &total_in, @@ -992,9 +993,9 @@ cont: unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING; - unsigned long page_error_op; - page_error_op = ret < 0 ? 
PAGE_SET_ERROR : 0; + if (ret < 0) + mapping_set_error(mapping, -EIO); /* * inline extent creation worked or returned error, @@ -1011,7 +1012,6 @@ cont: clear_flags, PAGE_UNLOCK | PAGE_START_WRITEBACK | - page_error_op | PAGE_END_WRITEBACK); /* @@ -1271,12 +1271,13 @@ out_free_reserve: btrfs_dec_block_group_reservations(fs_info, ins.objectid); btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); out_free: + mapping_set_error(inode->vfs_inode.i_mapping, -EIO); extent_clear_unlock_delalloc(inode, start, end, NULL, EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW | EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, PAGE_UNLOCK | PAGE_START_WRITEBACK | - PAGE_END_WRITEBACK | PAGE_SET_ERROR); + PAGE_END_WRITEBACK); free_async_extent_pages(async_extent); goto done; } -- cgit v1.2.3 From f22b5dcbd71edea087946511554956591557de9a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:04:59 +0200 Subject: btrfs: remove non-standard extent handling in __extent_writepage_io __extent_writepage_io is never called for compressed or inline extents, or holes. Remove the not quite working code for them and replace it with asserts that these cases don't happen. Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 23 +++++------------------ 1 file changed, 5 insertions(+), 18 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 09a9973c27cc..a2e1dbd9b923 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1361,7 +1361,6 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, struct extent_map *em; int ret = 0; int nr = 0; - bool compressed; ret = btrfs_writepage_cow_fixup(page); if (ret) { @@ -1419,10 +1418,14 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, ASSERT(cur < end); ASSERT(IS_ALIGNED(em->start, fs_info->sectorsize)); ASSERT(IS_ALIGNED(em->len, fs_info->sectorsize)); + block_start = em->block_start; - compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags); disk_bytenr = em->block_start + extent_offset; + ASSERT(!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)); + ASSERT(block_start != EXTENT_MAP_HOLE); + ASSERT(block_start != EXTENT_MAP_INLINE); + /* * Note that em_end from extent_map_end() and dirty_range_end from * find_next_dirty_byte() are all exclusive @@ -1431,22 +1434,6 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, free_extent_map(em); em = NULL; - /* - * compressed and inline extents are written through other - * paths in the FS - */ - if (compressed || block_start == EXTENT_MAP_HOLE || - block_start == EXTENT_MAP_INLINE) { - if (compressed) - nr++; - else - btrfs_writepage_endio_finish_ordered(inode, - page, cur, cur + iosize - 1, true); - btrfs_page_clear_dirty(fs_info, page, cur, iosize); - cur += iosize; - continue; - } - btrfs_set_range_writeback(inode, cur, cur + iosize - 1); if (!PageWriteback(page)) { btrfs_err(inode->root->fs_info, -- cgit v1.2.3 From 9ecdbee819ca90589bf2efccfb90d5dabc0de057 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:05:00 +0200 Subject: btrfs: move writeback_control::nr_to_write update to __extent_writepage Move the nr_to_write accounting from __extent_writepage_io to __extent_writepage, as we'll soon grow another caller of __extent_writepage_io that doesn't want this accounting.
Also drop the obsolete comment - decrementing a counter in the on-stack writeback_control data structure doesn't need the page lock. Reviewed-by: Anand Jain Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a2e1dbd9b923..f773ee12ce1c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -1370,12 +1370,6 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode, return 1; } - /* - * we don't want to touch the inode after unlocking the page, - * so we update the mapping writeback index now - */ - bio_ctrl->wbc->nr_to_write--; - bio_ctrl->end_io_func = end_bio_extent_writepage; while (cur <= end) { u64 disk_bytenr; @@ -1521,6 +1515,8 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl if (ret == 1) return 0; + bio_ctrl->wbc->nr_to_write--; + done: if (nr == 0) { /* make sure the mapping tag for page dirty gets cleared */ -- cgit v1.2.3 From eb34dceace983e304e00d4bf711cec0a603959ac Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:05:01 +0200 Subject: btrfs: only call __extent_writepage_io from extent_write_locked_range __extent_writepage does a lot of things that make no sense for extent_write_locked_range, given that extent_write_locked_range itself is called from __extent_writepage either directly or through a workqueue, and all this work has already been done in the first invocation and the pages haven't been unlocked since. Call __extent_writepage_io directly instead and open code the logic tracked in btrfs_bio_ctrl::extent_locked. Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 65 +++++++++++++++++++++------------------------------- 1 file changed, 26 insertions(+), 39 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f773ee12ce1c..1da247e753b0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -103,12 +103,6 @@ struct btrfs_bio_ctrl { blk_opf_t opf; btrfs_bio_end_io_t end_io_func; struct writeback_control *wbc; - - /* - * Tell writepage not to lock the state bits for this range, it still - * does the unlocking. 
- */ - bool extent_locked; }; static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) @@ -1475,7 +1469,6 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl { struct folio *folio = page_folio(page); struct inode *inode = page->mapping->host; - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); const u64 page_start = page_offset(page); const u64 page_end = page_start + PAGE_SIZE - 1; int ret; @@ -1503,13 +1496,11 @@ static int __extent_writepage(struct page *page, struct btrfs_bio_ctrl *bio_ctrl if (ret < 0) goto done; - if (!bio_ctrl->extent_locked) { - ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc); - if (ret == 1) - return 0; - if (ret) - goto done; - } + ret = writepage_delalloc(BTRFS_I(inode), page, bio_ctrl->wbc); + if (ret == 1) + return 0; + if (ret) + goto done; ret = __extent_writepage_io(BTRFS_I(inode), page, bio_ctrl, i_size, &nr); if (ret == 1) @@ -1525,21 +1516,7 @@ done: } if (ret) end_extent_writepage(page, ret, page_start, page_end); - if (bio_ctrl->extent_locked) { - struct writeback_control *wbc = bio_ctrl->wbc; - - /* - * If bio_ctrl->extent_locked, it's from extent_write_locked_range(), - * the page can either be locked by lock_page() or - * process_one_page(). - * Let btrfs_page_unlock_writer() handle both cases. - */ - ASSERT(wbc); - btrfs_page_unlock_writer(fs_info, page, wbc->range_start, - wbc->range_end + 1 - wbc->range_start); - } else { - unlock_page(page); - } + unlock_page(page); ASSERT(ret <= 0); return ret; } @@ -2239,10 +2216,10 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end) int first_error = 0; int ret = 0; struct address_space *mapping = inode->i_mapping; - struct page *page; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + const u32 sectorsize = fs_info->sectorsize; + loff_t i_size = i_size_read(inode); u64 cur = start; - unsigned long nr_pages; - const u32 sectorsize = btrfs_sb(inode->i_sb)->sectorsize; struct writeback_control wbc_writepages = { .sync_mode = WB_SYNC_ALL, .range_start = start, @@ -2254,17 +2231,15 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end) /* We're called from an async helper function */ .opf = REQ_OP_WRITE | REQ_BTRFS_CGROUP_PUNT | wbc_to_write_flags(&wbc_writepages), - .extent_locked = 1, }; ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize)); - nr_pages = (round_up(end, PAGE_SIZE) - round_down(start, PAGE_SIZE)) >> - PAGE_SHIFT; - wbc_writepages.nr_to_write = nr_pages * 2; wbc_attach_fdatawrite_inode(&wbc_writepages, inode); while (cur <= end) { u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); + struct page *page; + int nr = 0; page = find_get_page(mapping, cur >> PAGE_SHIFT); /* @@ -2275,12 +2250,25 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end) ASSERT(PageLocked(page)); ASSERT(PageDirty(page)); clear_page_dirty_for_io(page); - ret = __extent_writepage(page, &bio_ctrl); - ASSERT(ret <= 0); + + ret = __extent_writepage_io(BTRFS_I(inode), page, &bio_ctrl, + i_size, &nr); + if (ret == 1) + goto next_page; + + /* Make sure the mapping tag for page dirty gets cleared. 
*/ if (nr == 0) { set_page_writeback(page); end_page_writeback(page); } if (ret) end_extent_writepage(page, ret, cur, cur_end); btrfs_page_unlock_writer(fs_info, page, cur, cur_end + 1 - cur); if (ret < 0) { found_error = true; first_error = ret; } +next_page: put_page(page); cur = cur_end + 1; } @@ -2301,7 +2289,6 @@ int extent_writepages(struct address_space *mapping, struct btrfs_bio_ctrl bio_ctrl = { .wbc = wbc, .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), - .extent_locked = 0, }; /* -- cgit v1.2.3 From 7027f87108ce3d23ea542e1a9a79056530db4a87 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 08:05:02 +0200 Subject: btrfs: don't treat zoned writeback as being from an async helper thread When extent_write_locked_range was originally added, it was only used for writing back compressed pages from an async helper thread. But it is now also used for writing back pages on zoned devices, where it is called directly from the ->writepage context. In this case we want to be able to pass on the writeback_control instead of creating a new one, and more importantly want to use all the normal cgroup interaction instead of potentially deferring writeback to another helper. Fixes: 898793d992c2 ("btrfs: zoned: write out partially allocated region") Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 20 +++++++------------- fs/btrfs/extent_io.h | 3 ++- fs/btrfs/inode.c | 20 +++++++++++++++----- 3 files changed, 24 insertions(+), 19 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 1da247e753b0..f4d3c56b2900 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2210,7 +2210,8 @@ retry: * already been ran (aka, ordered extent inserted) and all pages are still * locked. */ -int extent_write_locked_range(struct inode *inode, u64 start, u64 end) +int extent_write_locked_range(struct inode *inode, u64 start, u64 end, + struct writeback_control *wbc) { bool found_error = false; int first_error = 0; @@ -2220,22 +2221,16 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end) const u32 sectorsize = fs_info->sectorsize; loff_t i_size = i_size_read(inode); u64 cur = start; - struct writeback_control wbc_writepages = { - .sync_mode = WB_SYNC_ALL, - .range_start = start, - .range_end = end, - .no_cgroup_owner = 1, - }; struct btrfs_bio_ctrl bio_ctrl = { - .wbc = &wbc_writepages, - /* We're called from an async helper function */ - .opf = REQ_OP_WRITE | REQ_BTRFS_CGROUP_PUNT | - wbc_to_write_flags(&wbc_writepages), + .wbc = wbc, + .opf = REQ_OP_WRITE | wbc_to_write_flags(wbc), }; + if (wbc->no_cgroup_owner) bio_ctrl.opf |= REQ_BTRFS_CGROUP_PUNT; ASSERT(IS_ALIGNED(start, sectorsize) && IS_ALIGNED(end + 1, sectorsize)); - wbc_attach_fdatawrite_inode(&wbc_writepages, inode); while (cur <= end) { u64 cur_end = min(round_down(cur, PAGE_SIZE) + PAGE_SIZE - 1, end); struct page *page; @@ -2275,7 +2270,6 @@ next_page: submit_write_bio(&bio_ctrl, found_error ?
ret : 0); - wbc_detach_inode(&wbc_writepages); if (found_error) return first_error; return ret; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 6723bf3483d9..c5fae3a7d911 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -178,7 +178,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask); int try_release_extent_buffer(struct page *page); int btrfs_read_folio(struct file *file, struct folio *folio); -int extent_write_locked_range(struct inode *inode, u64 start, u64 end); +int extent_write_locked_range(struct inode *inode, u64 start, u64 end, + struct writeback_control *wbc); int extent_writepages(struct address_space *mapping, struct writeback_control *wbc); int btree_write_cache_pages(struct address_space *mapping, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index a40a6002a198..600086b8195d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1133,6 +1133,12 @@ static int submit_uncompressed_range(struct btrfs_inode *inode, unsigned long nr_written = 0; int page_started = 0; int ret; + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .range_start = start, + .range_end = end, + .no_cgroup_owner = 1, + }; /* * Call cow_file_range() to run the delalloc range directly, since we @@ -1162,7 +1168,10 @@ static int submit_uncompressed_range(struct btrfs_inode *inode, } /* All pages will be unlocked, including @locked_page */ - return extent_write_locked_range(&inode->vfs_inode, start, end); + wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode); + ret = extent_write_locked_range(&inode->vfs_inode, start, end, &wbc); + wbc_detach_inode(&wbc); + return ret; } static int submit_one_async_extent(struct btrfs_inode *inode, @@ -1815,7 +1824,8 @@ static bool run_delalloc_compressed(struct btrfs_inode *inode, static noinline int run_delalloc_zoned(struct btrfs_inode *inode, struct page *locked_page, u64 start, u64 end, int *page_started, - unsigned long *nr_written) + unsigned long *nr_written, + struct writeback_control *wbc) { u64 done_offset = end; int ret; @@ -1847,8 +1857,8 @@ static noinline int run_delalloc_zoned(struct btrfs_inode *inode, account_page_redirty(locked_page); } locked_page_done = true; - extent_write_locked_range(&inode->vfs_inode, start, done_offset); - + extent_write_locked_range(&inode->vfs_inode, start, done_offset, + wbc); start = done_offset + 1; } @@ -2422,7 +2432,7 @@ int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct page *locked_page if (zoned) ret = run_delalloc_zoned(inode, locked_page, start, end, - page_started, nr_written); + page_started, nr_written, wbc); else ret = cow_file_range(inode, locked_page, start, end, page_started, nr_written, 1, NULL); -- cgit v1.2.3 From a39da514eba81e687db05efb1e8b7cb393e2cb71 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 09:53:55 +0200 Subject: btrfs: limit write bios to a single ordered extent Currently buffered writeback bios are allowed to span multiple ordered_extents, although that basically never actually happens since commit 4a445b7b6178 ("btrfs: don't merge pages into bio if their page offset is not contiguous"). Supporting bios than span ordered_extents complicates the file checksumming code, and prevents us from adding an ordered_extent pointer to the btrfs_bio structure. Use the existing code to limit a bio to single ordered_extent for zoned device writes for all writes. This allows to remove the REQ_BTRFS_ONE_ORDERED flags, and the handling of multiple ordered_extents in btrfs_csum_one_bio. 
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/bio.c | 3 --- fs/btrfs/bio.h | 3 --- fs/btrfs/compression.c | 2 -- fs/btrfs/extent_io.c | 11 ++--------- fs/btrfs/file-item.c | 38 -------------------------------------- 5 files changed, 2 insertions(+), 55 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index 846deed98fd0..4a75efd588df 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -457,9 +457,6 @@ static void btrfs_submit_mirrored_bio(struct btrfs_io_context *bioc, int dev_nr) static void __btrfs_submit_bio(struct bio *bio, struct btrfs_io_context *bioc, struct btrfs_io_stripe *smap, int mirror_num) { - /* Do not leak our private flag into the block layer. */ - bio->bi_opf &= ~REQ_BTRFS_ONE_ORDERED; - if (!bioc) { /* Single mirror read/write fast path. */ btrfs_bio(bio)->mirror_num = mirror_num; diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h index 8a29980159b4..52e7962103ff 100644 --- a/fs/btrfs/bio.h +++ b/fs/btrfs/bio.h @@ -102,9 +102,6 @@ static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) bbio->end_io(bbio); } -/* Bio only refers to one ordered extent. */ -#define REQ_BTRFS_ONE_ORDERED REQ_DRV - /* Submit using blkcg_punt_bio_submit. */ #define REQ_BTRFS_CGROUP_PUNT REQ_FS_PRIVATE diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index bb2d1a4ceca1..617b9572f13e 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -293,8 +293,6 @@ void btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, ASSERT(IS_ALIGNED(start, fs_info->sectorsize) && IS_ALIGNED(len, fs_info->sectorsize)); - write_flags |= REQ_BTRFS_ONE_ORDERED; - cb = alloc_compressed_bio(inode, start, REQ_OP_WRITE | write_flags, end_compressed_bio_write); cb->start = start; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index f4d3c56b2900..35141a223a3e 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -826,13 +826,8 @@ static void alloc_new_bio(struct btrfs_inode *inode, bio_ctrl->bbio = bbio; bio_ctrl->len_to_oe_boundary = U32_MAX; - /* - * Limit the extent to the ordered boundary for Zone Append. - * Compressed bios aren't submitted directly, so it doesn't apply to - * them. - */ - if (bio_ctrl->compress_type == BTRFS_COMPRESS_NONE && - btrfs_use_zone_append(bbio)) { + /* Limit data write bios to the ordered boundary. */ + if (bio_ctrl->wbc) { struct btrfs_ordered_extent *ordered; ordered = btrfs_lookup_ordered_extent(inode, file_offset); @@ -842,9 +837,7 @@ static void alloc_new_bio(struct btrfs_inode *inode, ordered->disk_num_bytes - file_offset); btrfs_put_ordered_extent(ordered); } - } - if (bio_ctrl->wbc) { /* * Pick the last added device to support cgroup writeback. 
For * multi-device file systems this means blk-cgroup policies have diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 0cb4a9921d21..782bbf081c26 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -733,8 +733,6 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) struct bio_vec bvec; int index; unsigned int blockcount; - unsigned long total_bytes = 0; - unsigned long this_sum_bytes = 0; int i; unsigned nofs_flag; @@ -776,34 +774,6 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) - 1); for (i = 0; i < blockcount; i++) { - if (!(bio->bi_opf & REQ_BTRFS_ONE_ORDERED) && - !in_range(offset, ordered->file_offset, - ordered->num_bytes)) { - unsigned long bytes_left; - - sums->len = this_sum_bytes; - this_sum_bytes = 0; - btrfs_add_ordered_sum(ordered, sums); - btrfs_put_ordered_extent(ordered); - - bytes_left = bio->bi_iter.bi_size - total_bytes; - - nofs_flag = memalloc_nofs_save(); - sums = kvzalloc(btrfs_ordered_sum_size(fs_info, - bytes_left), GFP_KERNEL); - memalloc_nofs_restore(nofs_flag); - if (!sums) - return BLK_STS_RESOURCE; - - sums->len = bytes_left; - ordered = btrfs_lookup_ordered_extent(inode, - offset); - ASSERT(ordered); /* Logic error */ - sums->logical = (bio->bi_iter.bi_sector << SECTOR_SHIFT) - + total_bytes; - index = 0; - } - data = bvec_kmap_local(&bvec); crypto_shash_digest(shash, data + (i * fs_info->sectorsize), @@ -812,18 +782,10 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) kunmap_local(data); index += fs_info->csum_size; offset += fs_info->sectorsize; - this_sum_bytes += fs_info->sectorsize; - total_bytes += fs_info->sectorsize; } } - this_sum_bytes = 0; - /* - * The ->sums assignment is for zoned writes, where a bio never spans - * ordered extents and is only done unconditionally because that's cheaper - * than a branch. - */ bbio->sums = sums; btrfs_add_ordered_sum(ordered, sums); btrfs_put_ordered_extent(ordered); -- cgit v1.2.3 From ec63b84d4611b2f07e9242080f3e8de4a011820b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 09:54:02 +0200 Subject: btrfs: add an ordered_extent pointer to struct btrfs_bio Add a pointer to the ordered_extent to the existing union in struct btrfs_bio, so all code dealing with data write bios can just use a pointer dereference to retrieve the ordered_extent instead of doing multiple rbtree lookups per I/O. The reference to this ordered_extent is dropped at end I/O time, which implies that an extra one must be acquired when the bio is split. This also requires moving the btrfs_extract_ordered_extent call into btrfs_split_bio so that the invariant of always having a valid ordered_extent reference for the btrfs_bio is kept. 
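[ Editor's note: the lifetime rule the message describes is a two-sided refcount protocol. The two fragments below are distilled from the diff that follows; names match the patch. ]

	/* In btrfs_split_bio(): each split half owns its own reference. */
	if (bbio_has_ordered_extent(bbio)) {
		refcount_inc(&orig_bbio->ordered->refs);
		bbio->ordered = orig_bbio->ordered;
	}

	/* In __btrfs_bio_end_io(): drop the reference only after ->end_io
	 * runs, saving the pointer first because end_io may free the bbio. */
	if (bbio_has_ordered_extent(bbio)) {
		struct btrfs_ordered_extent *ordered = bbio->ordered;

		bbio->end_io(bbio);
		btrfs_put_ordered_extent(ordered);
	} else {
		bbio->end_io(bbio);
	}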
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/bio.c | 42 ++++++++++++++++++++++++++++++++++++++---- fs/btrfs/bio.h | 9 +++------ fs/btrfs/compression.c | 2 +- fs/btrfs/extent_io.c | 2 +- fs/btrfs/file-item.c | 9 +-------- fs/btrfs/inode.c | 8 +++++--- 6 files changed, 49 insertions(+), 23 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/bio.c b/fs/btrfs/bio.c index 41a152e87429..12b12443efaa 100644 --- a/fs/btrfs/bio.c +++ b/fs/btrfs/bio.c @@ -33,6 +33,11 @@ static inline bool is_data_bbio(struct btrfs_bio *bbio) return bbio->inode && is_data_inode(&bbio->inode->vfs_inode); } +static bool bbio_has_ordered_extent(struct btrfs_bio *bbio) +{ + return is_data_bbio(bbio) && btrfs_op(&bbio->bio) == BTRFS_MAP_WRITE; +} + /* * Initialize a btrfs_bio structure. This skips the embedded bio itself as it * is already initialized by the block layer. @@ -88,11 +93,40 @@ static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info, bbio->inode = orig_bbio->inode; bbio->file_offset = orig_bbio->file_offset; orig_bbio->file_offset += map_length; - + if (bbio_has_ordered_extent(bbio)) { + refcount_inc(&orig_bbio->ordered->refs); + bbio->ordered = orig_bbio->ordered; + } atomic_inc(&orig_bbio->pending_ios); return bbio; } +/* Free a bio that was never submitted to the underlying device. */ +static void btrfs_cleanup_bio(struct btrfs_bio *bbio) +{ + if (bbio_has_ordered_extent(bbio)) + btrfs_put_ordered_extent(bbio->ordered); + bio_put(&bbio->bio); +} + +static void __btrfs_bio_end_io(struct btrfs_bio *bbio) +{ + if (bbio_has_ordered_extent(bbio)) { + struct btrfs_ordered_extent *ordered = bbio->ordered; + + bbio->end_io(bbio); + btrfs_put_ordered_extent(ordered); + } else { + bbio->end_io(bbio); + } +} + +void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) +{ + bbio->bio.bi_status = status; + __btrfs_bio_end_io(bbio); +} + static void btrfs_orig_write_end_io(struct bio *bio); static void btrfs_bbio_propagate_error(struct btrfs_bio *bbio, @@ -121,12 +155,12 @@ static void btrfs_orig_bbio_end_io(struct btrfs_bio *bbio) if (bbio->bio.bi_status) btrfs_bbio_propagate_error(bbio, orig_bbio); - bio_put(&bbio->bio); + btrfs_cleanup_bio(bbio); bbio = orig_bbio; } if (atomic_dec_and_test(&bbio->pending_ios)) - bbio->end_io(bbio); + __btrfs_bio_end_io(bbio); } static int next_repair_mirror(struct btrfs_failed_bio *fbio, int cur_mirror) @@ -684,7 +718,7 @@ done: fail_put_bio: if (map_length < length) - bio_put(bio); + btrfs_cleanup_bio(bbio); fail: btrfs_bio_counter_dec(fs_info); btrfs_bio_end_io(orig_bbio, ret); diff --git a/fs/btrfs/bio.h b/fs/btrfs/bio.h index 52e7962103ff..ca79decee060 100644 --- a/fs/btrfs/bio.h +++ b/fs/btrfs/bio.h @@ -50,11 +50,13 @@ struct btrfs_bio { /* * For data writes: + * - ordered extent covering the bio * - pointer to the checksums for this bio * - original physical address from the allocator * (for zone append only) */ struct { + struct btrfs_ordered_extent *ordered; struct btrfs_ordered_sum *sums; u64 orig_physical; }; @@ -95,12 +97,7 @@ void btrfs_bio_init(struct btrfs_bio *bbio, struct btrfs_fs_info *fs_info, struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf, struct btrfs_fs_info *fs_info, btrfs_bio_end_io_t end_io, void *private); - -static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status) -{ - bbio->bio.bi_status = status; - bbio->end_io(bbio); -} +void btrfs_bio_end_io(struct 
btrfs_bio *bbio, blk_status_t status); /* Submit using blkcg_punt_bio_submit. */ #define REQ_BTRFS_CGROUP_PUNT REQ_FS_PRIVATE diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index aafc846bcd4f..ce4be0f21c5c 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -303,10 +303,10 @@ void btrfs_submit_compressed_write(struct btrfs_ordered_extent *ordered, INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work); cb->nr_pages = nr_pages; cb->bbio.bio.bi_iter.bi_sector = ordered->disk_bytenr >> SECTOR_SHIFT; + cb->bbio.ordered = ordered; btrfs_add_compressed_bio_pages(cb); btrfs_submit_bio(&cb->bbio, 0); - btrfs_put_ordered_extent(ordered); } /* diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 35141a223a3e..ad536eaf02b0 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -835,7 +835,7 @@ static void alloc_new_bio(struct btrfs_inode *inode, bio_ctrl->len_to_oe_boundary = min_t(u32, U32_MAX, ordered->file_offset + ordered->disk_num_bytes - file_offset); - btrfs_put_ordered_extent(ordered); + bbio->ordered = ordered; } /* diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index 782bbf081c26..cbacaa80526c 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -799,19 +799,12 @@ blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio) */ blk_status_t btrfs_alloc_dummy_sum(struct btrfs_bio *bbio) { - struct btrfs_ordered_extent *ordered = - btrfs_lookup_ordered_extent(bbio->inode, bbio->file_offset); - - if (WARN_ON_ONCE(!ordered)) - return BLK_STS_IOERR; - bbio->sums = kmalloc(sizeof(*bbio->sums), GFP_NOFS); if (!bbio->sums) return BLK_STS_RESOURCE; bbio->sums->len = bbio->bio.bi_iter.bi_size; bbio->sums->logical = bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT; - btrfs_add_ordered_sum(ordered, bbio->sums); - btrfs_put_ordered_extent(ordered); + btrfs_add_ordered_sum(bbio->ordered, bbio->sums); return 0; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 020d871bb135..b8ec3d95e659 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2716,8 +2716,11 @@ static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, return -EINVAL; /* No need to split if the ordered extent covers the entire bio. */ - if (ordered->disk_num_bytes == len) + if (ordered->disk_num_bytes == len) { + refcount_inc(&ordered->refs); + bbio->ordered = ordered; return 0; + } /* * Don't split the extent_map for NOCOW extents, as we're writing into @@ -2734,8 +2737,7 @@ static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio, new = btrfs_split_ordered_extent(ordered, len); if (IS_ERR(new)) return PTR_ERR(new); - btrfs_put_ordered_extent(new); - + bbio->ordered = new; return 0; } -- cgit v1.2.3 From 4ba8223d3d0124894578554b473bd7cc680dfd5b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 09:54:07 +0200 Subject: btrfs: open code end_extent_writepage in end_bio_extent_writepage This prepares for switching to more efficient ordered_extent processing and already removes the back-and-forth conversion from len to end and back to len.
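[ Editor's note: side by side, the len/end round trip being removed looks like this. Sketch based on the diff below; end_extent_writepage() internally re-derives len as end + 1 - start, which is the conversion this patch drops. ]

	/* Before: convert len to an inclusive end, only to convert back
	 * inside the helper. */
	start = page_offset(page) + bvec->bv_offset;
	end = start + bvec->bv_len - 1;
	end_extent_writepage(page, error, start, end);

	/* After: keep start/len and finish the range directly. */
	u64 start = page_offset(page) + bvec->bv_offset;
	u32 len = bvec->bv_len;
	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), page, start,
					     start + len - 1, !error);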
Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index ad536eaf02b0..827e8dfebf0f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -536,8 +536,6 @@ static void end_bio_extent_writepage(struct btrfs_bio *bbio) struct bio *bio = &bbio->bio; int error = blk_status_to_errno(bio->bi_status); struct bio_vec *bvec; - u64 start; - u64 end; struct bvec_iter_all iter_all; ASSERT(!bio_flagged(bio, BIO_CLONED)); @@ -546,6 +544,8 @@ static void end_bio_extent_writepage(struct btrfs_bio *bbio) struct inode *inode = page->mapping->host; struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); const u32 sectorsize = fs_info->sectorsize; + u64 start = page_offset(page) + bvec->bv_offset; + u32 len = bvec->bv_len; /* Our read/write should always be sector aligned. */ if (!IS_ALIGNED(bvec->bv_offset, sectorsize)) @@ -557,12 +557,13 @@ "incomplete page write with offset %u and length %u", bvec->bv_offset, bvec->bv_len); - start = page_offset(page) + bvec->bv_offset; - end = start + bvec->bv_len - 1; - - end_extent_writepage(page, error, start, end); - - btrfs_page_clear_writeback(fs_info, page, start, bvec->bv_len); + btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), page, start, + start + len - 1, !error); + if (error) { + btrfs_page_clear_uptodate(fs_info, page, start, len); + mapping_set_error(page->mapping, error); + } + btrfs_page_clear_writeback(fs_info, page, start, len); } bio_put(bio); -- cgit v1.2.3 From 0d394cca8435045f909bd2bb099575b11452e19a Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Wed, 31 May 2023 09:54:10 +0200 Subject: btrfs: use btrfs_finish_ordered_extent to complete buffered writes Use the btrfs_finish_ordered_extent helper to complete buffered writes using the bbio->ordered pointer instead of requiring an rbtree lookup in the otherwise equivalent btrfs_mark_ordered_io_finished called from btrfs_writepage_endio_finish_ordered. Reviewed-by: Johannes Thumshirn Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/extent_io.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'fs/btrfs/extent_io.c') diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 827e8dfebf0f..a91d5ad27984 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -557,8 +557,7 @@ static void end_bio_extent_writepage(struct btrfs_bio *bbio) "incomplete page write with offset %u and length %u", bvec->bv_offset, bvec->bv_len); - btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), page, start, - start + len - 1, !error); + btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error); if (error) { btrfs_page_clear_uptodate(fs_info, page, start, len); mapping_set_error(page->mapping, error); -- cgit v1.2.3
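[ Editor's note: summing up the series for the buffered write path, completion used to locate the ordered extent with an rbtree lookup on every bio and now simply dereferences the pointer the bio has carried since submission. The comparison below is a sketch; per the commit message, the lookup was hidden inside btrfs_mark_ordered_io_finished, called via the old helper. ]

	/* Old: per-completion rbtree lookup inside the helper chain. */
	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), page, start,
					     start + len - 1, !error);

	/* New: bbio->ordered was pinned at submit time, no lookup needed. */
	btrfs_finish_ordered_extent(bbio->ordered, page, start, len, !error);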