Diffstat (limited to 'fs/btrfs/extent_io.c')
-rw-r--r-- | fs/btrfs/extent_io.c | 168
1 file changed, 76 insertions, 92 deletions
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cce6087d6880..628f1aef34b0 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -140,14 +140,6 @@ static int add_extent_changeset(struct extent_state *state, unsigned bits,
 
 static void flush_write_bio(struct extent_page_data *epd);
 
-static inline struct btrfs_fs_info *
-tree_fs_info(struct extent_io_tree *tree)
-{
-	if (tree->ops)
-		return tree->ops->tree_fs_info(tree->private_data);
-	return NULL;
-}
-
 int __init extent_io_init(void)
 {
 	extent_state_cache = kmem_cache_create("btrfs_extent_state",
@@ -564,8 +556,10 @@ alloc_extent_state_atomic(struct extent_state *prealloc)
 
 static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
 {
-	btrfs_panic(tree_fs_info(tree), err,
-		    "Locking error: Extent tree was modified by another thread while locked.");
+	struct inode *inode = tree->private_data;
+
+	btrfs_panic(btrfs_sb(inode->i_sb), err,
+		"locking error: extent tree was modified by another thread while locked");
 }
 
 /*
@@ -1386,14 +1380,6 @@ void extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end)
 	}
 }
 
-/*
- * helper function to set both pages and extents in the tree writeback
- */
-static void set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
-{
-	tree->ops->set_range_writeback(tree->private_data, start, end);
-}
-
 /* find the first state struct with 'bits' set after 'start', and
  * return it. tree->lock must be held. NULL will returned if
  * nothing was found after 'start'
@@ -2059,7 +2045,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info,
 			 struct extent_buffer *eb, int mirror_num)
 {
 	u64 start = eb->start;
-	unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
+	int i, num_pages = num_extent_pages(eb);
 	int ret = 0;
 
 	if (sb_rdonly(fs_info->sb))
@@ -2398,7 +2384,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
 				      start - page_offset(page),
 				      (int)phy_offset, failed_bio->bi_end_io,
 				      NULL);
-	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);
+	bio->bi_opf = REQ_OP_READ | read_mode;
 
 	btrfs_debug(btrfs_sb(inode->i_sb),
 		"Repair Read Error: submitting new read[%#x] to this_mirror=%d, in_validation=%d",
@@ -2790,8 +2776,8 @@ static int submit_extent_page(unsigned int opf, struct extent_io_tree *tree,
 		else
 			contig = bio_end_sector(bio) == sector;
 
-		if (tree->ops && tree->ops->merge_bio_hook(page, offset,
-					page_size, bio, bio_flags))
+		if (tree->ops && btrfs_merge_bio_hook(page, offset, page_size,
+						      bio, bio_flags))
 			can_merge = false;
 
 		if (prev_bio_flags != bio_flags || !contig || !can_merge ||
@@ -3422,7 +3408,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode,
 			continue;
 		}
 
-		set_range_writeback(tree, cur, cur + iosize - 1);
+		btrfs_set_range_writeback(tree, cur, cur + iosize - 1);
 		if (!PageWriteback(page)) {
 			btrfs_err(BTRFS_I(inode)->root->fs_info,
 				   "page %lu not writeback, cur %llu end %llu",
@@ -3538,7 +3524,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
 			  struct btrfs_fs_info *fs_info,
 			  struct extent_page_data *epd)
 {
-	unsigned long i, num_pages;
+	int i, num_pages;
 	int flush = 0;
 	int ret = 0;
 
@@ -3588,7 +3574,7 @@ lock_extent_buffer_for_io(struct extent_buffer *eb,
 	if (!ret)
 		return ret;
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = eb->pages[i];
 
@@ -3712,13 +3698,13 @@ static noinline_for_stack int write_one_eb(struct extent_buffer *eb,
 	struct extent_io_tree *tree = &BTRFS_I(fs_info->btree_inode)->io_tree;
 	u64 offset = eb->start;
 	u32 nritems;
-	unsigned long i, num_pages;
+	int i, num_pages;
 	unsigned long start, end;
 	unsigned int write_flags = wbc_to_write_flags(wbc) | REQ_META;
 	int ret = 0;
 
 	clear_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags);
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	atomic_set(&eb->io_pages, num_pages);
 
 	/* set btree blocks beyond nritems with 0 to avoid stale content. */
@@ -4238,8 +4224,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
 	struct extent_map *em;
 	u64 start = page_offset(page);
 	u64 end = start + PAGE_SIZE - 1;
-	struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
-	struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree;
+	struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
+	struct extent_io_tree *tree = &btrfs_inode->io_tree;
+	struct extent_map_tree *map = &btrfs_inode->extent_tree;
 
 	if (gfpflags_allow_blocking(mask) &&
 	    page->mapping->host->i_size > SZ_16M) {
@@ -4262,6 +4249,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
 					    extent_map_end(em) - 1,
 					    EXTENT_LOCKED | EXTENT_WRITEBACK,
 					    0, NULL)) {
+				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+					&btrfs_inode->runtime_flags);
 				remove_extent_mapping(map, em);
 				/* once for the rb tree */
 				free_extent_map(em);
@@ -4542,8 +4531,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			offset_in_extent = em_start - em->start;
 		em_end = extent_map_end(em);
 		em_len = em_end - em_start;
-		disko = em->block_start + offset_in_extent;
 		flags = 0;
+		if (em->block_start < EXTENT_MAP_LAST_BYTE)
+			disko = em->block_start + offset_in_extent;
+		else
+			disko = 0;
 
 		/*
 		 * bump off for our next call to get_extent
@@ -4637,23 +4629,20 @@ int extent_buffer_under_io(struct extent_buffer *eb)
 }
 
 /*
- * Helper for releasing extent buffer page.
+ * Release all pages attached to the extent buffer.
  */
-static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
+static void btrfs_release_extent_buffer_pages(struct extent_buffer *eb)
 {
-	unsigned long index;
-	struct page *page;
-	int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+	int i;
+	int num_pages;
+	int mapped = !test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
 
 	BUG_ON(extent_buffer_under_io(eb));
 
-	index = num_extent_pages(eb->start, eb->len);
-	if (index == 0)
-		return;
+	num_pages = num_extent_pages(eb);
+	for (i = 0; i < num_pages; i++) {
+		struct page *page = eb->pages[i];
 
-	do {
-		index--;
-		page = eb->pages[index];
 		if (!page)
 			continue;
 		if (mapped)
@@ -4685,7 +4674,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
 
 		/* One for when we allocated the page */
 		put_page(page);
-	} while (index != 0);
+	}
 }
 
 /*
@@ -4693,7 +4682,7 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
  */
 static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 {
-	btrfs_release_extent_buffer_page(eb);
+	btrfs_release_extent_buffer_pages(eb);
 	__free_extent_buffer(eb);
 }
 
@@ -4737,10 +4726,10 @@ __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
 
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
 {
-	unsigned long i;
+	int i;
 	struct page *p;
 	struct extent_buffer *new;
-	unsigned long num_pages = num_extent_pages(src->start, src->len);
+	int num_pages = num_extent_pages(src);
 
 	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
 	if (new == NULL)
@@ -4760,7 +4749,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
 	}
 
 	set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
-	set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
+	set_bit(EXTENT_BUFFER_UNMAPPED, &new->bflags);
 
 	return new;
 }
@@ -4769,15 +4758,14 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 						  u64 start, unsigned long len)
 {
 	struct extent_buffer *eb;
-	unsigned long num_pages;
-	unsigned long i;
-
-	num_pages = num_extent_pages(start, len);
+	int num_pages;
+	int i;
 
 	eb = __alloc_extent_buffer(fs_info, start, len);
 	if (!eb)
 		return NULL;
 
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		eb->pages[i] = alloc_page(GFP_NOFS);
 		if (!eb->pages[i])
@@ -4785,7 +4773,7 @@ struct extent_buffer *__alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
 	}
 	set_extent_buffer_uptodate(eb);
 	btrfs_set_header_nritems(eb, 0);
-	set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
+	set_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags);
 
 	return eb;
 err:
@@ -4837,11 +4825,11 @@ static void check_buffer_tree_ref(struct extent_buffer *eb)
 static void mark_extent_buffer_accessed(struct extent_buffer *eb,
 		struct page *accessed)
 {
-	unsigned long num_pages, i;
+	int num_pages, i;
 
 	check_buffer_tree_ref(eb);
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = eb->pages[i];
 
@@ -4938,8 +4926,8 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 					  u64 start)
 {
 	unsigned long len = fs_info->nodesize;
-	unsigned long num_pages = num_extent_pages(start, len);
-	unsigned long i;
+	int num_pages;
+	int i;
 	unsigned long index = start >> PAGE_SHIFT;
 	struct extent_buffer *eb;
 	struct extent_buffer *exists = NULL;
@@ -4961,6 +4949,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 	if (!eb)
 		return ERR_PTR(-ENOMEM);
 
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++, index++) {
 		p = find_or_create_page(mapping, index, GFP_NOFS|__GFP_NOFAIL);
 		if (!p) {
@@ -5003,8 +4992,11 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
 			uptodate = 0;
 
 		/*
-		 * see below about how we avoid a nasty race with release page
-		 * and why we unlock later
+		 * We can't unlock the pages just yet since the extent buffer
+		 * hasn't been properly inserted in the radix tree, this
+		 * opens a race with btree_releasepage which can free a page
+		 * while we are still filling in all pages for the buffer and
+		 * we could crash.
 		 */
 	}
 	if (uptodate)
@@ -5033,21 +5025,12 @@ again:
 	set_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags);
 
 	/*
-	 * there is a race where release page may have
-	 * tried to find this extent buffer in the radix
-	 * but failed. It will tell the VM it is safe to
-	 * reclaim the, and it will clear the page private bit.
-	 * We must make sure to set the page private bit properly
-	 * after the extent buffer is in the radix tree so
-	 * it doesn't get lost
+	 * Now it's safe to unlock the pages because any calls to
+	 * btree_releasepage will correctly detect that a page belongs to a
+	 * live buffer and won't free them prematurely.
 	 */
-	SetPageChecked(eb->pages[0]);
-	for (i = 1; i < num_pages; i++) {
-		p = eb->pages[i];
-		ClearPageChecked(p);
-		unlock_page(p);
-	}
-	unlock_page(eb->pages[0]);
+	for (i = 0; i < num_pages; i++)
+		unlock_page(eb->pages[i]);
 	return eb;
 
 free_eb:
@@ -5069,9 +5052,10 @@ static inline void btrfs_release_extent_buffer_rcu(struct rcu_head *head)
 	__free_extent_buffer(eb);
 }
 
-/* Expects to have eb->eb_lock already held */
 static int release_extent_buffer(struct extent_buffer *eb)
 {
+	lockdep_assert_held(&eb->refs_lock);
+
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	if (atomic_dec_and_test(&eb->refs)) {
 		if (test_and_clear_bit(EXTENT_BUFFER_IN_TREE, &eb->bflags)) {
@@ -5088,9 +5072,9 @@ static int release_extent_buffer(struct extent_buffer *eb)
 		}
 
 		/* Should be safe to release our pages at this point */
-		btrfs_release_extent_buffer_page(eb);
+		btrfs_release_extent_buffer_pages(eb);
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-		if (unlikely(test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))) {
+		if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))) {
 			__free_extent_buffer(eb);
 			return 1;
 		}
@@ -5121,7 +5105,7 @@ void free_extent_buffer(struct extent_buffer *eb)
 
 	spin_lock(&eb->refs_lock);
 	if (atomic_read(&eb->refs) == 2 &&
-	    test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
+	    test_bit(EXTENT_BUFFER_UNMAPPED, &eb->bflags))
 		atomic_dec(&eb->refs);
 
 	if (atomic_read(&eb->refs) == 2 &&
@@ -5153,11 +5137,11 @@ void free_extent_buffer_stale(struct extent_buffer *eb)
 
 void clear_extent_buffer_dirty(struct extent_buffer *eb)
 {
-	unsigned long i;
-	unsigned long num_pages;
+	int i;
+	int num_pages;
 	struct page *page;
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
@@ -5183,15 +5167,15 @@ void clear_extent_buffer_dirty(struct extent_buffer *eb)
 
 int set_extent_buffer_dirty(struct extent_buffer *eb)
 {
-	unsigned long i;
-	unsigned long num_pages;
+	int i;
+	int num_pages;
 	int was_dirty = 0;
 
 	check_buffer_tree_ref(eb);
 
 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	WARN_ON(atomic_read(&eb->refs) == 0);
 	WARN_ON(!test_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags));
 
@@ -5202,12 +5186,12 @@ int set_extent_buffer_dirty(struct extent_buffer *eb)
 
 void clear_extent_buffer_uptodate(struct extent_buffer *eb)
 {
-	unsigned long i;
+	int i;
 	struct page *page;
-	unsigned long num_pages;
+	int num_pages;
 
 	clear_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		if (page)
@@ -5217,12 +5201,12 @@ void clear_extent_buffer_uptodate(struct extent_buffer *eb)
 
 void set_extent_buffer_uptodate(struct extent_buffer *eb)
 {
-	unsigned long i;
+	int i;
 	struct page *page;
-	unsigned long num_pages;
+	int num_pages;
 
 	set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		SetPageUptodate(page);
@@ -5232,13 +5216,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 			     struct extent_buffer *eb, int wait, int mirror_num)
 {
-	unsigned long i;
+	int i;
 	struct page *page;
 	int err;
 	int ret = 0;
 	int locked_pages = 0;
 	int all_uptodate = 1;
-	unsigned long num_pages;
+	int num_pages;
 	unsigned long num_reads = 0;
 	struct bio *bio = NULL;
 	unsigned long bio_flags = 0;
@@ -5246,7 +5230,7 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
 	if (test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags))
 		return 0;
 
-	num_pages = num_extent_pages(eb->start, eb->len);
+	num_pages = num_extent_pages(eb);
 	for (i = 0; i < num_pages; i++) {
 		page = eb->pages[i];
 		if (wait == WAIT_NONE) {
@@ -5570,11 +5554,11 @@ void copy_extent_buffer_full(struct extent_buffer *dst,
 			     struct extent_buffer *src)
 {
 	int i;
-	unsigned num_pages;
+	int num_pages;
 
 	ASSERT(dst->len == src->len);
 
-	num_pages = num_extent_pages(dst->start, dst->len);
+	num_pages = num_extent_pages(dst);
 	for (i = 0; i < num_pages; i++)
 		copy_page(page_address(dst->pages[i]),
 			  page_address(src->pages[i]));