Diffstat (limited to 'fs/btrfs')
-rw-r--r-- | fs/btrfs/file.c | 30
1 file changed, 24 insertions, 6 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c8f953b7c084..15934d342b5f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -506,6 +506,16 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 	num_bytes = ALIGN(write_bytes + pos - start_pos,
 			  root->sectorsize);
 	end_of_last_block = start_pos + num_bytes - 1;
+
+	/*
+	 * The pages may have already been dirty, clear out old accounting so
+	 * we can set things up properly
+	 */
+	clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, end_of_last_block,
+			 EXTENT_DIRTY | EXTENT_DELALLOC |
+			 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached,
+			 GFP_NOFS);
+
 	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
 					cached);
 	if (err)
@@ -1408,18 +1418,26 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages,
 		if (ordered)
 			btrfs_put_ordered_extent(ordered);
 
-		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
-				 last_pos, EXTENT_DIRTY | EXTENT_DELALLOC |
-				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
-				 0, 0, cached_state, GFP_NOFS);
 		*lockstart = start_pos;
 		*lockend = last_pos;
 		ret = 1;
 	}
 
+	/*
+	 * It's possible the pages are dirty right now, but we don't want
+	 * to clean them yet because copy_from_user may catch a page fault
+	 * and we might have to fall back to one page at a time.  If that
+	 * happens, we'll unlock these pages and we'd have a window where
+	 * reclaim could sneak in and drop the once-dirty page on the floor
+	 * without writing it.
+	 *
+	 * We have the pages locked and the extent range locked, so there's
+	 * no way someone can start IO on any dirty pages in this range.
+	 *
+	 * We'll call btrfs_dirty_pages() later on, and that will flip around
+	 * delalloc bits and dirty the pages as required.
+	 */
 	for (i = 0; i < num_pages; i++) {
-		if (clear_page_dirty_for_io(pages[i]))
-			account_page_redirty(pages[i]);
 		set_page_extent_mapped(pages[i]);
 		WARN_ON(!PageLocked(pages[i]));
 	}
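
For context, a rough sketch of how the two hunks cooperate during one iteration of the buffered write loop in fs/btrfs/file.c. This is an illustrative outline only, not the kernel source: the helper argument lists are trimmed and partly assumed, and the error handling is simplified, so treat every prototype below as an assumption rather than the real API.

/*
 * Illustrative sketch only -- not the actual kernel code.  It mirrors the
 * ordering the patch relies on: pages stay dirty across the user copy, and
 * btrfs_dirty_pages() later clears the stale accounting bits before it sets
 * delalloc and redirties the pages.  Argument lists are trimmed/assumed.
 */
static ssize_t buffered_write_iteration_sketch(struct btrfs_root *root,
					       struct inode *inode,
					       struct page **pages,
					       size_t num_pages, loff_t pos,
					       size_t write_bytes,
					       struct iov_iter *iter)
{
	struct extent_state *cached_state = NULL;
	u64 lockstart, lockend;
	size_t copied;
	int ret;

	/*
	 * Lock the pages and the extent range.  After this patch the helper
	 * no longer cleans already-dirty pages here: if the copy below faults
	 * we may unlock and retry, and a cleaned-but-not-written page could
	 * be dropped by reclaim in that window.
	 */
	ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages, pos,
					      &lockstart, &lockend,
					      &cached_state);
	if (ret < 0)
		return ret;

	/* May take a page fault and copy fewer bytes than requested
	 * (argument list assumed for this sketch). */
	copied = btrfs_copy_from_user(pos, write_bytes, pages, iter);

	/*
	 * First hunk: btrfs_dirty_pages() clears any stale DIRTY/DELALLOC/
	 * DO_ACCOUNTING/DEFRAG bits for the range, then sets delalloc and
	 * dirties the pages, so accounting is rebuilt consistently even when
	 * the pages were dirty on entry.
	 */
	ret = btrfs_dirty_pages(root, inode, pages, num_pages, pos, copied,
				&cached_state);

	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret ? ret : copied;
}

The point the sketch tries to make visible is the ordering: nothing between the extent lock and btrfs_dirty_pages() is allowed to clean a dirty page, and the second hunk removes the one place that used to do so.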