| author | Arnaldo Carvalho de Melo <acme@redhat.com> | 2021-06-30 21:27:32 +0300 |
|---|---|---|
| committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2021-06-30 21:27:32 +0300 |
| commit | 857286e4c5ae5d2e860fd15d4628e707b434d7e5 (patch) | |
| tree | 520ea5916f50fb2a4289d8d70438d559c6808b01 /fs/btrfs/file.c | |
| parent | 51f382428c17f172f430f9be8de4246b8f15f97c (diff) | |
| parent | 007b350a58754a93ca9fe50c498cc27780171153 (diff) | |
| download | linux-857286e4c5ae5d2e860fd15d4628e707b434d7e5.tar.xz | |
Merge remote-tracking branch 'torvalds/master' into perf/core
To pick up fixes.
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'fs/btrfs/file.c')
| -rw-r--r-- | fs/btrfs/file.c | 44 |
|---|---|---|
1 file changed, 28 insertions, 16 deletions
```diff
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 55f68422061d..28a05ba47060 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -28,6 +28,7 @@
 #include "compression.h"
 #include "delalloc-space.h"
 #include "reflink.h"
+#include "subpage.h"
 
 static struct kmem_cache *btrfs_inode_defrag_cachep;
 /*
@@ -482,6 +483,7 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
 	start_pos = round_down(pos, fs_info->sectorsize);
 	num_bytes = round_up(write_bytes + pos - start_pos,
 			     fs_info->sectorsize);
+	ASSERT(num_bytes <= U32_MAX);
 
 	end_of_last_block = start_pos + num_bytes - 1;
 
@@ -500,9 +502,10 @@ int btrfs_dirty_pages(struct btrfs_inode *inode, struct page **pages,
 
 	for (i = 0; i < num_pages; i++) {
 		struct page *p = pages[i];
-		SetPageUptodate(p);
+
+		btrfs_page_clamp_set_uptodate(fs_info, p, start_pos, num_bytes);
 		ClearPageChecked(p);
-		set_page_dirty(p);
+		btrfs_page_clamp_set_dirty(fs_info, p, start_pos, num_bytes);
 	}
 
 	/*
@@ -2483,6 +2486,17 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
 				       const u64 lockend,
 				       struct extent_state **cached_state)
 {
+	/*
+	 * For subpage case, if the range is not at page boundary, we could
+	 * have pages at the leading/tailing part of the range.
+	 * This could lead to dead loop since filemap_range_has_page()
+	 * will always return true.
+	 * So here we need to do extra page alignment for
+	 * filemap_range_has_page().
+	 */
+	const u64 page_lockstart = round_up(lockstart, PAGE_SIZE);
+	const u64 page_lockend = round_down(lockend + 1, PAGE_SIZE) - 1;
+
 	while (1) {
 		struct btrfs_ordered_extent *ordered;
 		int ret;
@@ -2503,7 +2517,7 @@ static int btrfs_punch_hole_lock_range(struct inode *inode,
 		    (ordered->file_offset + ordered->num_bytes <= lockstart ||
 		     ordered->file_offset > lockend)) &&
 		     !filemap_range_has_page(inode->i_mapping,
-					     lockstart, lockend)) {
+					     page_lockstart, page_lockend)) {
 			if (ordered)
 				btrfs_put_ordered_extent(ordered);
 			break;
@@ -3034,22 +3048,20 @@ struct falloc_range {
  */
 static int add_falloc_range(struct list_head *head, u64 start, u64 len)
 {
-	struct falloc_range *prev = NULL;
 	struct falloc_range *range = NULL;
 
-	if (list_empty(head))
-		goto insert;
-
-	/*
-	 * As fallocate iterate by bytenr order, we only need to check
-	 * the last range.
-	 */
-	prev = list_entry(head->prev, struct falloc_range, list);
-	if (prev->start + prev->len == start) {
-		prev->len += len;
-		return 0;
+	if (!list_empty(head)) {
+		/*
+		 * As fallocate iterates by bytenr order, we only need to check
+		 * the last range.
+		 */
+		range = list_last_entry(head, struct falloc_range, list);
+		if (range->start + range->len == start) {
+			range->len += len;
+			return 0;
+		}
 	}
-insert:
+
 	range = kmalloc(sizeof(*range), GFP_KERNEL);
 	if (!range)
 		return -ENOMEM;
```
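The btrfs_dirty_pages() hunks replace the whole-page SetPageUptodate()/set_page_dirty() calls with btrfs_page_clamp_set_uptodate()/btrfs_page_clamp_set_dirty(), which flag only the part of each page actually covered by the write; that matters once the filesystem sector size is smaller than the page size. Below is a minimal userspace sketch of the clamping arithmetic only, assuming 4 KiB pages; clamp_to_page() is a hypothetical stand-in, not the kernel helper.

```c
/*
 * Hypothetical illustration of the "clamp" idea behind the
 * btrfs_page_clamp_* helpers: intersect a file range with one page so
 * that only the covered portion of that page gets flagged.
 * Assumes 4 KiB pages; this is not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static void clamp_to_page(uint64_t page_start, uint64_t start, uint64_t len,
			  uint64_t *out_start, uint64_t *out_len)
{
	uint64_t page_end = page_start + PAGE_SIZE;	/* exclusive */
	uint64_t lo = start > page_start ? start : page_start;
	uint64_t hi = start + len < page_end ? start + len : page_end;

	*out_start = lo;
	*out_len = hi > lo ? hi - lo : 0;
}

int main(void)
{
	uint64_t s, l;

	/* A 2 KiB write at offset 3 KiB straddles two pages. */
	clamp_to_page(0, 3072, 2048, &s, &l);
	printf("page 0: start=%llu len=%llu\n",
	       (unsigned long long)s, (unsigned long long)l);	/* 3072, 1024 */

	clamp_to_page(4096, 3072, 2048, &s, &l);
	printf("page 1: start=%llu len=%llu\n",
	       (unsigned long long)s, (unsigned long long)l);	/* 4096, 1024 */
	return 0;
}
```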
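The comment added in btrfs_punch_hole_lock_range() describes the hazard: with a subpage sector size, an unaligned lock range only partially covers its first or last page, so filemap_range_has_page() on the raw range keeps returning true and the retry loop never terminates. Rounding the range inward restricts the check to pages that lie fully inside the locked range. A small userspace sketch of that arithmetic follows, assuming 4 KiB pages; round_up()/round_down() are reimplemented locally since the kernel macros are unavailable.

```c
/*
 * Sketch of the inward page alignment added in
 * btrfs_punch_hole_lock_range(). Not kernel code: round_up_to() and
 * round_down_to() are local reimplementations and PAGE_SIZE is assumed
 * to be 4 KiB.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static uint64_t round_up_to(uint64_t x, uint64_t a)
{
	return (x + a - 1) / a * a;
}

static uint64_t round_down_to(uint64_t x, uint64_t a)
{
	return x / a * a;
}

static void align_inward(uint64_t lockstart, uint64_t lockend)
{
	uint64_t page_lockstart = round_up_to(lockstart, PAGE_SIZE);
	uint64_t page_lockend = round_down_to(lockend + 1, PAGE_SIZE) - 1;

	printf("[%llu, %llu] -> pages [%llu, %llu]%s\n",
	       (unsigned long long)lockstart, (unsigned long long)lockend,
	       (unsigned long long)page_lockstart,
	       (unsigned long long)page_lockend,
	       page_lockstart > page_lockend ? " (no fully covered page)" : "");
}

int main(void)
{
	/*
	 * Unaligned range covering the tail of page 0 and all of page 1:
	 * only page 1, i.e. [4096, 8191], is fully covered.
	 */
	align_inward(1024, 8191);

	/*
	 * Range strictly inside page 1: no page is fully covered, so the
	 * filemap check must not be consulted for the partial pages, or
	 * the loop would spin forever.
	 */
	align_inward(5120, 7167);
	return 0;
}
```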
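The add_falloc_range() rewrite drops the goto/label flow: because fallocate walks the file in increasing byte order, a new range can only ever be contiguous with the most recently appended entry, so it either extends that entry (now fetched with list_last_entry()) or is appended as a fresh allocation. Below is a userspace sketch of the same coalescing pattern, with a plain tail-pointer list standing in for the kernel's list_head API.

```c
/*
 * Userspace sketch of the coalescing pattern in add_falloc_range():
 * fallocate visits ranges in increasing byte order, so a new range can
 * only merge with the most recently appended one. A tail-pointer list
 * stands in for the kernel's list_head API; this is not kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct falloc_range {
	uint64_t start;
	uint64_t len;
	struct falloc_range *next;
};

struct range_list {
	struct falloc_range *head;
	struct falloc_range *tail;
};

static int add_falloc_range(struct range_list *list, uint64_t start,
			    uint64_t len)
{
	struct falloc_range *range;

	/* Only the last range can be contiguous with the new one. */
	if (list->tail && list->tail->start + list->tail->len == start) {
		list->tail->len += len;
		return 0;
	}

	range = malloc(sizeof(*range));
	if (!range)
		return -1;	/* stands in for -ENOMEM */
	range->start = start;
	range->len = len;
	range->next = NULL;

	if (list->tail)
		list->tail->next = range;
	else
		list->head = range;
	list->tail = range;
	return 0;
}

int main(void)
{
	struct range_list list = { NULL, NULL };
	struct falloc_range *r;

	add_falloc_range(&list, 0, 4096);	/* new entry */
	add_falloc_range(&list, 4096, 4096);	/* merges into [0, 8192) */
	add_falloc_range(&list, 16384, 4096);	/* gap: new entry */

	r = list.head;
	while (r) {
		struct falloc_range *next = r->next;

		printf("range: start=%llu len=%llu\n",
		       (unsigned long long)r->start,
		       (unsigned long long)r->len);
		free(r);
		r = next;
	}
	return 0;
}
```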
