| author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-16 21:53:01 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-16 21:53:01 +0300 |
| commit | 087a76d390cbb8c0d21ea0cb3672ab4a7bb76362 (patch) | |
| tree | 7c86752ba8d430c37618851ee2806b5b921299bd /fs/btrfs/compression.c | |
| parent | 759b2656b259d10935647a92dbfae7fafee6a790 (diff) | |
| parent | 2939e1a86f758b55cdba73e29397dd3d94df13bc (diff) | |
| download | linux-087a76d390cbb8c0d21ea0cb3672ab4a7bb76362.tar.xz | |
Merge branch 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason:
"Jeff Mahoney and Dave Sterba have a really nice set of cleanups in
here, and Christoph pitched in corrections/improvements to make btrfs
use proper helpers for bio walking instead of doing it by hand.
There are some key fixes as well, including some long standing bugs
that took forever to track down in btrfs_drop_extents and during
balance"
* 'for-linus-4.10' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (77 commits)
btrfs: limit async_work allocation and worker func duration
Revert "Btrfs: adjust len of writes if following a preallocated extent"
Btrfs: don't WARN() in btrfs_transaction_abort() for IO errors
btrfs: opencode chunk locking, remove helpers
btrfs: remove root parameter from transaction commit/end routines
btrfs: split btrfs_wait_marked_extents into normal and tree log functions
btrfs: take an fs_info directly when the root is not used otherwise
btrfs: simplify btrfs_wait_cache_io prototype
btrfs: convert extent-tree tracepoints to use fs_info
btrfs: root->fs_info cleanup, access fs_info->delayed_root directly
btrfs: root->fs_info cleanup, add fs_info convenience variables
btrfs: root->fs_info cleanup, update_block_group{,flags}
btrfs: root->fs_info cleanup, lock/unlock_chunks
btrfs: root->fs_info cleanup, btrfs_calc_{trans,trunc}_metadata_size
btrfs: pull node/sector/stripe sizes out of root and into fs_info
btrfs: root->fs_info cleanup, io_ctl_init
btrfs: root->fs_info cleanup, use fs_info->dev_root everywhere
btrfs: struct reada_control.root -> reada_control.fs_info
btrfs: struct btrfsic_state->root should be an fs_info
btrfs: alloc_reserved_file_extent trace point should use extent_root
...
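
Many of the "root->fs_info cleanup" commits above follow the same shape: a function that only needs the fs_info stops taking a btrfs_root and either receives the fs_info directly or derives it once from the inode. A rough, hypothetical illustration of that pattern (demo_sectorsize() is an invented name, not a function added by these patches):

```c
#include "ctree.h"      /* assumed btrfs-internal header: struct btrfs_fs_info, btrfs_sb() */

static u32 demo_sectorsize(struct inode *inode)
{
        /* Derive fs_info once from the VFS inode ... */
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

        /* ... and read fields that used to be reached as root->sectorsize. */
        return fs_info->sectorsize;
}
```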
Diffstat (limited to 'fs/btrfs/compression.c')
-rw-r--r--   fs/btrfs/compression.c   196
1 file changed, 79 insertions, 117 deletions
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d4d8b7e36b2f..7f390849343b 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -81,17 +81,17 @@ struct compressed_bio {
         u32 sums;
 };
 
-static int btrfs_decompress_biovec(int type, struct page **pages_in,
-                                   u64 disk_start, struct bio_vec *bvec,
-                                   int vcnt, size_t srclen);
+static int btrfs_decompress_bio(int type, struct page **pages_in,
+                                u64 disk_start, struct bio *orig_bio,
+                                size_t srclen);
 
-static inline int compressed_bio_size(struct btrfs_root *root,
+static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
                                       unsigned long disk_size)
 {
-        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
+        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
 
         return sizeof(struct compressed_bio) +
-                (DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
+                (DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
 }
 
 static struct bio *compressed_bio_alloc(struct block_device *bdev,
@@ -120,7 +120,7 @@ static int check_compressed_csum(struct inode *inode,
 
                 kaddr = kmap_atomic(page);
                 csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
-                btrfs_csum_final(csum, (char *)&csum);
+                btrfs_csum_final(csum, (u8 *)&csum);
                 kunmap_atomic(kaddr);
 
                 if (csum != *cb_sum) {
@@ -175,11 +175,10 @@ static void end_compressed_bio_read(struct bio *bio)
         /* ok, we're the last bio for this extent, lets start
          * the decompression.
          */
-        ret = btrfs_decompress_biovec(cb->compress_type,
+        ret = btrfs_decompress_bio(cb->compress_type,
                                       cb->compressed_pages,
                                       cb->start,
-                                      cb->orig_bio->bi_io_vec,
-                                      cb->orig_bio->bi_vcnt,
+                                      cb->orig_bio,
                                       cb->compressed_len);
 csum_failed:
         if (ret)
@@ -329,8 +328,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                                  struct page **compressed_pages,
                                  unsigned long nr_pages)
 {
+        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
         struct bio *bio = NULL;
-        struct btrfs_root *root = BTRFS_I(inode)->root;
         struct compressed_bio *cb;
         unsigned long bytes_left;
         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
@@ -342,7 +341,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
         int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
         WARN_ON(start & ((u64)PAGE_SIZE - 1));
-        cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
         if (!cb)
                 return -ENOMEM;
         atomic_set(&cb->pending_bios, 0);
@@ -356,7 +355,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
         cb->orig_bio = NULL;
         cb->nr_pages = nr_pages;
 
-        bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+        bdev = fs_info->fs_devices->latest_bdev;
 
         bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
         if (!bio) {
@@ -392,17 +391,16 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                          * freed before we're done setting it up
                          */
                         atomic_inc(&cb->pending_bios);
-                        ret = btrfs_bio_wq_end_io(root->fs_info, bio,
-                                                  BTRFS_WQ_ENDIO_DATA);
+                        ret = btrfs_bio_wq_end_io(fs_info, bio,
+                                                  BTRFS_WQ_ENDIO_DATA);
                         BUG_ON(ret); /* -ENOMEM */
 
                         if (!skip_sum) {
-                                ret = btrfs_csum_one_bio(root, inode, bio,
-                                                         start, 1);
+                                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                                 BUG_ON(ret); /* -ENOMEM */
                         }
 
-                        ret = btrfs_map_bio(root, bio, 0, 1);
+                        ret = btrfs_map_bio(fs_info, bio, 0, 1);
                         if (ret) {
                                 bio->bi_error = ret;
                                 bio_endio(bio);
@@ -418,7 +416,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
                 bio_add_page(bio, page, PAGE_SIZE, 0);
         }
         if (bytes_left < PAGE_SIZE) {
-                btrfs_info(BTRFS_I(inode)->root->fs_info,
+                btrfs_info(fs_info,
                            "bytes left %lu compress len %lu nr %lu",
                            bytes_left, cb->compressed_len, cb->nr_pages);
         }
@@ -428,15 +426,15 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
         }
         bio_get(bio);
 
-        ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
+        ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
         BUG_ON(ret); /* -ENOMEM */
 
         if (!skip_sum) {
-                ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
+                ret = btrfs_csum_one_bio(inode, bio, start, 1);
                 BUG_ON(ret); /* -ENOMEM */
         }
 
-        ret = btrfs_map_bio(root, bio, 0, 1);
+        ret = btrfs_map_bio(fs_info, bio, 0, 1);
         if (ret) {
                 bio->bi_error = ret;
                 bio_endio(bio);
@@ -446,6 +444,13 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
         return 0;
 }
 
+static u64 bio_end_offset(struct bio *bio)
+{
+        struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];
+
+        return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
+}
+
 static noinline int add_ra_bio_pages(struct inode *inode,
                                      u64 compressed_end,
                                      struct compressed_bio *cb)
@@ -464,8 +469,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
         u64 end;
         int misses = 0;
 
-        page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
-        last_offset = (page_offset(page) + PAGE_SIZE);
+        last_offset = bio_end_offset(cb->orig_bio);
         em_tree = &BTRFS_I(inode)->extent_tree;
         tree = &BTRFS_I(inode)->io_tree;
 
@@ -563,7 +567,6 @@ next:
  *
  * bio->bi_iter.bi_sector points to the compressed extent on disk
  * bio->bi_io_vec points to all of the inode pages
- * bio->bi_vcnt is a count of pages
  *
  * After the compressed pages are read, we copy the bytes into the
  * bio we were passed and then call the bio end_io calls
@@ -571,11 +574,10 @@ next:
 int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                                  int mirror_num, unsigned long bio_flags)
 {
+        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
         struct extent_io_tree *tree;
         struct extent_map_tree *em_tree;
         struct compressed_bio *cb;
-        struct btrfs_root *root = BTRFS_I(inode)->root;
-        unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
         unsigned long compressed_len;
         unsigned long nr_pages;
         unsigned long pg_index;
@@ -603,7 +605,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                 return -EIO;
 
         compressed_len = em->block_len;
-        cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
+        cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
         if (!cb)
                 goto out;
 
@@ -620,7 +622,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         free_extent_map(em);
         em = NULL;
 
-        cb->len = uncompressed_len;
+        cb->len = bio->bi_iter.bi_size;
         cb->compressed_len = compressed_len;
         cb->compress_type = extent_compress_type(bio_flags);
         cb->orig_bio = bio;
@@ -631,7 +633,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         if (!cb->compressed_pages)
                 goto fail1;
 
-        bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
+        bdev = fs_info->fs_devices->latest_bdev;
 
         for (pg_index = 0; pg_index < nr_pages; pg_index++) {
                 cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
@@ -648,8 +650,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         add_ra_bio_pages(inode, em_start + em_len, cb);
 
         /* include any pages we added in add_ra-bio_pages */
-        uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
-        cb->len = uncompressed_len;
+        cb->len = bio->bi_iter.bi_size;
 
         comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
         if (!comp_bio)
@@ -676,8 +677,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                     PAGE_SIZE) {
                         bio_get(comp_bio);
 
-                        ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
-                                                  BTRFS_WQ_ENDIO_DATA);
+                        ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
+                                                  BTRFS_WQ_ENDIO_DATA);
                         BUG_ON(ret); /* -ENOMEM */
 
                         /*
@@ -689,14 +690,14 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                         atomic_inc(&cb->pending_bios);
 
                         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
-                                ret = btrfs_lookup_bio_sums(root, inode,
-                                                        comp_bio, sums);
+                                ret = btrfs_lookup_bio_sums(inode, comp_bio,
+                                                            sums);
                                 BUG_ON(ret); /* -ENOMEM */
                         }
                         sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
-                                             root->sectorsize);
+                                             fs_info->sectorsize);
 
-                        ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
+                        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
                         if (ret) {
                                 comp_bio->bi_error = ret;
                                 bio_endio(comp_bio);
@@ -717,16 +718,15 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
         }
         bio_get(comp_bio);
 
-        ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
-                                  BTRFS_WQ_ENDIO_DATA);
+        ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
         BUG_ON(ret); /* -ENOMEM */
 
         if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
-                ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
+                ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
                 BUG_ON(ret); /* -ENOMEM */
         }
 
-        ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
+        ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
         if (ret) {
                 comp_bio->bi_error = ret;
                 bio_endio(comp_bio);
@@ -959,9 +959,7 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
  *
  * disk_start is the starting logical offset of this array in the file
  *
- * bvec is a bio_vec of pages from the file that we want to decompress into
- *
- * vcnt is the count of pages in the biovec
+ * orig_bio contains the pages from the file that we want to decompress into
  *
  * srclen is the number of bytes in pages_in
  *
@@ -970,18 +968,18 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
 * be contiguous. They all correspond to the range of bytes covered by
 * the compressed extent.
 */
-static int btrfs_decompress_biovec(int type, struct page **pages_in,
-                                   u64 disk_start, struct bio_vec *bvec,
-                                   int vcnt, size_t srclen)
+static int btrfs_decompress_bio(int type, struct page **pages_in,
+                                u64 disk_start, struct bio *orig_bio,
+                                size_t srclen)
 {
         struct list_head *workspace;
         int ret;
 
         workspace = find_workspace(type);
-        ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
-                                                         disk_start,
-                                                         bvec, vcnt, srclen);
+        ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
+                                                         disk_start, orig_bio,
+                                                         srclen);
         free_workspace(type, workspace);
         return ret;
 }
 
@@ -1021,9 +1019,7 @@ void btrfs_exit_compress(void)
 */
 int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                               unsigned long total_out, u64 disk_start,
-                              struct bio_vec *bvec, int vcnt,
-                              unsigned long *pg_index,
-                              unsigned long *pg_offset)
+                              struct bio *bio)
 {
         unsigned long buf_offset;
         unsigned long current_buf_start;
@@ -1031,13 +1027,13 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
         unsigned long working_bytes = total_out - buf_start;
         unsigned long bytes;
         char *kaddr;
-        struct page *page_out = bvec[*pg_index].bv_page;
+        struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
 
         /*
          * start byte is the first byte of the page we're currently
          * copying into relative to the start of the compressed data.
          */
-        start_byte = page_offset(page_out) - disk_start;
+        start_byte = page_offset(bvec.bv_page) - disk_start;
 
         /* we haven't yet hit data corresponding to this page */
         if (total_out <= start_byte)
@@ -1057,80 +1053,46 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 
         /* copy bytes from the working buffer into the pages */
         while (working_bytes > 0) {
-                bytes = min(PAGE_SIZE - *pg_offset,
-                            PAGE_SIZE - buf_offset);
+                bytes = min_t(unsigned long, bvec.bv_len,
+                                PAGE_SIZE - buf_offset);
                 bytes = min(bytes, working_bytes);
-                kaddr = kmap_atomic(page_out);
-                memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+
+                kaddr = kmap_atomic(bvec.bv_page);
+                memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
                 kunmap_atomic(kaddr);
-                flush_dcache_page(page_out);
+                flush_dcache_page(bvec.bv_page);
 
-                *pg_offset += bytes;
                 buf_offset += bytes;
                 working_bytes -= bytes;
                 current_buf_start += bytes;
 
                 /* check if we need to pick another page */
-                if (*pg_offset == PAGE_SIZE) {
-                        (*pg_index)++;
-                        if (*pg_index >= vcnt)
-                                return 0;
+                bio_advance(bio, bytes);
+                if (!bio->bi_iter.bi_size)
+                        return 0;
+                bvec = bio_iter_iovec(bio, bio->bi_iter);
 
-                        page_out = bvec[*pg_index].bv_page;
-                        *pg_offset = 0;
-                        start_byte = page_offset(page_out) - disk_start;
+                start_byte = page_offset(bvec.bv_page) - disk_start;
 
-                        /*
-                         * make sure our new page is covered by this
-                         * working buffer
-                         */
-                        if (total_out <= start_byte)
-                                return 1;
+                /*
+                 * make sure our new page is covered by this
+                 * working buffer
+                 */
+                if (total_out <= start_byte)
+                        return 1;
 
-                        /*
-                         * the next page in the biovec might not be adjacent
-                         * to the last page, but it might still be found
-                         * inside this working buffer. bump our offset pointer
-                         */
-                        if (total_out > start_byte &&
-                            current_buf_start < start_byte) {
-                                buf_offset = start_byte - buf_start;
-                                working_bytes = total_out - start_byte;
-                                current_buf_start = buf_start + buf_offset;
-                        }
+                /*
+                 * the next page in the biovec might not be adjacent
+                 * to the last page, but it might still be found
+                 * inside this working buffer. bump our offset pointer
+                 */
+                if (total_out > start_byte &&
+                    current_buf_start < start_byte) {
+                        buf_offset = start_byte - buf_start;
+                        working_bytes = total_out - start_byte;
+                        current_buf_start = buf_start + buf_offset;
                 }
         }
 
         return 1;
 }
-
-/*
- * When uncompressing data, we need to make sure and zero any parts of
- * the biovec that were not filled in by the decompression code. pg_index
- * and pg_offset indicate the last page and the last offset of that page
- * that have been filled in. This will zero everything remaining in the
- * biovec.
- */
-void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
-                                   unsigned long pg_index,
-                                   unsigned long pg_offset)
-{
-        while (pg_index < vcnt) {
-                struct page *page = bvec[pg_index].bv_page;
-                unsigned long off = bvec[pg_index].bv_offset;
-                unsigned long len = bvec[pg_index].bv_len;
-
-                if (pg_offset < off)
-                        pg_offset = off;
-                if (pg_offset < off + len) {
-                        unsigned long bytes = off + len - pg_offset;
-                        char *kaddr;
-
-                        kaddr = kmap_atomic(page);
-                        memset(kaddr + pg_offset, 0, bytes);
-                        kunmap_atomic(kaddr);
-                }
-                pg_index++;
-                pg_offset = 0;
-        }
-}
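
For context on the largest hunk above: the rewritten btrfs_decompress_buf2page() no longer threads pg_index/pg_offset through its callers. It reads the current segment with bio_iter_iovec() and lets bio_advance() move bio->bi_iter forward. Below is a standalone sketch of that iteration pattern only; copy_buf_into_bio() is a hypothetical name and not part of the btrfs patches:

```c
#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/string.h>

/* Copy up to len bytes of buf into the pages of bio, segment by segment. */
static size_t copy_buf_into_bio(const char *buf, size_t len, struct bio *bio)
{
        size_t copied = 0;

        while (copied < len && bio->bi_iter.bi_size) {
                /* current segment according to the bio's own iterator */
                struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);
                size_t bytes = min_t(size_t, bvec.bv_len, len - copied);
                char *kaddr = kmap_atomic(bvec.bv_page);

                memcpy(kaddr + bvec.bv_offset, buf + copied, bytes);
                kunmap_atomic(kaddr);
                flush_dcache_page(bvec.bv_page);

                copied += bytes;
                /* advance bi_iter past the bytes just filled */
                bio_advance(bio, bytes);
        }
        return copied;
}
```

Because bio_advance() also shrinks bi_iter.bi_size, checking !bio->bi_iter.bi_size is what replaces the old pg_index >= vcnt test in the new loop.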