Diffstat (limited to 'fs/btrfs')
 fs/btrfs/check-integrity.c |  10
 fs/btrfs/compression.c     |  29
 fs/btrfs/disk-io.c         |  35
 fs/btrfs/extent_io.c       |  39
 fs/btrfs/inode.c           |  53
 fs/btrfs/qgroup.c          |   7
 fs/btrfs/raid56.c          |  64
 fs/btrfs/scrub.c           |  42
 fs/btrfs/super.c           |   3
 fs/btrfs/transaction.c     |   8
 fs/btrfs/volumes.c         | 105
 11 files changed, 143 insertions(+), 252 deletions(-)
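
Nearly every hunk below applies the same conversion: bio completion callbacks drop their int error argument and read bio->bi_error instead, and callers that used to pass an error to bio_endio() now store it in bi_error before calling bio_endio(bio). A minimal, self-contained sketch of that convention follows; the struct and helpers are simplified userspace stand-ins for illustration, not the kernel definitions:

/*
 * Illustration only: a userspace stand-in, not the kernel's struct bio.
 */
#include <stdio.h>

struct bio {
	int   bi_error;                  /* new in this series: status lives in the bio */
	void *bi_private;
	void (*bi_end_io)(struct bio *); /* completion callback, no error argument */
};

/* New-style completion handler: read bio->bi_error instead of an err parameter. */
static void my_end_io(struct bio *bio)
{
	if (bio->bi_error)
		fprintf(stderr, "I/O failed: %d\n", bio->bi_error);
	else
		printf("I/O completed\n");
}

/* bio_endio() no longer takes an error; callers set bi_error beforehand. */
static void bio_endio(struct bio *bio)
{
	if (bio->bi_end_io)
		bio->bi_end_io(bio);
}

int main(void)
{
	struct bio bio = { .bi_end_io = my_end_io };

	/* Old pattern (removed by these patches): bio_endio(&bio, -5);  */
	/* New pattern: record the error in the bio, then complete it.   */
	bio.bi_error = -5;	/* stands in for -EIO */
	bio_endio(&bio);
	return 0;
}

The same shape appears in compression.c, disk-io.c, extent_io.c, inode.c, raid56.c, scrub.c and volumes.c; the remaining hunks drop the bio_get_nr_vecs()/merge_bvec_fn sizing logic in favour of allocating bios with BIO_MAX_PAGES, plus a few unrelated cleanups (qgroup reserved-byte accounting, misc_deregister(), the sb_writers lockdep helpers).
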
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c index ce7dec88f4b8..541fbfaed276 100644 --- a/fs/btrfs/check-integrity.c +++ b/fs/btrfs/check-integrity.c @@ -343,7 +343,7 @@ static int btrfsic_process_written_superblock( struct btrfsic_state *state, struct btrfsic_block *const block, struct btrfs_super_block *const super_hdr); -static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status); +static void btrfsic_bio_end_io(struct bio *bp); static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate); static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state, const struct btrfsic_block *block, @@ -2207,7 +2207,7 @@ continue_loop: goto again; } -static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status) +static void btrfsic_bio_end_io(struct bio *bp) { struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private; int iodone_w_error; @@ -2215,7 +2215,7 @@ static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status) /* mutex is not held! This is not save if IO is not yet completed * on umount */ iodone_w_error = 0; - if (bio_error_status) + if (bp->bi_error) iodone_w_error = 1; BUG_ON(NULL == block); @@ -2230,7 +2230,7 @@ static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status) BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) printk(KERN_INFO "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n", - bio_error_status, + bp->bi_error, btrfsic_get_block_type(dev_state->state, block), block->logical_bytenr, dev_state->name, block->dev_bytenr, block->mirror_num); @@ -2252,7 +2252,7 @@ static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status) block = next_block; } while (NULL != block); - bp->bi_end_io(bp, bio_error_status); + bp->bi_end_io(bp); } static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate) diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index ce62324c78e7..57ee8ca29b06 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -97,10 +97,7 @@ static inline int compressed_bio_size(struct btrfs_root *root, static struct bio *compressed_bio_alloc(struct block_device *bdev, u64 first_byte, gfp_t gfp_flags) { - int nr_vecs; - - nr_vecs = bio_get_nr_vecs(bdev); - return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags); + return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags); } static int check_compressed_csum(struct inode *inode, @@ -152,7 +149,7 @@ fail: * The compressed pages are freed here, and it must be run * in process context */ -static void end_compressed_bio_read(struct bio *bio, int err) +static void end_compressed_bio_read(struct bio *bio) { struct compressed_bio *cb = bio->bi_private; struct inode *inode; @@ -160,7 +157,7 @@ static void end_compressed_bio_read(struct bio *bio, int err) unsigned long index; int ret; - if (err) + if (bio->bi_error) cb->errors = 1; /* if there are more bios still pending for this compressed @@ -210,7 +207,7 @@ csum_failed: bio_for_each_segment_all(bvec, cb->orig_bio, i) SetPageChecked(bvec->bv_page); - bio_endio(cb->orig_bio, 0); + bio_endio(cb->orig_bio); } /* finally free the cb struct */ @@ -266,7 +263,7 @@ static noinline void end_compressed_writeback(struct inode *inode, * This also calls the writeback end hooks for the file pages so that * metadata and checksums can be updated in the file. 
*/ -static void end_compressed_bio_write(struct bio *bio, int err) +static void end_compressed_bio_write(struct bio *bio) { struct extent_io_tree *tree; struct compressed_bio *cb = bio->bi_private; @@ -274,7 +271,7 @@ static void end_compressed_bio_write(struct bio *bio, int err) struct page *page; unsigned long index; - if (err) + if (bio->bi_error) cb->errors = 1; /* if there are more bios still pending for this compressed @@ -293,7 +290,7 @@ static void end_compressed_bio_write(struct bio *bio, int err) cb->start, cb->start + cb->len - 1, NULL, - err ? 0 : 1); + bio->bi_error ? 0 : 1); cb->compressed_pages[0]->mapping = NULL; end_compressed_writeback(inode, cb); @@ -697,8 +694,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); - if (ret) - bio_endio(comp_bio, ret); + if (ret) { + bio->bi_error = ret; + bio_endio(comp_bio); + } bio_put(comp_bio); @@ -724,8 +723,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, } ret = btrfs_map_bio(root, READ, comp_bio, mirror_num, 0); - if (ret) - bio_endio(comp_bio, ret); + if (ret) { + bio->bi_error = ret; + bio_endio(comp_bio); + } bio_put(comp_bio); return 0; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index aa59871885da..295795aebe0b 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -703,7 +703,7 @@ static int btree_io_failed_hook(struct page *page, int failed_mirror) return -EIO; /* we fixed nothing */ } -static void end_workqueue_bio(struct bio *bio, int err) +static void end_workqueue_bio(struct bio *bio) { struct btrfs_end_io_wq *end_io_wq = bio->bi_private; struct btrfs_fs_info *fs_info; @@ -711,7 +711,7 @@ static void end_workqueue_bio(struct bio *bio, int err) btrfs_work_func_t func; fs_info = end_io_wq->info; - end_io_wq->error = err; + end_io_wq->error = bio->bi_error; if (bio->bi_rw & REQ_WRITE) { if (end_io_wq->metadata == BTRFS_WQ_ENDIO_METADATA) { @@ -808,7 +808,8 @@ static void run_one_async_done(struct btrfs_work *work) /* If an error occured we just want to clean up the bio and move on */ if (async->error) { - bio_endio(async->bio, async->error); + async->bio->bi_error = async->error; + bio_endio(async->bio); return; } @@ -908,8 +909,10 @@ static int __btree_submit_bio_done(struct inode *inode, int rw, struct bio *bio, * submission context. 
Just jump into btrfs_map_bio */ ret = btrfs_map_bio(BTRFS_I(inode)->root, rw, bio, mirror_num, 1); - if (ret) - bio_endio(bio, ret); + if (ret) { + bio->bi_error = ret; + bio_endio(bio); + } return ret; } @@ -960,10 +963,13 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, __btree_submit_bio_done); } - if (ret) { + if (ret) + goto out_w_error; + return 0; + out_w_error: - bio_endio(bio, ret); - } + bio->bi_error = ret; + bio_endio(bio); return ret; } @@ -1736,16 +1742,15 @@ static void end_workqueue_fn(struct btrfs_work *work) { struct bio *bio; struct btrfs_end_io_wq *end_io_wq; - int error; end_io_wq = container_of(work, struct btrfs_end_io_wq, work); bio = end_io_wq->bio; - error = end_io_wq->error; + bio->bi_error = end_io_wq->error; bio->bi_private = end_io_wq->private; bio->bi_end_io = end_io_wq->end_io; kmem_cache_free(btrfs_end_io_wq_cache, end_io_wq); - bio_endio(bio, error); + bio_endio(bio); } static int cleaner_kthread(void *arg) @@ -3325,10 +3330,8 @@ static int write_dev_supers(struct btrfs_device *device, * endio for the write_dev_flush, this will wake anyone waiting * for the barrier when it is done */ -static void btrfs_end_empty_barrier(struct bio *bio, int err) +static void btrfs_end_empty_barrier(struct bio *bio) { - if (err) - clear_bit(BIO_UPTODATE, &bio->bi_flags); if (bio->bi_private) complete(bio->bi_private); bio_put(bio); @@ -3356,8 +3359,8 @@ static int write_dev_flush(struct btrfs_device *device, int wait) wait_for_completion(&device->flush_wait); - if (!bio_flagged(bio, BIO_UPTODATE)) { - ret = -EIO; + if (bio->bi_error) { + ret = bio->bi_error; btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 11aa8f743b90..e2357e31609a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2486,7 +2486,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end) * Scheduling is not allowed, so the extent state tree is expected * to have one and only one object corresponding to this IO. */ -static void end_bio_extent_writepage(struct bio *bio, int err) +static void end_bio_extent_writepage(struct bio *bio) { struct bio_vec *bvec; u64 start; @@ -2516,7 +2516,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err) start = page_offset(page); end = start + bvec->bv_offset + bvec->bv_len - 1; - if (end_extent_writepage(page, err, start, end)) + if (end_extent_writepage(page, bio->bi_error, start, end)) continue; end_page_writeback(page); @@ -2548,10 +2548,10 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len, * Scheduling is not allowed, so the extent state tree is expected * to have one and only one object corresponding to this IO. 
*/ -static void end_bio_extent_readpage(struct bio *bio, int err) +static void end_bio_extent_readpage(struct bio *bio) { struct bio_vec *bvec; - int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); + int uptodate = !bio->bi_error; struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); struct extent_io_tree *tree; u64 offset = 0; @@ -2564,16 +2564,13 @@ static void end_bio_extent_readpage(struct bio *bio, int err) int ret; int i; - if (err) - uptodate = 0; - bio_for_each_segment_all(bvec, bio, i) { struct page *page = bvec->bv_page; struct inode *inode = page->mapping->host; pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " - "mirror=%u\n", (u64)bio->bi_iter.bi_sector, err, - io_bio->mirror_num); + "mirror=%u\n", (u64)bio->bi_iter.bi_sector, + bio->bi_error, io_bio->mirror_num); tree = &BTRFS_I(inode)->io_tree; /* We always issue full-page reads, but if some block @@ -2614,8 +2611,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (tree->ops && tree->ops->readpage_io_failed_hook) { ret = tree->ops->readpage_io_failed_hook(page, mirror); - if (!ret && !err && - test_bit(BIO_UPTODATE, &bio->bi_flags)) + if (!ret && !bio->bi_error) uptodate = 1; } else { /* @@ -2631,10 +2627,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err) ret = bio_readpage_error(bio, offset, page, start, end, mirror); if (ret == 0) { - uptodate = - test_bit(BIO_UPTODATE, &bio->bi_flags); - if (err) - uptodate = 0; + uptodate = !bio->bi_error; offset += len; continue; } @@ -2684,7 +2677,7 @@ readpage_ok: endio_readpage_release_extent(tree, extent_start, extent_len, uptodate); if (io_bio->end_io) - io_bio->end_io(io_bio, err); + io_bio->end_io(io_bio, bio->bi_error); bio_put(bio); } @@ -2810,9 +2803,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, { int ret = 0; struct bio *bio; - int nr; int contig = 0; - int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED; int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED; size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE); @@ -2840,12 +2831,9 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, return 0; } } - if (this_compressed) - nr = BIO_MAX_PAGES; - else - nr = bio_get_nr_vecs(bdev); - bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH); + bio = btrfs_bio_alloc(bdev, sector, BIO_MAX_PAGES, + GFP_NOFS | __GFP_HIGH); if (!bio) return -ENOMEM; @@ -3758,7 +3746,7 @@ static void set_btree_ioerr(struct page *page) } } -static void end_bio_extent_buffer_writepage(struct bio *bio, int err) +static void end_bio_extent_buffer_writepage(struct bio *bio) { struct bio_vec *bvec; struct extent_buffer *eb; @@ -3771,7 +3759,8 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err) BUG_ON(!eb); done = atomic_dec_and_test(&eb->io_pages); - if (err || test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { + if (bio->bi_error || + test_bit(EXTENT_BUFFER_WRITE_ERR, &eb->bflags)) { ClearPageUptodate(page); set_btree_ioerr(page); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b7e439bf5e4f..611b66d73e80 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1845,8 +1845,10 @@ static int __btrfs_submit_bio_done(struct inode *inode, int rw, struct bio *bio, int ret; ret = btrfs_map_bio(root, rw, bio, mirror_num, 1); - if (ret) - bio_endio(bio, ret); + if (ret) { + bio->bi_error = ret; + bio_endio(bio); + } return ret; } @@ -1906,8 +1908,10 @@ mapit: ret = btrfs_map_bio(root, rw, bio, mirror_num, 0); out: - if (ret < 0) - bio_endio(bio, ret); + if (ret < 0) { + bio->bi_error 
= ret; + bio_endio(bio); + } return ret; } @@ -7722,13 +7726,13 @@ struct btrfs_retry_complete { int uptodate; }; -static void btrfs_retry_endio_nocsum(struct bio *bio, int err) +static void btrfs_retry_endio_nocsum(struct bio *bio) { struct btrfs_retry_complete *done = bio->bi_private; struct bio_vec *bvec; int i; - if (err) + if (bio->bi_error) goto end; done->uptodate = 1; @@ -7777,7 +7781,7 @@ try_again: return 0; } -static void btrfs_retry_endio(struct bio *bio, int err) +static void btrfs_retry_endio(struct bio *bio) { struct btrfs_retry_complete *done = bio->bi_private; struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); @@ -7786,7 +7790,7 @@ static void btrfs_retry_endio(struct bio *bio, int err) int ret; int i; - if (err) + if (bio->bi_error) goto end; uptodate = 1; @@ -7869,12 +7873,13 @@ static int btrfs_subio_endio_read(struct inode *inode, } } -static void btrfs_endio_direct_read(struct bio *bio, int err) +static void btrfs_endio_direct_read(struct bio *bio) { struct btrfs_dio_private *dip = bio->bi_private; struct inode *inode = dip->inode; struct bio *dio_bio; struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); + int err = bio->bi_error; if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED) err = btrfs_subio_endio_read(inode, io_bio, err); @@ -7885,17 +7890,14 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) kfree(dip); - /* If we had a csum failure make sure to clear the uptodate flag */ - if (err) - clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); - dio_end_io(dio_bio, err); + dio_end_io(dio_bio, bio->bi_error); if (io_bio->end_io) io_bio->end_io(io_bio, err); bio_put(bio); } -static void btrfs_endio_direct_write(struct bio *bio, int err) +static void btrfs_endio_direct_write(struct bio *bio) { struct btrfs_dio_private *dip = bio->bi_private; struct inode *inode = dip->inode; @@ -7909,7 +7911,8 @@ static void btrfs_endio_direct_write(struct bio *bio, int err) again: ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, &ordered_offset, - ordered_bytes, !err); + ordered_bytes, + !bio->bi_error); if (!ret) goto out_test; @@ -7932,10 +7935,7 @@ out_test: kfree(dip); - /* If we had an error make sure to clear the uptodate flag */ - if (err) - clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); - dio_end_io(dio_bio, err); + dio_end_io(dio_bio, bio->bi_error); bio_put(bio); } @@ -7950,9 +7950,10 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, return 0; } -static void btrfs_end_dio_bio(struct bio *bio, int err) +static void btrfs_end_dio_bio(struct bio *bio) { struct btrfs_dio_private *dip = bio->bi_private; + int err = bio->bi_error; if (err) btrfs_warn(BTRFS_I(dip->inode)->root->fs_info, @@ -7981,8 +7982,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) if (dip->errors) { bio_io_error(dip->orig_bio); } else { - set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags); - bio_endio(dip->orig_bio, 0); + dip->dio_bio->bi_error = 0; + bio_endio(dip->orig_bio); } out: bio_put(bio); @@ -7991,9 +7992,8 @@ out: static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, u64 first_sector, gfp_t gfp_flags) { - int nr_vecs = bio_get_nr_vecs(bdev); struct bio *bio; - bio = btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags); + bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags); if (bio) bio_associate_current(bio); return bio; @@ -8257,7 +8257,8 @@ free_ordered: * callbacks - they require an allocated dip and a clone of dio_bio. 
*/ if (io_bio && dip) { - bio_endio(io_bio, ret); + io_bio->bi_error = -EIO; + bio_endio(io_bio); /* * The end io callbacks free our dip, do the final put on io_bio * and all the cleanup and final put for dio_bio (through @@ -8284,7 +8285,7 @@ free_ordered: unlock_extent(&BTRFS_I(inode)->io_tree, file_offset, file_offset + dio_bio->bi_iter.bi_size - 1); } - clear_bit(BIO_UPTODATE, &dio_bio->bi_flags); + dio_bio->bi_error = -EIO; /* * Releases and cleans up our dio_bio, no need to bio_put() * nor bio_endio()/bio_io_error() against dio_bio. diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index e9ace099162c..d904ee1c5349 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -376,7 +376,7 @@ int btrfs_read_qgroup_config(struct btrfs_fs_info *fs_info) qgroup = find_qgroup_rb(fs_info, found_key.offset); if ((qgroup && found_key.type == BTRFS_QGROUP_INFO_KEY) || (!qgroup && found_key.type == BTRFS_QGROUP_LIMIT_KEY)) { - btrfs_err(fs_info, "inconsitent qgroup config"); + btrfs_err(fs_info, "inconsistent qgroup config"); flags |= BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT; } if (!qgroup) { @@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info, /* Exclusive -> exclusive, nothing changed */ } } + + /* For exclusive extent, free its reserved bytes too */ + if (nr_old_roots == 0 && nr_new_roots == 1 && + cur_new_count == nr_new_roots) + qg->reserved -= num_bytes; if (dirty) qgroup_dirty(fs_info, qg); } diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 6fe2613ef288..fcf7265ca46f 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -859,7 +859,7 @@ static void free_raid_bio(struct btrfs_raid_bio *rbio) * this frees the rbio and runs through all the bios in the * bio_list and calls end_io on them */ -static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate) +static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err) { struct bio *cur = bio_list_get(&rbio->bio_list); struct bio *next; @@ -872,9 +872,8 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate) while (cur) { next = cur->bi_next; cur->bi_next = NULL; - if (uptodate) - set_bit(BIO_UPTODATE, &cur->bi_flags); - bio_endio(cur, err); + cur->bi_error = err; + bio_endio(cur); cur = next; } } @@ -883,9 +882,10 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate) * end io function used by finish_rmw. 
When we finally * get here, we've written a full stripe */ -static void raid_write_end_io(struct bio *bio, int err) +static void raid_write_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; + int err = bio->bi_error; if (err) fail_bio_stripe(rbio, bio); @@ -901,7 +901,7 @@ static void raid_write_end_io(struct bio *bio, int err) if (atomic_read(&rbio->error) > rbio->bbio->max_errors) err = -EIO; - rbio_orig_end_io(rbio, err, 0); + rbio_orig_end_io(rbio, err); return; } @@ -1079,7 +1079,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, * devices or if they are not contiguous */ if (last_end == disk_start && stripe->dev->bdev && - test_bit(BIO_UPTODATE, &last->bi_flags) && + !last->bi_error && last->bi_bdev == stripe->dev->bdev) { ret = bio_add_page(last, page, PAGE_CACHE_SIZE, 0); if (ret == PAGE_CACHE_SIZE) @@ -1095,7 +1095,6 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio, bio->bi_iter.bi_size = 0; bio->bi_bdev = stripe->dev->bdev; bio->bi_iter.bi_sector = disk_start >> 9; - set_bit(BIO_UPTODATE, &bio->bi_flags); bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); bio_list_add(bio_list, bio); @@ -1320,13 +1319,12 @@ write_data: bio->bi_private = rbio; bio->bi_end_io = raid_write_end_io; - BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); submit_bio(WRITE, bio); } return; cleanup: - rbio_orig_end_io(rbio, -EIO, 0); + rbio_orig_end_io(rbio, -EIO); } /* @@ -1449,11 +1447,11 @@ static void set_bio_pages_uptodate(struct bio *bio) * This will usually kick off finish_rmw once all the bios are read in, but it * may trigger parity reconstruction if we had any errors along the way */ -static void raid_rmw_end_io(struct bio *bio, int err) +static void raid_rmw_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; - if (err) + if (bio->bi_error) fail_bio_stripe(rbio, bio); else set_bio_pages_uptodate(bio); @@ -1463,7 +1461,6 @@ static void raid_rmw_end_io(struct bio *bio, int err) if (!atomic_dec_and_test(&rbio->stripes_pending)) return; - err = 0; if (atomic_read(&rbio->error) > rbio->bbio->max_errors) goto cleanup; @@ -1477,7 +1474,7 @@ static void raid_rmw_end_io(struct bio *bio, int err) cleanup: - rbio_orig_end_io(rbio, -EIO, 0); + rbio_orig_end_io(rbio, -EIO); } static void async_rmw_stripe(struct btrfs_raid_bio *rbio) @@ -1580,14 +1577,13 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); - BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); submit_bio(READ, bio); } /* the actual write will happen once the reads are done */ return 0; cleanup: - rbio_orig_end_io(rbio, -EIO, 0); + rbio_orig_end_io(rbio, -EIO); return -EIO; finish: @@ -1975,9 +1971,9 @@ cleanup_io: else clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); - rbio_orig_end_io(rbio, err, err == 0); + rbio_orig_end_io(rbio, err); } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) { - rbio_orig_end_io(rbio, err, err == 0); + rbio_orig_end_io(rbio, err); } else if (err == 0) { rbio->faila = -1; rbio->failb = -1; @@ -1989,7 +1985,7 @@ cleanup_io: else BUG(); } else { - rbio_orig_end_io(rbio, err, 0); + rbio_orig_end_io(rbio, err); } } @@ -1997,7 +1993,7 @@ cleanup_io: * This is called only for stripes we've read from disk to * reconstruct the parity. 
*/ -static void raid_recover_end_io(struct bio *bio, int err) +static void raid_recover_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; @@ -2005,7 +2001,7 @@ static void raid_recover_end_io(struct bio *bio, int err) * we only read stripe pages off the disk, set them * up to date if there were no errors */ - if (err) + if (bio->bi_error) fail_bio_stripe(rbio, bio); else set_bio_pages_uptodate(bio); @@ -2015,7 +2011,7 @@ static void raid_recover_end_io(struct bio *bio, int err) return; if (atomic_read(&rbio->error) > rbio->bbio->max_errors) - rbio_orig_end_io(rbio, -EIO, 0); + rbio_orig_end_io(rbio, -EIO); else __raid_recover_end_io(rbio); } @@ -2107,7 +2103,6 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); - BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); submit_bio(READ, bio); } out: @@ -2116,7 +2111,7 @@ out: cleanup: if (rbio->operation == BTRFS_RBIO_READ_REBUILD || rbio->operation == BTRFS_RBIO_REBUILD_MISSING) - rbio_orig_end_io(rbio, -EIO, 0); + rbio_orig_end_io(rbio, -EIO); return -EIO; } @@ -2292,11 +2287,12 @@ static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio) * end io function used by finish_rmw. When we finally * get here, we've written a full stripe */ -static void raid_write_parity_end_io(struct bio *bio, int err) +static void raid_write_parity_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; + int err = bio->bi_error; - if (err) + if (bio->bi_error) fail_bio_stripe(rbio, bio); bio_put(bio); @@ -2309,7 +2305,7 @@ static void raid_write_parity_end_io(struct bio *bio, int err) if (atomic_read(&rbio->error)) err = -EIO; - rbio_orig_end_io(rbio, err, 0); + rbio_orig_end_io(rbio, err); } static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio, @@ -2452,7 +2448,7 @@ submit_write: nr_data = bio_list_size(&bio_list); if (!nr_data) { /* Every parity is right */ - rbio_orig_end_io(rbio, 0, 0); + rbio_orig_end_io(rbio, 0); return; } @@ -2465,13 +2461,12 @@ submit_write: bio->bi_private = rbio; bio->bi_end_io = raid_write_parity_end_io; - BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); submit_bio(WRITE, bio); } return; cleanup: - rbio_orig_end_io(rbio, -EIO, 0); + rbio_orig_end_io(rbio, -EIO); } static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe) @@ -2539,7 +2534,7 @@ static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio) return; cleanup: - rbio_orig_end_io(rbio, -EIO, 0); + rbio_orig_end_io(rbio, -EIO); } /* @@ -2550,11 +2545,11 @@ cleanup: * This will usually kick off finish_rmw once all the bios are read in, but it * may trigger parity reconstruction if we had any errors along the way */ -static void raid56_parity_scrub_end_io(struct bio *bio, int err) +static void raid56_parity_scrub_end_io(struct bio *bio) { struct btrfs_raid_bio *rbio = bio->bi_private; - if (err) + if (bio->bi_error) fail_bio_stripe(rbio, bio); else set_bio_pages_uptodate(bio); @@ -2647,14 +2642,13 @@ static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio) btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); - BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags)); submit_bio(READ, bio); } /* the actual write will happen once the reads are done */ return; cleanup: - rbio_orig_end_io(rbio, -EIO, 0); + rbio_orig_end_io(rbio, -EIO); return; finish: diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index d64f557ea91e..a39f5d1144e8 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -279,7 +279,7 @@ 
static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, u64 physical, struct btrfs_device *dev, u64 flags, u64 gen, int mirror_num, u8 *csum, int force, u64 physical_for_dev_replace); -static void scrub_bio_end_io(struct bio *bio, int err); +static void scrub_bio_end_io(struct bio *bio); static void scrub_bio_end_io_worker(struct btrfs_work *work); static void scrub_block_complete(struct scrub_block *sblock); static void scrub_remap_extent(struct btrfs_fs_info *fs_info, @@ -296,7 +296,7 @@ static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx); static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, struct scrub_page *spage); static void scrub_wr_submit(struct scrub_ctx *sctx); -static void scrub_wr_bio_end_io(struct bio *bio, int err); +static void scrub_wr_bio_end_io(struct bio *bio); static void scrub_wr_bio_end_io_worker(struct btrfs_work *work); static int write_page_nocow(struct scrub_ctx *sctx, u64 physical_for_dev_replace, struct page *page); @@ -464,27 +464,14 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) struct scrub_ctx *sctx; int i; struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; - int pages_per_rd_bio; int ret; - /* - * the setting of pages_per_rd_bio is correct for scrub but might - * be wrong for the dev_replace code where we might read from - * different devices in the initial huge bios. However, that - * code is able to correctly handle the case when adding a page - * to a bio fails. - */ - if (dev->bdev) - pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO, - bio_get_nr_vecs(dev->bdev)); - else - pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; sctx = kzalloc(sizeof(*sctx), GFP_NOFS); if (!sctx) goto nomem; atomic_set(&sctx->refs, 1); sctx->is_dev_replace = is_dev_replace; - sctx->pages_per_rd_bio = pages_per_rd_bio; + sctx->pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO; sctx->curr = -1; sctx->dev_root = dev->dev_root; for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) { @@ -1439,11 +1426,11 @@ struct scrub_bio_ret { int error; }; -static void scrub_bio_wait_endio(struct bio *bio, int error) +static void scrub_bio_wait_endio(struct bio *bio) { struct scrub_bio_ret *ret = bio->bi_private; - ret->error = error; + ret->error = bio->bi_error; complete(&ret->event); } @@ -1800,12 +1787,12 @@ static void scrub_wr_submit(struct scrub_ctx *sctx) btrfsic_submit_bio(WRITE, sbio->bio); } -static void scrub_wr_bio_end_io(struct bio *bio, int err) +static void scrub_wr_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info; - sbio->err = err; + sbio->err = bio->bi_error; sbio->bio = bio; btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper, @@ -2174,12 +2161,12 @@ again: return 0; } -static void scrub_missing_raid56_end_io(struct bio *bio, int error) +static void scrub_missing_raid56_end_io(struct bio *bio) { struct scrub_block *sblock = bio->bi_private; struct btrfs_fs_info *fs_info = sblock->sctx->dev_root->fs_info; - if (error) + if (bio->bi_error) sblock->no_io_error_seen = 0; btrfs_queue_work(fs_info->scrub_workers, &sblock->work); @@ -2392,12 +2379,12 @@ leave_nomem: return 0; } -static void scrub_bio_end_io(struct bio *bio, int err) +static void scrub_bio_end_io(struct bio *bio) { struct scrub_bio *sbio = bio->bi_private; struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info; - sbio->err = err; + sbio->err = bio->bi_error; sbio->bio = bio; btrfs_queue_work(fs_info->scrub_workers, &sbio->work); @@ -2809,11 +2796,11 @@ static void 
scrub_parity_bio_endio_worker(struct btrfs_work *work) scrub_pending_bio_dec(sctx); } -static void scrub_parity_bio_endio(struct bio *bio, int error) +static void scrub_parity_bio_endio(struct bio *bio) { struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private; - if (error) + if (bio->bi_error) bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); @@ -4066,8 +4053,7 @@ static int scrub_setup_wr_ctx(struct scrub_ctx *sctx, return 0; WARN_ON(!dev->bdev); - wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO, - bio_get_nr_vecs(dev->bdev)); + wr_ctx->pages_per_wr_bio = SCRUB_PAGES_PER_WR_BIO; wr_ctx->tgtdev = dev; atomic_set(&wr_ctx->flush_all_writes, 0); return 0; diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 5a186d7180d3..11d1eab9234d 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -2173,8 +2173,7 @@ static int btrfs_interface_init(void) static void btrfs_interface_exit(void) { - if (misc_deregister(&btrfs_misc) < 0) - printk(KERN_INFO "BTRFS: misc_deregister failed for control device\n"); + misc_deregister(&btrfs_misc); } static void btrfs_print_info(void) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index a2d6f7bcef6c..74bc3338418b 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1672,9 +1672,7 @@ static void do_async_commit(struct work_struct *work) * Tell lockdep about it. */ if (ac->newtrans->type & __TRANS_FREEZABLE) - rwsem_acquire_read( - &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], - 0, 1, _THIS_IP_); + __sb_writers_acquired(ac->root->fs_info->sb, SB_FREEZE_FS); current->journal_info = ac->newtrans; @@ -1713,9 +1711,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, * async commit thread will be the one to unlock it. 
*/ if (ac->newtrans->type & __TRANS_FREEZABLE) - rwsem_release( - &root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1], - 1, _THIS_IP_); + __sb_writers_release(root->fs_info->sb, SB_FREEZE_FS); schedule_work(&ac->work); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 644e070b3bd2..6fc735869c18 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5737,23 +5737,23 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, return 0; } -static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err) +static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio) { bio->bi_private = bbio->private; bio->bi_end_io = bbio->end_io; - bio_endio(bio, err); + bio_endio(bio); btrfs_put_bbio(bbio); } -static void btrfs_end_bio(struct bio *bio, int err) +static void btrfs_end_bio(struct bio *bio) { struct btrfs_bio *bbio = bio->bi_private; int is_orig_bio = 0; - if (err) { + if (bio->bi_error) { atomic_inc(&bbio->error); - if (err == -EIO || err == -EREMOTEIO) { + if (bio->bi_error == -EIO || bio->bi_error == -EREMOTEIO) { unsigned int stripe_index = btrfs_io_bio(bio)->stripe_index; struct btrfs_device *dev; @@ -5791,17 +5791,16 @@ static void btrfs_end_bio(struct bio *bio, int err) * beyond the tolerance of the btrfs bio */ if (atomic_read(&bbio->error) > bbio->max_errors) { - err = -EIO; + bio->bi_error = -EIO; } else { /* * this bio is actually up to date, we didn't * go over the max number of errors */ - set_bit(BIO_UPTODATE, &bio->bi_flags); - err = 0; + bio->bi_error = 0; } - btrfs_end_bbio(bbio, bio, err); + btrfs_end_bbio(bbio, bio); } else if (!is_orig_bio) { bio_put(bio); } @@ -5822,7 +5821,7 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root, struct btrfs_pending_bios *pending_bios; if (device->missing || !device->bdev) { - bio_endio(bio, -EIO); + bio_io_error(bio); return; } @@ -5867,34 +5866,6 @@ static noinline void btrfs_schedule_bio(struct btrfs_root *root, &device->work); } -static int bio_size_ok(struct block_device *bdev, struct bio *bio, - sector_t sector) -{ - struct bio_vec *prev; - struct request_queue *q = bdev_get_queue(bdev); - unsigned int max_sectors = queue_max_sectors(q); - struct bvec_merge_data bvm = { - .bi_bdev = bdev, - .bi_sector = sector, - .bi_rw = bio->bi_rw, - }; - - if (WARN_ON(bio->bi_vcnt == 0)) - return 1; - - prev = &bio->bi_io_vec[bio->bi_vcnt - 1]; - if (bio_sectors(bio) > max_sectors) - return 0; - - if (!q->merge_bvec_fn) - return 1; - - bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len; - if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) - return 0; - return 1; -} - static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, struct bio *bio, u64 physical, int dev_nr, int rw, int async) @@ -5928,48 +5899,6 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, btrfsic_submit_bio(rw, bio); } -static int breakup_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio, - struct bio *first_bio, struct btrfs_device *dev, - int dev_nr, int rw, int async) -{ - struct bio_vec *bvec = first_bio->bi_io_vec; - struct bio *bio; - int nr_vecs = bio_get_nr_vecs(dev->bdev); - u64 physical = bbio->stripes[dev_nr].physical; - -again: - bio = btrfs_bio_alloc(dev->bdev, physical >> 9, nr_vecs, GFP_NOFS); - if (!bio) - return -ENOMEM; - -#ifdef CONFIG_BLK_CGROUP - if (first_bio->bi_ioc) { - get_io_context_active(first_bio->bi_ioc); - bio->bi_ioc = first_bio->bi_ioc; - } - if (first_bio->bi_css) { - css_get(first_bio->bi_css); - bio->bi_css = 
first_bio->bi_css; - } -#endif - while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) { - if (bio_add_page(bio, bvec->bv_page, bvec->bv_len, - bvec->bv_offset) < bvec->bv_len) { - u64 len = bio->bi_iter.bi_size; - - atomic_inc(&bbio->stripes_pending); - submit_stripe_bio(root, bbio, bio, physical, dev_nr, - rw, async); - physical += len; - goto again; - } - bvec++; - } - - submit_stripe_bio(root, bbio, bio, physical, dev_nr, rw, async); - return 0; -} - static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) { atomic_inc(&bbio->error); @@ -5979,8 +5908,8 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical) btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; bio->bi_iter.bi_sector = logical >> 9; - - btrfs_end_bbio(bbio, bio, -EIO); + bio->bi_error = -EIO; + btrfs_end_bbio(bbio, bio); } } @@ -6042,18 +5971,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, continue; } - /* - * Check and see if we're ok with this bio based on it's size - * and offset with the given device. - */ - if (!bio_size_ok(dev->bdev, first_bio, - bbio->stripes[dev_nr].physical >> 9)) { - ret = breakup_stripe_bio(root, bbio, first_bio, dev, - dev_nr, rw, async_submit); - BUG_ON(ret); - continue; - } - if (dev_nr < total_devs - 1) { bio = btrfs_bio_clone(first_bio, GFP_NOFS); BUG_ON(!bio); /* -ENOMEM */ |
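
One detail of the volumes.c hunk above: btrfs_end_bio() keeps its per-btrfs_bio error counting but now reports the final status through bi_error rather than the BIO_UPTODATE flag — if more copies failed than the profile tolerates, the original bio completes with -EIO, otherwise with 0. A hedged, userspace-style sketch of that tolerance check (types and names are simplified stand-ins, not the kernel's):

#include <stdatomic.h>
#include <stdio.h>

/* Simplified stand-in for the error accounting on struct btrfs_bio. */
struct bbio {
	atomic_int errors;     /* failures seen across the stripe copies */
	int        max_errors; /* how many copies are allowed to fail */
};

/* Value to store in the original bio's bi_error before bio_endio(). */
static int bbio_final_error(struct bbio *bbio)
{
	if (atomic_load(&bbio->errors) > bbio->max_errors)
		return -5;	/* stands in for -EIO: too many copies failed */
	return 0;		/* within tolerance: the bio is up to date */
}

int main(void)
{
	struct bbio bbio = { .max_errors = 1 };

	atomic_fetch_add(&bbio.errors, 1);                  /* one mirror failed */
	printf("bi_error = %d\n", bbio_final_error(&bbio)); /* 0: tolerated */

	atomic_fetch_add(&bbio.errors, 1);                  /* second mirror failed */
	printf("bi_error = %d\n", bbio_final_error(&bbio)); /* -5, i.e. -EIO */
	return 0;
}
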