Diffstat (limited to 'fs'): 47 files changed, 1127 insertions(+), 940 deletions(-)
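Most of the btrfs hunks below follow one recurring pattern: internal helpers that used to take a VFS struct inode * now take struct btrfs_inode * directly, callers wrap the VFS inode once with BTRFS_I(), and the helpers reach VFS-level fields through the embedded vfs_inode member. The sketch below is illustrative only and is not part of the patch: example_helper() and example_caller() are invented names, while BTRFS_I(), struct btrfs_inode and vfs_inode are the real definitions from fs/btrfs/btrfs_inode.h that the diff relies on.

/*
 * Illustrative sketch of the call-site pattern used throughout this series.
 * example_helper()/example_caller() are hypothetical; compare with the
 * btrfs_i_size_write() hunk in fs/btrfs/btrfs_inode.h below.
 */
#include <linux/fs.h>
#include "btrfs_inode.h"

/* Helpers now take the btrfs-specific inode type directly. */
static void example_helper(struct btrfs_inode *inode, u64 size)
{
	/* VFS-level fields are reached through the embedded vfs_inode... */
	i_size_write(&inode->vfs_inode, size);
	/* ...while btrfs-private fields need no BTRFS_I() conversion. */
	inode->disk_i_size = size;
}

/* Callers that still hold a VFS inode wrap it once at the boundary. */
static void example_caller(struct inode *vfs_inode, u64 size)
{
	example_helper(BTRFS_I(vfs_inode), size);
}

Doing the BTRFS_I() conversion once at the call site, as the hunks in btrfs_inode.h, extent-tree.c and file.c below do, keeps the conversions out of the helper bodies and makes each function's expected inode type explicit in its prototype.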
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 4f6b00efc27c..419ef05dcb5e 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -262,8 +262,7 @@ void afs_flat_call_destructor(struct afs_call *call) /* * attach the data from a bunch of pages on an inode to a call */ -static int afs_send_pages(struct afs_call *call, struct msghdr *msg, - struct kvec *iov) +static int afs_send_pages(struct afs_call *call, struct msghdr *msg) { struct page *pages[8]; unsigned count, n, loop, offset, to; @@ -286,20 +285,21 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, loop = 0; do { + struct bio_vec bvec = {.bv_page = pages[loop], + .bv_offset = offset}; msg->msg_flags = 0; to = PAGE_SIZE; if (first + loop >= last) to = call->last_to; else msg->msg_flags = MSG_MORE; - iov->iov_base = kmap(pages[loop]) + offset; - iov->iov_len = to - offset; + bvec.bv_len = to - offset; offset = 0; _debug("- range %u-%u%s", offset, to, msg->msg_flags ? " [more]" : ""); - iov_iter_kvec(&msg->msg_iter, WRITE | ITER_KVEC, - iov, 1, to - offset); + iov_iter_bvec(&msg->msg_iter, WRITE | ITER_BVEC, + &bvec, 1, to - offset); /* have to change the state *before* sending the last * packet as RxRPC might give us the reply before it @@ -308,7 +308,6 @@ static int afs_send_pages(struct afs_call *call, struct msghdr *msg, call->state = AFS_CALL_AWAIT_REPLY; ret = rxrpc_kernel_send_data(afs_socket, call->rxcall, msg, to - offset); - kunmap(pages[loop]); if (ret < 0) break; } while (++loop < count); @@ -393,7 +392,7 @@ int afs_make_call(struct in_addr *addr, struct afs_call *call, gfp_t gfp, goto error_do_abort; if (call->send_pages) { - ret = afs_send_pages(call, &msg, iov); + ret = afs_send_pages(call, &msg); if (ret < 0) goto error_do_abort; } @@ -1495,7 +1495,7 @@ static ssize_t aio_read(struct kiocb *req, struct iocb *iocb, bool vectored, return ret; ret = rw_verify_area(READ, file, &req->ki_pos, iov_iter_count(&iter)); if (!ret) - ret = aio_ret(req, file->f_op->read_iter(req, &iter)); + ret = aio_ret(req, call_read_iter(file, req, &iter)); kfree(iovec); return ret; } @@ -1520,7 +1520,7 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored, if (!ret) { req->ki_flags |= IOCB_WRITE; file_start_write(file); - ret = aio_ret(req, file->f_op->write_iter(req, &iter)); + ret = aio_ret(req, call_write_iter(file, req, &iter)); /* * We release freeze protection in aio_complete(). 
Fool lockdep * by telling it the lock got released so that it doesn't diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 819a6d27218a..0c6baaba0651 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -237,20 +237,20 @@ static inline u64 btrfs_ino(struct btrfs_inode *inode) return ino; } -static inline void btrfs_i_size_write(struct inode *inode, u64 size) +static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size) { - i_size_write(inode, size); - BTRFS_I(inode)->disk_i_size = size; + i_size_write(&inode->vfs_inode, size); + inode->disk_i_size = size; } -static inline bool btrfs_is_free_space_inode(struct inode *inode) +static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; if (root == root->fs_info->tree_root && - btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID) + btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID) return true; - if (BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) + if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID) return true; return false; } @@ -311,34 +311,33 @@ struct btrfs_dio_private { * to grab i_mutex. It is used to avoid the endless truncate due to * nonlocked dio read. */ -static inline void btrfs_inode_block_unlocked_dio(struct inode *inode) +static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode) { - set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); smp_mb(); } -static inline void btrfs_inode_resume_unlocked_dio(struct inode *inode) +static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode) { smp_mb__before_atomic(); - clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, - &BTRFS_I(inode)->runtime_flags); + clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags); } -static inline void btrfs_print_data_csum_error(struct inode *inode, +static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode, u64 logical_start, u32 csum, u32 csum_expected, int mirror_num) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; /* Output minus objectid, which is more meaningful */ if (root->objectid >= BTRFS_LAST_FREE_OBJECTID) btrfs_warn_rl(root->fs_info, "csum failed root %lld ino %lld off %llu csum 0x%08x expected csum 0x%08x mirror %d", - root->objectid, btrfs_ino(BTRFS_I(inode)), + root->objectid, btrfs_ino(inode), logical_start, csum, csum_expected, mirror_num); else btrfs_warn_rl(root->fs_info, "csum failed root %llu ino %llu off %llu csum 0x%08x expected csum 0x%08x mirror %d", - root->objectid, btrfs_ino(BTRFS_I(inode)), + root->objectid, btrfs_ino(inode), logical_start, csum, csum_expected, mirror_num); } diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 903c32c9eb22..c7721a6aa3bb 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -100,7 +100,7 @@ static struct bio *compressed_bio_alloc(struct block_device *bdev, return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags); } -static int check_compressed_csum(struct inode *inode, +static int check_compressed_csum(struct btrfs_inode *inode, struct compressed_bio *cb, u64 disk_start) { @@ -111,7 +111,7 @@ static int check_compressed_csum(struct inode *inode, u32 csum; u32 *cb_sum = &cb->sums; - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) + if (inode->flags & BTRFS_INODE_NODATASUM) return 0; for (i = 0; i < 
cb->nr_pages; i++) { @@ -125,7 +125,7 @@ static int check_compressed_csum(struct inode *inode, if (csum != *cb_sum) { btrfs_print_data_csum_error(inode, disk_start, csum, - *cb_sum, cb->mirror_num); + *cb_sum, cb->mirror_num); ret = -EIO; goto fail; } @@ -165,7 +165,7 @@ static void end_compressed_bio_read(struct bio *bio) goto out; inode = cb->inode; - ret = check_compressed_csum(inode, cb, + ret = check_compressed_csum(BTRFS_I(inode), cb, (u64)bio->bi_iter.bi_sector << 9); if (ret) goto csum_failed; @@ -911,32 +911,28 @@ static void free_workspaces(void) } /* - * given an address space and start/len, compress the bytes. + * Given an address space and start and length, compress the bytes into @pages + * that are allocated on demand. * - * pages are allocated to hold the compressed result and stored - * in 'pages' + * @out_pages is an in/out parameter, holds maximum number of pages to allocate + * and returns number of actually allocated pages * - * out_pages is used to return the number of pages allocated. There - * may be pages allocated even if we return an error - * - * total_in is used to return the number of bytes actually read. It - * may be smaller then len if we had to exit early because we + * @total_in is used to return the number of bytes actually read. It + * may be smaller than the input length if we had to exit early because we * ran out of room in the pages array or because we cross the * max_out threshold. * - * total_out is used to return the total number of compressed bytes + * @total_out is an in/out parameter, must be set to the input length and will + * be also used to return the total number of compressed bytes * - * max_out tells us the max number of bytes that we're allowed to + * @max_out tells us the max number of bytes that we're allowed to * stuff into pages */ int btrfs_compress_pages(int type, struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, + u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct list_head *workspace; int ret; @@ -944,10 +940,9 @@ int btrfs_compress_pages(int type, struct address_space *mapping, workspace = find_workspace(type); ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, - start, len, pages, - nr_dest_pages, out_pages, - total_in, total_out, - max_out); + start, pages, + out_pages, + total_in, total_out); free_workspace(type, workspace); return ret; } @@ -1015,7 +1010,7 @@ void btrfs_exit_compress(void) * * total_out is the last byte of the buffer */ -int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, +int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio *bio) { diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 09879579fbc8..39ec43ab8df1 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h @@ -19,20 +19,32 @@ #ifndef __BTRFS_COMPRESSION_ #define __BTRFS_COMPRESSION_ +/* + * We want to make sure that amount of RAM required to uncompress an extent is + * reasonable, so we limit the total size in ram of a compressed extent to + * 128k. This is a crucial number because it also controls how easily we can + * spread reads across cpus for decompression. + * + * We also want to make sure the amount of IO required to do a random read is + * reasonably small, so we limit the size of a compressed extent to 128k. 
+ */ + +/* Maximum length of compressed data stored on disk */ +#define BTRFS_MAX_COMPRESSED (SZ_128K) +/* Maximum size of data before compression */ +#define BTRFS_MAX_UNCOMPRESSED (SZ_128K) + void btrfs_init_compress(void); void btrfs_exit_compress(void); int btrfs_compress_pages(int type, struct address_space *mapping, - u64 start, unsigned long len, - struct page **pages, - unsigned long nr_dest_pages, + u64 start, struct page **pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out); + unsigned long *total_out); int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, unsigned long start_byte, size_t srclen, size_t destlen); -int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, +int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start, unsigned long total_out, u64 disk_start, struct bio *bio); @@ -59,13 +71,11 @@ struct btrfs_compress_op { int (*compress_pages)(struct list_head *workspace, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out); + unsigned long *total_out); int (*decompress_bio)(struct list_head *workspace, struct page **pages_in, diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 1192bc7d2ee7..7dc8844037e0 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -453,8 +453,6 @@ __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm) struct rb_node *parent = NULL; struct tree_mod_elem *cur; - BUG_ON(!tm); - tm->seq = btrfs_inc_tree_mod_seq(fs_info); tm_root = &fs_info->tree_mod_log; @@ -4159,6 +4157,9 @@ static noinline int push_for_double_split(struct btrfs_trans_handle *trans, /* try to push all the items before our slot into the next leaf */ slot = path->slots[0]; + space_needed = data_size; + if (slot > 0) + space_needed -= btrfs_leaf_free_space(fs_info, path->nodes[0]); ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot); if (ret < 0) return ret; @@ -4214,6 +4215,10 @@ static noinline int split_leaf(struct btrfs_trans_handle *trans, if (wret < 0) return wret; if (wret) { + space_needed = data_size; + if (slot > 0) + space_needed -= btrfs_leaf_free_space(fs_info, + l); wret = push_leaf_left(trans, root, path, space_needed, space_needed, 0, (u32)-1); if (wret < 0) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index a8812d95359d..29b7fc28c607 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2688,7 +2688,7 @@ enum btrfs_flush_state { }; int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len); -int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes); +int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes); void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len); void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start, u64 len); @@ -2696,16 +2696,16 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans); int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, - struct inode *inode); -void btrfs_orphan_release_metadata(struct inode *inode); + struct btrfs_inode *inode); +void btrfs_orphan_release_metadata(struct btrfs_inode *inode); int btrfs_subvolume_reserve_metadata(struct btrfs_root *root, struct btrfs_block_rsv *rsv, int 
nitems, u64 *qgroup_reserved, bool use_global_rsv); void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *rsv); -int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes); -void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes); +int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes); +void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes); int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len); void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len); void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type); @@ -2983,7 +2983,7 @@ int btrfs_check_dir_item_collision(struct btrfs_root *root, u64 dir, const char *name, int name_len); int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, - int name_len, struct inode *dir, + int name_len, struct btrfs_inode *dir, struct btrfs_key *location, u8 type, u64 index); struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -3082,7 +3082,7 @@ int btrfs_csum_one_bio(struct inode *inode, struct bio *bio, u64 file_start, int contig); int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, struct list_head *list, int search_commit); -void btrfs_extent_item_to_extent_map(struct inode *inode, +void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, const struct btrfs_path *path, struct btrfs_file_extent_item *fi, const bool new_inline, @@ -3101,9 +3101,9 @@ struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode, int delay_iput); void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work); -struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create); +struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, + struct page *page, size_t pg_offset, u64 start, + u64 len, int create); noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len, u64 *orig_start, u64 *orig_block_len, u64 *ram_bytes); @@ -3124,13 +3124,13 @@ static inline void btrfs_force_ra(struct address_space *mapping, } struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry); -int btrfs_set_inode_index(struct inode *dir, u64 *index); +int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index); int btrfs_unlink_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_inode *dir, struct btrfs_inode *inode, const char *name, int name_len); int btrfs_add_link(struct btrfs_trans_handle *trans, - struct inode *parent_inode, struct inode *inode, + struct btrfs_inode *parent_inode, struct btrfs_inode *inode, const char *name, int name_len, int add_backref, u64 index); int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, struct btrfs_root *root, @@ -3167,15 +3167,16 @@ void btrfs_destroy_cachep(void); long btrfs_ioctl_trans_end(struct file *file); struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, struct btrfs_root *root, int *was_new); -struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 end, - int create); +struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, + struct page *page, size_t pg_offset, + u64 start, u64 end, int create); int btrfs_update_inode(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); 
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode); -int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode); +int btrfs_orphan_add(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode); int btrfs_orphan_cleanup(struct btrfs_root *root); void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, struct btrfs_root *root); @@ -3216,11 +3217,11 @@ ssize_t btrfs_dedupe_file_range(struct file *src_file, u64 loff, u64 olen, int btrfs_auto_defrag_init(void); void btrfs_auto_defrag_exit(void); int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, - struct inode *inode); + struct btrfs_inode *inode); int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); void btrfs_cleanup_defrag_inodes(struct btrfs_fs_info *fs_info); int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync); -void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, +void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, int skip_pinned); extern const struct file_operations btrfs_file_operations; int __btrfs_drop_extents(struct btrfs_trans_handle *trans, @@ -3234,7 +3235,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct inode *inode, u64 start, u64 end, int drop_cache); int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, - struct inode *inode, u64 start, u64 end); + struct btrfs_inode *inode, u64 start, u64 end); int btrfs_release_file(struct inode *inode, struct file *file); int btrfs_dirty_pages(struct inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index f7a6ee5ccc80..1aff676f0e5b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1790,7 +1790,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) i_uid_write(inode, btrfs_stack_inode_uid(inode_item)); i_gid_write(inode, btrfs_stack_inode_gid(inode_item)); - btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item)); + btrfs_i_size_write(BTRFS_I(inode), btrfs_stack_inode_size(inode_item)); inode->i_mode = btrfs_stack_inode_mode(inode_item); set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 5de280b9ad73..e653921f05d9 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -304,8 +304,9 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info) dev_replace->cursor_left_last_write_of_item; } -int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name, - u64 srcdevid, char *srcdev_name, int read_src) +int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, + const char *tgtdev_name, u64 srcdevid, const char *srcdev_name, + int read_src) { struct btrfs_root *root = fs_info->dev_root; struct btrfs_trans_handle *trans; diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index 54ea12bda15b..f94a76844ae7 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -27,8 +27,9 @@ int btrfs_run_dev_replace(struct btrfs_trans_handle *trans, void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info); int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); -int btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, char *tgtdev_name, - u64 srcdevid, char *srcdev_name, int read_src); +int 
btrfs_dev_replace_start(struct btrfs_fs_info *fs_info, + const char *tgtdev_name, u64 srcdevid, const char *srcdev_name, + int read_src); void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index 724504a2d7ac..60a750678a82 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c @@ -80,7 +80,8 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; u32 data_size; - BUG_ON(name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)); + if (name_len + data_len > BTRFS_MAX_XATTR_SIZE(root->fs_info)) + return -ENOSPC; key.objectid = objectid; key.type = BTRFS_XATTR_ITEM_KEY; @@ -120,7 +121,7 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, */ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *name, int name_len, - struct inode *dir, struct btrfs_key *location, + struct btrfs_inode *dir, struct btrfs_key *location, u8 type, u64 index) { int ret = 0; @@ -133,7 +134,7 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root struct btrfs_disk_key disk_key; u32 data_size; - key.objectid = btrfs_ino(BTRFS_I(dir)); + key.objectid = btrfs_ino(dir); key.type = BTRFS_DIR_ITEM_KEY; key.offset = btrfs_name_hash(name, name_len); @@ -174,7 +175,7 @@ second_insert: btrfs_release_path(path); ret2 = btrfs_insert_delayed_dir_index(trans, root->fs_info, name, - name_len, BTRFS_I(dir), &disk_key, type, index); + name_len, dir, &disk_key, type, index); out_free: btrfs_free_path(path); if (ret) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 207db0270b15..08b74daf35d0 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -219,12 +219,12 @@ void btrfs_set_buffer_lockdep_class(u64 objectid, struct extent_buffer *eb, * extents on the btree inode are pretty simple, there's one extent * that covers the entire device */ -static struct extent_map *btree_get_extent(struct inode *inode, +static struct extent_map *btree_get_extent(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, int create) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; int ret; @@ -265,7 +265,7 @@ out: return em; } -u32 btrfs_csum_data(char *data, u32 seed, size_t len) +u32 btrfs_csum_data(const char *data, u32 seed, size_t len) { return btrfs_crc32c(seed, data, len); } @@ -2205,11 +2205,9 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->delalloc_workers); btrfs_destroy_workqueue(fs_info->workers); btrfs_destroy_workqueue(fs_info->endio_workers); - btrfs_destroy_workqueue(fs_info->endio_meta_workers); btrfs_destroy_workqueue(fs_info->endio_raid56_workers); btrfs_destroy_workqueue(fs_info->endio_repair_workers); btrfs_destroy_workqueue(fs_info->rmw_workers); - btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); btrfs_destroy_workqueue(fs_info->endio_write_workers); btrfs_destroy_workqueue(fs_info->endio_freespace_worker); btrfs_destroy_workqueue(fs_info->submit_workers); @@ -2219,6 +2217,13 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) btrfs_destroy_workqueue(fs_info->flush_workers); 
btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); btrfs_destroy_workqueue(fs_info->extent_workers); + /* + * Now that all other work queues are destroyed, we can safely destroy + * the queues used for metadata I/O, since tasks from those other work + * queues can do metadata I/O operations. + */ + btrfs_destroy_workqueue(fs_info->endio_meta_workers); + btrfs_destroy_workqueue(fs_info->endio_meta_write_workers); } static void free_root_extent_buffers(struct btrfs_root *root) @@ -3261,7 +3266,6 @@ fail_fsdev_sysfs: fail_block_groups: btrfs_put_block_group_cache(fs_info); - btrfs_free_block_groups(fs_info); fail_tree_roots: free_root_pointers(fs_info, 1); @@ -3269,6 +3273,7 @@ fail_tree_roots: fail_sb_buffer: btrfs_stop_all_workers(fs_info); + btrfs_free_block_groups(fs_info); fail_alloc: fail_iput: btrfs_mapping_tree_free(&fs_info->mapping_tree); @@ -3448,7 +3453,7 @@ static int write_dev_supers(struct btrfs_device *device, btrfs_set_super_bytenr(sb, bytenr); crc = ~(u32)0; - crc = btrfs_csum_data((char *)sb + + crc = btrfs_csum_data((const char *)sb + BTRFS_CSUM_SIZE, crc, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE); @@ -3977,8 +3982,6 @@ void close_ctree(struct btrfs_fs_info *fs_info) btrfs_put_block_group_cache(fs_info); - btrfs_free_block_groups(fs_info); - /* * we must make sure there is not any read request to * submit after we stopping all workers. @@ -3986,6 +3989,8 @@ void close_ctree(struct btrfs_fs_info *fs_info) invalidate_inode_pages2(fs_info->btree_inode->i_mapping); btrfs_stop_all_workers(fs_info); + btrfs_free_block_groups(fs_info); + clear_bit(BTRFS_FS_OPEN, &fs_info->flags); free_root_pointers(fs_info, 1); @@ -4653,9 +4658,12 @@ static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) } static const struct extent_io_ops btree_extent_io_ops = { - .readpage_end_io_hook = btree_readpage_end_io_hook, - .readpage_io_failed_hook = btree_io_failed_hook, + /* mandatory callbacks */ .submit_bio_hook = btree_submit_bio_hook, + .readpage_end_io_hook = btree_readpage_end_io_hook, /* note we're sharing with inode.c for the merge bio hook */ .merge_bio_hook = btrfs_merge_bio_hook, + .readpage_io_failed_hook = btree_io_failed_hook, + + /* optional callbacks */ }; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 0be2d4fe705b..2e0ec29bfd69 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -116,7 +116,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf); int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid, int atomic); int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); -u32 btrfs_csum_data(char *data, u32 seed, size_t len); +u32 btrfs_csum_data(const char *data, u32 seed, size_t len); void btrfs_csum_final(u32 crc, u8 *result); int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, enum btrfs_wq_endio_type metadata); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index dad395b0b9fc..be5477676cc8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4136,10 +4136,10 @@ static u64 btrfs_space_info_used(struct btrfs_space_info *s_info, (may_use_included ? 
s_info->bytes_may_use : 0); } -int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes) +int btrfs_alloc_data_chunk_ondemand(struct btrfs_inode *inode, u64 bytes) { struct btrfs_space_info *data_sinfo; - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; u64 used; int ret = 0; @@ -4282,7 +4282,7 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len) round_down(start, fs_info->sectorsize); start = round_down(start, fs_info->sectorsize); - ret = btrfs_alloc_data_chunk_ondemand(inode, len); + ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), len); if (ret < 0) return ret; @@ -5743,10 +5743,10 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) /* Can only return 0 or -ENOSPC */ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; /* * We always use trans->block_rsv here as we will have reserved space * for our orphan when starting the transaction, using get_block_rsv() @@ -5763,19 +5763,19 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, */ u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); - trace_btrfs_space_reservation(fs_info, "orphan", - btrfs_ino(BTRFS_I(inode)), num_bytes, 1); + trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode), + num_bytes, 1); return btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); } -void btrfs_orphan_release_metadata(struct inode *inode) +void btrfs_orphan_release_metadata(struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; u64 num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); - trace_btrfs_space_reservation(fs_info, "orphan", - btrfs_ino(BTRFS_I(inode)), num_bytes, 0); + trace_btrfs_space_reservation(fs_info, "orphan", btrfs_ino(inode), + num_bytes, 0); btrfs_block_rsv_release(fs_info, root->orphan_block_rsv, num_bytes); } @@ -5847,7 +5847,8 @@ void btrfs_subvolume_release_metadata(struct btrfs_fs_info *fs_info, * reserved extents that need to be freed. This must be called with * BTRFS_I(inode)->lock held. */ -static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) +static unsigned drop_outstanding_extent(struct btrfs_inode *inode, + u64 num_bytes) { unsigned drop_inode_space = 0; unsigned dropped_extents = 0; @@ -5855,25 +5856,23 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) num_extents = count_max_extents(num_bytes); ASSERT(num_extents); - ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents); - BTRFS_I(inode)->outstanding_extents -= num_extents; + ASSERT(inode->outstanding_extents >= num_extents); + inode->outstanding_extents -= num_extents; - if (BTRFS_I(inode)->outstanding_extents == 0 && + if (inode->outstanding_extents == 0 && test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) drop_inode_space = 1; /* * If we have more or the same amount of outstanding extents than we have * reserved then we need to leave the reserved extents count alone. 
*/ - if (BTRFS_I(inode)->outstanding_extents >= - BTRFS_I(inode)->reserved_extents) + if (inode->outstanding_extents >= inode->reserved_extents) return drop_inode_space; - dropped_extents = BTRFS_I(inode)->reserved_extents - - BTRFS_I(inode)->outstanding_extents; - BTRFS_I(inode)->reserved_extents -= dropped_extents; + dropped_extents = inode->reserved_extents - inode->outstanding_extents; + inode->reserved_extents -= dropped_extents; return dropped_extents + drop_inode_space; } @@ -5895,24 +5894,21 @@ static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) * * This must be called with BTRFS_I(inode)->lock held. */ -static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, +static u64 calc_csum_metadata_size(struct btrfs_inode *inode, u64 num_bytes, int reserve) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 old_csums, num_csums; - if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM && - BTRFS_I(inode)->csum_bytes == 0) + if (inode->flags & BTRFS_INODE_NODATASUM && inode->csum_bytes == 0) return 0; - old_csums = btrfs_csum_bytes_to_leaves(fs_info, - BTRFS_I(inode)->csum_bytes); + old_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); if (reserve) - BTRFS_I(inode)->csum_bytes += num_bytes; + inode->csum_bytes += num_bytes; else - BTRFS_I(inode)->csum_bytes -= num_bytes; - num_csums = btrfs_csum_bytes_to_leaves(fs_info, - BTRFS_I(inode)->csum_bytes); + inode->csum_bytes -= num_bytes; + num_csums = btrfs_csum_bytes_to_leaves(fs_info, inode->csum_bytes); /* No change, no need to reserve more */ if (old_csums == num_csums) @@ -5925,10 +5921,10 @@ static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes, return btrfs_calc_trans_metadata_size(fs_info, old_csums - num_csums); } -int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) +int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_block_rsv *block_rsv = &fs_info->delalloc_block_rsv; u64 to_reserve = 0; u64 csum_bytes; @@ -5960,25 +5956,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) schedule_timeout(1); if (delalloc_lock) - mutex_lock(&BTRFS_I(inode)->delalloc_mutex); + mutex_lock(&inode->delalloc_mutex); num_bytes = ALIGN(num_bytes, fs_info->sectorsize); - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); nr_extents = count_max_extents(num_bytes); - BTRFS_I(inode)->outstanding_extents += nr_extents; + inode->outstanding_extents += nr_extents; nr_extents = 0; - if (BTRFS_I(inode)->outstanding_extents > - BTRFS_I(inode)->reserved_extents) - nr_extents += BTRFS_I(inode)->outstanding_extents - - BTRFS_I(inode)->reserved_extents; + if (inode->outstanding_extents > inode->reserved_extents) + nr_extents += inode->outstanding_extents - + inode->reserved_extents; /* We always want to reserve a slot for updating the inode. 
*/ to_reserve = btrfs_calc_trans_metadata_size(fs_info, nr_extents + 1); to_reserve += calc_csum_metadata_size(inode, num_bytes, 1); - csum_bytes = BTRFS_I(inode)->csum_bytes; - spin_unlock(&BTRFS_I(inode)->lock); + csum_bytes = inode->csum_bytes; + spin_unlock(&inode->lock); if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { ret = btrfs_qgroup_reserve_meta(root, @@ -5994,38 +5989,38 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) goto out_fail; } - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); if (test_and_set_bit(BTRFS_INODE_DELALLOC_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) { + &inode->runtime_flags)) { to_reserve -= btrfs_calc_trans_metadata_size(fs_info, 1); release_extra = true; } - BTRFS_I(inode)->reserved_extents += nr_extents; - spin_unlock(&BTRFS_I(inode)->lock); + inode->reserved_extents += nr_extents; + spin_unlock(&inode->lock); if (delalloc_lock) - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); + mutex_unlock(&inode->delalloc_mutex); if (to_reserve) trace_btrfs_space_reservation(fs_info, "delalloc", - btrfs_ino(BTRFS_I(inode)), to_reserve, 1); + btrfs_ino(inode), to_reserve, 1); if (release_extra) btrfs_block_rsv_release(fs_info, block_rsv, btrfs_calc_trans_metadata_size(fs_info, 1)); return 0; out_fail: - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); dropped = drop_outstanding_extent(inode, num_bytes); /* * If the inodes csum_bytes is the same as the original * csum_bytes then we know we haven't raced with any free()ers * so we can just reduce our inodes csum bytes and carry on. */ - if (BTRFS_I(inode)->csum_bytes == csum_bytes) { + if (inode->csum_bytes == csum_bytes) { calc_csum_metadata_size(inode, num_bytes, 0); } else { - u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes; + u64 orig_csum_bytes = inode->csum_bytes; u64 bytes; /* @@ -6036,8 +6031,8 @@ out_fail: * number of bytes that were freed while we were trying our * reservation. */ - bytes = csum_bytes - BTRFS_I(inode)->csum_bytes; - BTRFS_I(inode)->csum_bytes = csum_bytes; + bytes = csum_bytes - inode->csum_bytes; + inode->csum_bytes = csum_bytes; to_free = calc_csum_metadata_size(inode, bytes, 0); @@ -6046,7 +6041,7 @@ out_fail: * been making this reservation and our ->csum_bytes were not * artificially inflated. */ - BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes; + inode->csum_bytes = csum_bytes - num_bytes; bytes = csum_bytes - orig_csum_bytes; bytes = calc_csum_metadata_size(inode, bytes, 0); @@ -6058,23 +6053,23 @@ out_fail: * need to do anything, the other free-ers did the correct * thing. */ - BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes; + inode->csum_bytes = orig_csum_bytes - num_bytes; if (bytes > to_free) to_free = bytes - to_free; else to_free = 0; } - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); if (dropped) to_free += btrfs_calc_trans_metadata_size(fs_info, dropped); if (to_free) { btrfs_block_rsv_release(fs_info, block_rsv, to_free); trace_btrfs_space_reservation(fs_info, "delalloc", - btrfs_ino(BTRFS_I(inode)), to_free, 0); + btrfs_ino(inode), to_free, 0); } if (delalloc_lock) - mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); + mutex_unlock(&inode->delalloc_mutex); return ret; } @@ -6087,27 +6082,27 @@ out_fail: * once we complete IO for a given set of bytes to release their metadata * reservations. 
*/ -void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) +void btrfs_delalloc_release_metadata(struct btrfs_inode *inode, u64 num_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 to_free = 0; unsigned dropped; num_bytes = ALIGN(num_bytes, fs_info->sectorsize); - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); dropped = drop_outstanding_extent(inode, num_bytes); if (num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes, 0); - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); if (dropped > 0) to_free += btrfs_calc_trans_metadata_size(fs_info, dropped); if (btrfs_is_testing(fs_info)) return; - trace_btrfs_space_reservation(fs_info, "delalloc", - btrfs_ino(BTRFS_I(inode)), to_free, 0); + trace_btrfs_space_reservation(fs_info, "delalloc", btrfs_ino(inode), + to_free, 0); btrfs_block_rsv_release(fs_info, &fs_info->delalloc_block_rsv, to_free); } @@ -6142,7 +6137,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len) ret = btrfs_check_data_free_space(inode, start, len); if (ret < 0) return ret; - ret = btrfs_delalloc_reserve_metadata(inode, len); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), len); if (ret < 0) btrfs_free_reserved_data_space(inode, start, len); return ret; @@ -6165,7 +6160,7 @@ int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len) */ void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len) { - btrfs_delalloc_release_metadata(inode, len); + btrfs_delalloc_release_metadata(BTRFS_I(inode), len); btrfs_free_reserved_data_space(inode, start, len); } @@ -9741,6 +9736,11 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info) } } +/* + * Must be called only after stopping all workers, since we could have block + * group caching kthreads running, and therefore they could race with us if we + * freed the block groups before stopping them. + */ int btrfs_free_block_groups(struct btrfs_fs_info *info) { struct btrfs_block_group_cache *block_group; @@ -9780,9 +9780,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) list_del(&block_group->list); up_write(&block_group->space_info->groups_sem); - if (block_group->cached == BTRFS_CACHE_STARTED) - wait_block_group_cache_done(block_group); - /* * We haven't cached this block group, which means we could * possibly have excluded extents on this block group. 
@@ -9792,6 +9789,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) free_excluded_extents(info, block_group); btrfs_remove_free_space_cache(block_group); + ASSERT(block_group->cached != BTRFS_CACHE_STARTED); ASSERT(list_empty(&block_group->dirty_list)); ASSERT(list_empty(&block_group->io_list)); ASSERT(list_empty(&block_group->bg_list)); @@ -10343,7 +10341,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, mutex_unlock(&trans->transaction->cache_write_mutex); if (!IS_ERR(inode)) { - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) { btrfs_add_delayed_iput(inode); goto out; diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d15b5ddb6732..28e81922a21c 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -428,7 +428,8 @@ static void clear_state_cb(struct extent_io_tree *tree, struct extent_state *state, unsigned *bits) { if (tree->ops && tree->ops->clear_bit_hook) - tree->ops->clear_bit_hook(tree->mapping->host, state, bits); + tree->ops->clear_bit_hook(BTRFS_I(tree->mapping->host), + state, bits); } static void set_state_bits(struct extent_io_tree *tree, @@ -1959,11 +1960,11 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page) SetPageUptodate(page); } -int free_io_failure(struct inode *inode, struct io_failure_record *rec) +int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec) { int ret; int err = 0; - struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct extent_io_tree *failure_tree = &inode->io_failure_tree; set_state_failrec(failure_tree, rec->start, NULL); ret = clear_extent_bits(failure_tree, rec->start, @@ -1972,7 +1973,7 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec) if (ret) err = ret; - ret = clear_extent_bits(&BTRFS_I(inode)->io_tree, rec->start, + ret = clear_extent_bits(&inode->io_tree, rec->start, rec->start + rec->len - 1, EXTENT_DAMAGED); if (ret && !err) @@ -1992,10 +1993,11 @@ int free_io_failure(struct inode *inode, struct io_failure_record *rec) * currently, there can be no more than two copies of every data bit. thus, * exactly one rewrite is required. 
*/ -int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, - struct page *page, unsigned int pg_offset, int mirror_num) +int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length, + u64 logical, struct page *page, + unsigned int pg_offset, int mirror_num) { - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct bio *bio; struct btrfs_device *dev; u64 map_length = 0; @@ -2054,7 +2056,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, btrfs_info_rl_in_rcu(fs_info, "read error corrected: ino %llu off %llu (dev %s sector %llu)", - btrfs_ino(BTRFS_I(inode)), start, + btrfs_ino(inode), start, rcu_str_deref(dev->name), sector); btrfs_bio_counter_dec(fs_info); bio_put(bio); @@ -2074,7 +2076,7 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info, for (i = 0; i < num_pages; i++) { struct page *p = eb->pages[i]; - ret = repair_io_failure(fs_info->btree_inode, start, + ret = repair_io_failure(BTRFS_I(fs_info->btree_inode), start, PAGE_SIZE, start, p, start - page_offset(p), mirror_num); if (ret) @@ -2089,23 +2091,23 @@ int repair_eb_io_failure(struct btrfs_fs_info *fs_info, * each time an IO finishes, we do a fast check in the IO failure tree * to see if we need to process or clean up an io_failure_record */ -int clean_io_failure(struct inode *inode, u64 start, struct page *page, +int clean_io_failure(struct btrfs_inode *inode, u64 start, struct page *page, unsigned int pg_offset) { u64 private; struct io_failure_record *failrec; - struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info; + struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_state *state; int num_copies; int ret; private = 0; - ret = count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, + ret = count_range_bits(&inode->io_failure_tree, &private, (u64)-1, 1, EXTENT_DIRTY, 0); if (!ret) return 0; - ret = get_state_failrec(&BTRFS_I(inode)->io_failure_tree, start, + ret = get_state_failrec(&inode->io_failure_tree, start, &failrec); if (ret) return 0; @@ -2122,11 +2124,11 @@ int clean_io_failure(struct inode *inode, u64 start, struct page *page, if (fs_info->sb->s_flags & MS_RDONLY) goto out; - spin_lock(&BTRFS_I(inode)->io_tree.lock); - state = find_first_extent_bit_state(&BTRFS_I(inode)->io_tree, + spin_lock(&inode->io_tree.lock); + state = find_first_extent_bit_state(&inode->io_tree, failrec->start, EXTENT_LOCKED); - spin_unlock(&BTRFS_I(inode)->io_tree.lock); + spin_unlock(&inode->io_tree.lock); if (state && state->start <= failrec->start && state->end >= failrec->start + failrec->len - 1) { @@ -2151,9 +2153,9 @@ out: * - under ordered extent * - the inode is freeing */ -void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end) +void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, u64 end) { - struct extent_io_tree *failure_tree = &BTRFS_I(inode)->io_failure_tree; + struct extent_io_tree *failure_tree = &inode->io_failure_tree; struct io_failure_record *failrec; struct extent_state *state, *next; @@ -2393,7 +2395,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, ret = btrfs_check_repairable(inode, failed_bio, failrec, failed_mirror); if (!ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } @@ -2406,7 +2408,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, (int)phy_offset, failed_bio->bi_end_io, NULL); if (!bio) { - 
free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } bio_set_op_attrs(bio, REQ_OP_READ, read_mode); @@ -2418,7 +2420,7 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset, ret = tree->ops->submit_bio_hook(inode, bio, failrec->this_mirror, failrec->bio_flags, 0); if (ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); bio_put(bio); } @@ -2435,12 +2437,9 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end) tree = &BTRFS_I(page->mapping->host)->io_tree; - if (tree->ops && tree->ops->writepage_end_io_hook) { - ret = tree->ops->writepage_end_io_hook(page, start, - end, NULL, uptodate); - if (ret) - uptodate = 0; - } + if (tree->ops && tree->ops->writepage_end_io_hook) + tree->ops->writepage_end_io_hook(page, start, end, NULL, + uptodate); if (!uptodate) { ClearPageUptodate(page); @@ -2568,21 +2567,21 @@ static void end_bio_extent_readpage(struct bio *bio) len = bvec->bv_len; mirror = io_bio->mirror_num; - if (likely(uptodate && tree->ops && - tree->ops->readpage_end_io_hook)) { + if (likely(uptodate && tree->ops)) { ret = tree->ops->readpage_end_io_hook(io_bio, offset, page, start, end, mirror); if (ret) uptodate = 0; else - clean_io_failure(inode, start, page, 0); + clean_io_failure(BTRFS_I(inode), start, + page, 0); } if (likely(uptodate)) goto readpage_ok; - if (tree->ops && tree->ops->readpage_io_failed_hook) { + if (tree->ops) { ret = tree->ops->readpage_io_failed_hook(page, mirror); if (!ret && !bio->bi_error) uptodate = 1; @@ -2731,7 +2730,7 @@ static int __must_check submit_one_bio(struct bio *bio, int mirror_num, bio->bi_private = NULL; bio_get(bio); - if (tree->ops && tree->ops->submit_bio_hook) + if (tree->ops) ret = tree->ops->submit_bio_hook(page->mapping->host, bio, mirror_num, bio_flags, start); else @@ -2746,7 +2745,7 @@ static int merge_bio(struct extent_io_tree *tree, struct page *page, unsigned long bio_flags) { int ret = 0; - if (tree->ops && tree->ops->merge_bio_hook) + if (tree->ops) ret = tree->ops->merge_bio_hook(page, offset, size, bio, bio_flags); return ret; @@ -2857,7 +2856,7 @@ __get_extent_map(struct inode *inode, struct page *page, size_t pg_offset, *em_cached = NULL; } - em = get_extent(inode, page, pg_offset, start, len, 0); + em = get_extent(BTRFS_I(inode), page, pg_offset, start, len, 0); if (em_cached && !IS_ERR_OR_NULL(em)) { BUG_ON(*em_cached); atomic_inc(&em->refs); @@ -3101,7 +3100,7 @@ static inline void __do_contiguous_readpages(struct extent_io_tree *tree, inode = pages[0]->mapping->host; while (1) { lock_extent(tree, start, end); - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, end - start + 1); if (!ordered) break; @@ -3173,7 +3172,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, while (1) { lock_extent(tree, start, end); - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, PAGE_SIZE); if (!ordered) break; @@ -3370,7 +3369,7 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, page_end, NULL, 1); break; } - em = epd->get_extent(inode, page, pg_offset, cur, + em = epd->get_extent(BTRFS_I(inode), page, pg_offset, cur, end - cur + 1, 1); if (IS_ERR_OR_NULL(em)) { SetPageError(page); @@ -4335,7 +4334,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode, if (len == 0) break; len = ALIGN(len, sectorsize); - em = get_extent(inode, NULL, 0, offset, len, 
0); + em = get_extent(BTRFS_I(inode), NULL, 0, offset, len, 0); if (IS_ERR_OR_NULL(em)) return em; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 270d03be290e..3e4fad4a909d 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -84,6 +84,7 @@ extern void le_bitmap_clear(u8 *map, unsigned int start, int len); struct extent_state; struct btrfs_root; +struct btrfs_inode; struct btrfs_io_bio; struct io_failure_record; @@ -91,24 +92,34 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, struct bio *bio, int mirror_num, unsigned long bio_flags, u64 bio_offset); struct extent_io_ops { - int (*fill_delalloc)(struct inode *inode, struct page *locked_page, - u64 start, u64 end, int *page_started, - unsigned long *nr_written); - int (*writepage_start_hook)(struct page *page, u64 start, u64 end); + /* + * The following callbacks must be allways defined, the function + * pointer will be called unconditionally. + */ extent_submit_bio_hook_t *submit_bio_hook; + int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset, + struct page *page, u64 start, u64 end, + int mirror); int (*merge_bio_hook)(struct page *page, unsigned long offset, size_t size, struct bio *bio, unsigned long bio_flags); int (*readpage_io_failed_hook)(struct page *page, int failed_mirror); - int (*readpage_end_io_hook)(struct btrfs_io_bio *io_bio, u64 phy_offset, - struct page *page, u64 start, u64 end, - int mirror); - int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, + + /* + * Optional hooks, called if the pointer is not NULL + */ + int (*fill_delalloc)(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written); + + int (*writepage_start_hook)(struct page *page, u64 start, u64 end); + void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate); void (*set_bit_hook)(struct inode *inode, struct extent_state *state, unsigned *bits); - void (*clear_bit_hook)(struct inode *inode, struct extent_state *state, - unsigned *bits); + void (*clear_bit_hook)(struct btrfs_inode *inode, + struct extent_state *state, + unsigned *bits); void (*merge_extent_hook)(struct inode *inode, struct extent_state *new, struct extent_state *other); @@ -209,7 +220,7 @@ static inline int extent_compress_type(unsigned long bio_flags) struct extent_map_tree; -typedef struct extent_map *(get_extent_t)(struct inode *inode, +typedef struct extent_map *(get_extent_t)(struct btrfs_inode *inode, struct page *page, size_t pg_offset, u64 start, u64 len, @@ -451,12 +462,13 @@ struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs); struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask); struct btrfs_fs_info; +struct btrfs_inode; -int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, - struct page *page, unsigned int pg_offset, - int mirror_num); -int clean_io_failure(struct inode *inode, u64 start, struct page *page, - unsigned int pg_offset); +int repair_io_failure(struct btrfs_inode *inode, u64 start, u64 length, + u64 logical, struct page *page, + unsigned int pg_offset, int mirror_num); +int clean_io_failure(struct btrfs_inode *inode, u64 start, + struct page *page, unsigned int pg_offset); void end_extent_writepage(struct page *page, int err, u64 start, u64 end); int repair_eb_io_failure(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, int mirror_num); @@ -480,7 +492,9 @@ struct io_failure_record { int in_validation; }; -void 
btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end); + +void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start, + u64 end); int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end, struct io_failure_record **failrec_ret); int btrfs_check_repairable(struct inode *inode, struct bio *failed_bio, @@ -489,7 +503,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio, struct io_failure_record *failrec, struct page *page, int pg_offset, int icsum, bio_end_io_t *endio_func, void *data); -int free_io_failure(struct inode *inode, struct io_failure_record *rec); +int free_io_failure(struct btrfs_inode *inode, struct io_failure_record *rec); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS noinline u64 find_lock_delalloc_range(struct inode *inode, struct extent_io_tree *tree, diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index f7b9a92ad56d..64fcb31d7163 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c @@ -214,7 +214,7 @@ static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, * read from the commit root and sidestep a nasty deadlock * between reading the free space cache and updating the csum tree. */ - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { path->search_commit_root = 1; path->skip_locking = 1; } @@ -643,7 +643,33 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, /* delete the entire item, it is inside our range */ if (key.offset >= bytenr && csum_end <= end_byte) { - ret = btrfs_del_item(trans, root, path); + int del_nr = 1; + + /* + * Check how many csum items preceding this one in this + * leaf correspond to our range and then delete them all + * at once. + */ + if (key.offset > bytenr && path->slots[0] > 0) { + int slot = path->slots[0] - 1; + + while (slot >= 0) { + struct btrfs_key pk; + + btrfs_item_key_to_cpu(leaf, &pk, slot); + if (pk.offset < bytenr || + pk.type != BTRFS_EXTENT_CSUM_KEY || + pk.objectid != + BTRFS_EXTENT_CSUM_OBJECTID) + break; + path->slots[0] = slot; + del_nr++; + key.offset = pk.offset; + slot--; + } + } + ret = btrfs_del_items(trans, root, path, + path->slots[0], del_nr); if (ret) goto out; if (key.offset == bytenr) @@ -904,14 +930,14 @@ fail_unlock: goto out; } -void btrfs_extent_item_to_extent_map(struct inode *inode, +void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode, const struct btrfs_path *path, struct btrfs_file_extent_item *fi, const bool new_inline, struct extent_map *em) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf = path->nodes[0]; const int slot = path->slots[0]; struct btrfs_key key; @@ -976,8 +1002,8 @@ void btrfs_extent_item_to_extent_map(struct inode *inode, } } else { btrfs_err(fs_info, - "unknown file extent item type %d, inode %llu, offset %llu, root %llu", - type, btrfs_ino(BTRFS_I(inode)), extent_start, + "unknown file extent item type %d, inode %llu, offset %llu, " + "root %llu", type, btrfs_ino(inode), extent_start, root->root_key.objectid); } } diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index c1d2a07205da..520cb7230b2d 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -92,10 +92,10 @@ static int __compare_inode_defrag(struct inode_defrag *defrag1, * If an existing record is found the defrag item you * pass in is freed */ -static int __btrfs_add_inode_defrag(struct 
inode *inode, +static int __btrfs_add_inode_defrag(struct btrfs_inode *inode, struct inode_defrag *defrag) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); struct inode_defrag *entry; struct rb_node **p; struct rb_node *parent = NULL; @@ -123,7 +123,7 @@ static int __btrfs_add_inode_defrag(struct inode *inode, return -EEXIST; } } - set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags); rb_link_node(&defrag->rb_node, parent, p); rb_insert_color(&defrag->rb_node, &fs_info->defrag_inodes); return 0; @@ -145,10 +145,10 @@ static inline int __need_auto_defrag(struct btrfs_fs_info *fs_info) * enabled */ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct inode_defrag *defrag; u64 transid; int ret; @@ -156,24 +156,24 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, if (!__need_auto_defrag(fs_info)) return 0; - if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) + if (test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) return 0; if (trans) transid = trans->transid; else - transid = BTRFS_I(inode)->root->last_trans; + transid = inode->root->last_trans; defrag = kmem_cache_zalloc(btrfs_inode_defrag_cachep, GFP_NOFS); if (!defrag) return -ENOMEM; - defrag->ino = btrfs_ino(BTRFS_I(inode)); + defrag->ino = btrfs_ino(inode); defrag->transid = transid; defrag->root = root->root_key.objectid; spin_lock(&fs_info->defrag_inodes_lock); - if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags)) { + if (!test_bit(BTRFS_INODE_IN_DEFRAG, &inode->runtime_flags)) { /* * If we set IN_DEFRAG flag and evict the inode from memory, * and then re-read this inode, this new inode doesn't have @@ -194,10 +194,10 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, * the same inode in the tree, we will merge them together (by * __btrfs_add_inode_defrag()) and free the one that we want to requeue. */ -static void btrfs_requeue_inode_defrag(struct inode *inode, +static void btrfs_requeue_inode_defrag(struct btrfs_inode *inode, struct inode_defrag *defrag) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; if (!__need_auto_defrag(fs_info)) @@ -334,7 +334,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, */ if (num_defrag == BTRFS_DEFRAG_BATCH) { defrag->last_offset = range.start; - btrfs_requeue_inode_defrag(inode, defrag); + btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag); } else if (defrag->last_offset && !defrag->cycled) { /* * we didn't fill our defrag batch, but @@ -343,7 +343,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info, */ defrag->last_offset = 0; defrag->cycled = 1; - btrfs_requeue_inode_defrag(inode, defrag); + btrfs_requeue_inode_defrag(BTRFS_I(inode), defrag); } else { kmem_cache_free(btrfs_inode_defrag_cachep, defrag); } @@ -529,13 +529,13 @@ int btrfs_dirty_pages(struct inode *inode, struct page **pages, * this drops all the extents in the cache that intersect the range * [start, end]. Existing extents are split as required. 
*/ -void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, +void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end, int skip_pinned) { struct extent_map *em; struct extent_map *split = NULL; struct extent_map *split2 = NULL; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; u64 len = end - start + 1; u64 gen; int ret; @@ -720,7 +720,7 @@ int __btrfs_drop_extents(struct btrfs_trans_handle *trans, int leafs_visited = 0; if (drop_cache) - btrfs_drop_extent_cache(inode, start, end - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end - 1, 0); if (start >= BTRFS_I(inode)->disk_i_size && !replace_extent) modify_tree = 0; @@ -1082,10 +1082,10 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot, * two or three. */ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, - struct inode *inode, u64 start, u64 end) + struct btrfs_inode *inode, u64 start, u64 end) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf; struct btrfs_path *path; struct btrfs_file_extent_item *fi; @@ -1102,7 +1102,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, int del_slot = 0; int recow; int ret; - u64 ino = btrfs_ino(BTRFS_I(inode)); + u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) @@ -1415,13 +1415,13 @@ fail: * the other < 0 number - Something wrong happens */ static noinline int -lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, +lock_and_cleanup_extent_if_need(struct btrfs_inode *inode, struct page **pages, size_t num_pages, loff_t pos, size_t write_bytes, u64 *lockstart, u64 *lockend, struct extent_state **cached_state) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 start_pos; u64 last_pos; int i; @@ -1432,30 +1432,30 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, + round_up(pos + write_bytes - start_pos, fs_info->sectorsize) - 1; - if (start_pos < inode->i_size) { + if (start_pos < inode->vfs_inode.i_size) { struct btrfs_ordered_extent *ordered; - lock_extent_bits(&BTRFS_I(inode)->io_tree, - start_pos, last_pos, cached_state); + lock_extent_bits(&inode->io_tree, start_pos, last_pos, + cached_state); ordered = btrfs_lookup_ordered_range(inode, start_pos, last_pos - start_pos + 1); if (ordered && ordered->file_offset + ordered->len > start_pos && ordered->file_offset <= last_pos) { - unlock_extent_cached(&BTRFS_I(inode)->io_tree, - start_pos, last_pos, - cached_state, GFP_NOFS); + unlock_extent_cached(&inode->io_tree, start_pos, + last_pos, cached_state, GFP_NOFS); for (i = 0; i < num_pages; i++) { unlock_page(pages[i]); put_page(pages[i]); } - btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_start_ordered_extent(&inode->vfs_inode, + ordered, 1); btrfs_put_ordered_extent(ordered); return -EAGAIN; } if (ordered) btrfs_put_ordered_extent(ordered); - clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos, + clear_extent_bit(&inode->io_tree, start_pos, last_pos, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0, cached_state, GFP_NOFS); @@ -1474,11 +1474,11 @@ lock_and_cleanup_extent_if_need(struct inode *inode, struct page **pages, return ret; } -static noinline int check_can_nocow(struct 
inode *inode, loff_t pos, +static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos, size_t *write_bytes) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_ordered_extent *ordered; u64 lockstart, lockend; u64 num_bytes; @@ -1493,19 +1493,20 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos, fs_info->sectorsize) - 1; while (1) { - lock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); + lock_extent(&inode->io_tree, lockstart, lockend); ordered = btrfs_lookup_ordered_range(inode, lockstart, lockend - lockstart + 1); if (!ordered) { break; } - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); - btrfs_start_ordered_extent(inode, ordered, 1); + unlock_extent(&inode->io_tree, lockstart, lockend); + btrfs_start_ordered_extent(&inode->vfs_inode, ordered, 1); btrfs_put_ordered_extent(ordered); } num_bytes = lockend - lockstart + 1; - ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL); + ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes, + NULL, NULL, NULL); if (ret <= 0) { ret = 0; btrfs_end_write_no_snapshoting(root); @@ -1514,7 +1515,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos, num_bytes - pos + lockstart); } - unlock_extent(&BTRFS_I(inode)->io_tree, lockstart, lockend); + unlock_extent(&inode->io_tree, lockstart, lockend); return ret; } @@ -1579,7 +1580,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, if (ret < 0) { if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) && - check_can_nocow(inode, pos, &write_bytes) > 0) { + check_can_nocow(BTRFS_I(inode), pos, + &write_bytes) > 0) { /* * For nodata cow case, no need to reserve * data space. 
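check_can_nocow() above keeps its lock/lookup/back-off loop unchanged by the conversion: lock the extent range, look for a conflicting ordered extent, and if one exists drop the lock, wait for it, and retry. Below is a tiny user-space sketch of that retry shape using a plain mutex and a fake "pending work" counter; lookup_conflict() and wait_for_conflict() are invented stand-ins for the ordered-extent lookup and wait, not real kernel calls.

/* build: cc -std=c11 retry.c -lpthread */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;
static int pending_ordered = 2;         /* pretend two writebacks are in flight */

static bool lookup_conflict(void)       { return pending_ordered > 0; }
static void wait_for_conflict(void)     { pending_ordered--; }

int main(void)
{
	for (;;) {
		pthread_mutex_lock(&range_lock);
		if (!lookup_conflict())
			break;                  /* exit the loop still holding the lock */
		pthread_mutex_unlock(&range_lock);
		wait_for_conflict();            /* wait outside the lock, then retry */
	}
	printf("range locked with no conflicting work\n");
	pthread_mutex_unlock(&range_lock);
	return 0;
}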
@@ -1599,7 +1601,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, } } - ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), + reserve_bytes); if (ret) { if (!only_release_metadata) btrfs_free_reserved_data_space(inode, pos, @@ -1623,9 +1626,9 @@ again: if (ret) break; - ret = lock_and_cleanup_extent_if_need(inode, pages, num_pages, - pos, write_bytes, &lockstart, - &lockend, &cached_state); + ret = lock_and_cleanup_extent_if_need(BTRFS_I(inode), pages, + num_pages, pos, write_bytes, &lockstart, + &lockend, &cached_state); if (ret < 0) { if (ret == -EAGAIN) goto again; @@ -1677,7 +1680,7 @@ again: spin_unlock(&BTRFS_I(inode)->lock); } if (only_release_metadata) { - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), release_bytes); } else { u64 __pos; @@ -1738,7 +1741,8 @@ again: if (release_bytes) { if (only_release_metadata) { btrfs_end_write_no_snapshoting(root); - btrfs_delalloc_release_metadata(inode, release_bytes); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + release_bytes); } else { btrfs_delalloc_release_space(inode, round_down(pos, fs_info->sectorsize), @@ -2193,7 +2197,7 @@ static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma) return 0; } -static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, +static int hole_mergeable(struct btrfs_inode *inode, struct extent_buffer *leaf, int slot, u64 start, u64 end) { struct btrfs_file_extent_item *fi; @@ -2203,7 +2207,7 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, return 0; btrfs_item_key_to_cpu(leaf, &key, slot); - if (key.objectid != btrfs_ino(BTRFS_I(inode)) || + if (key.objectid != btrfs_ino(inode) || key.type != BTRFS_EXTENT_DATA_KEY) return 0; @@ -2222,22 +2226,23 @@ static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf, return 0; } -static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, - struct btrfs_path *path, u64 offset, u64 end) +static int fill_holes(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode, + struct btrfs_path *path, u64 offset, u64 end) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct extent_buffer *leaf; struct btrfs_file_extent_item *fi; struct extent_map *hole_em; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; struct btrfs_key key; int ret; if (btrfs_fs_incompat(fs_info, NO_HOLES)) goto out; - key.objectid = btrfs_ino(BTRFS_I(inode)); + key.objectid = btrfs_ino(inode); key.type = BTRFS_EXTENT_DATA_KEY; key.offset = offset; @@ -2253,7 +2258,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, } leaf = path->nodes[0]; - if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) { + if (hole_mergeable(inode, leaf, path->slots[0] - 1, offset, end)) { u64 num_bytes; path->slots[0]--; @@ -2285,7 +2290,7 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, } btrfs_release_path(path); - ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)), + ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset, 0, 0, end - offset, 0, end - offset, 0, 0, 0); if (ret) return ret; @@ -2296,8 +2301,7 @@ out: hole_em = alloc_extent_map(); if (!hole_em) 
{ btrfs_drop_extent_cache(inode, offset, end - 1, 0); - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); } else { hole_em->start = offset; hole_em->len = end - offset; @@ -2320,7 +2324,7 @@ out: free_extent_map(hole_em); if (ret) set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); } return 0; @@ -2337,7 +2341,7 @@ static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len) struct extent_map *em; int ret = 0; - em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, *start, *len, 0); if (IS_ERR_OR_NULL(em)) { if (!em) ret = -ENOMEM; @@ -2550,8 +2554,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) trans->block_rsv = &fs_info->trans_block_rsv; if (cur_offset < drop_end && cur_offset < ino_size) { - ret = fill_holes(trans, inode, path, cur_offset, - drop_end); + ret = fill_holes(trans, BTRFS_I(inode), path, + cur_offset, drop_end); if (ret) { /* * If we failed then we didn't insert our hole @@ -2622,7 +2626,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) * cur_offset == drop_end). */ if (cur_offset < ino_size && cur_offset < drop_end) { - ret = fill_holes(trans, inode, path, cur_offset, drop_end); + ret = fill_holes(trans, BTRFS_I(inode), path, + cur_offset, drop_end); if (ret) { /* Same comment as above. */ btrfs_abort_transaction(trans, ret); @@ -2747,7 +2752,8 @@ static long btrfs_fallocate(struct file *file, int mode, * * For qgroup space, it will be checked later. */ - ret = btrfs_alloc_data_chunk_ondemand(inode, alloc_end - alloc_start); + ret = btrfs_alloc_data_chunk_ondemand(BTRFS_I(inode), + alloc_end - alloc_start); if (ret < 0) return ret; @@ -2827,7 +2833,7 @@ static long btrfs_fallocate(struct file *file, int mode, /* First, check if we exceed the qgroup limit */ INIT_LIST_HEAD(&reserve_list); while (1) { - em = btrfs_get_extent(inode, NULL, 0, cur_offset, + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, alloc_end - cur_offset, 0); if (IS_ERR_OR_NULL(em)) { if (!em) @@ -2954,7 +2960,8 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence) &cached_state); while (start < inode->i_size) { - em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent_fiemap(BTRFS_I(inode), NULL, 0, + start, len, 0); if (IS_ERR(em)) { ret = PTR_ERR(em); em = NULL; diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 493a654b6012..da6841efac26 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -261,7 +261,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans, btrfs_free_path(path); } - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); truncate_pagecache(inode, 0); /* @@ -3546,7 +3546,8 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root, if (ret) { if (release_metadata) - btrfs_delalloc_release_metadata(inode, inode->i_size); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + inode->i_size); #ifdef DEBUG btrfs_err(fs_info, "failed to write free ino cache for root %llu", diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index 3bbb8f095953..5c6c20ec64d8 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c @@ -499,7 +499,7 @@ again: ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, prealloc, prealloc, &alloc_hint); if (ret) { - 
btrfs_delalloc_release_metadata(inode, prealloc); + btrfs_delalloc_release_metadata(BTRFS_I(inode), prealloc); goto out_put; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index f02823f088c2..ee6978d80491 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -316,8 +316,8 @@ static noinline int cow_file_range_inline(struct btrfs_root *root, } set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags); - btrfs_delalloc_release_metadata(inode, end + 1 - start); - btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); + btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start); + btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0); out: /* * Don't forget to free the reserved space, as for inlined extent @@ -389,12 +389,12 @@ static inline int inode_need_compress(struct inode *inode) return 0; } -static inline void inode_should_defrag(struct inode *inode, +static inline void inode_should_defrag(struct btrfs_inode *inode, u64 start, u64 end, u64 num_bytes, u64 small_write) { /* If this is a small write inside eof, kick off a defrag */ if (num_bytes < small_write && - (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) + (start > 0 || end + 1 < inode->disk_i_size)) btrfs_add_inode_defrag(NULL, inode); } @@ -430,23 +430,23 @@ static noinline void compress_file_range(struct inode *inode, int ret = 0; struct page **pages = NULL; unsigned long nr_pages; - unsigned long nr_pages_ret = 0; unsigned long total_compressed = 0; unsigned long total_in = 0; - unsigned long max_compressed = SZ_128K; - unsigned long max_uncompressed = SZ_128K; int i; int will_compress; int compress_type = fs_info->compress_type; int redirty = 0; - inode_should_defrag(inode, start, end, end - start + 1, SZ_16K); + inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1, + SZ_16K); actual_end = min_t(u64, isize, end + 1); again: will_compress = 0; nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1; - nr_pages = min_t(unsigned long, nr_pages, SZ_128K / PAGE_SIZE); + BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0); + nr_pages = min_t(unsigned long, nr_pages, + BTRFS_MAX_COMPRESSED / PAGE_SIZE); /* * we don't want to send crud past the end of i_size through @@ -471,17 +471,8 @@ again: (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) goto cleanup_and_bail_uncompressed; - /* we want to make sure that amount of ram required to uncompress - * an extent is reasonable, so we limit the total size in ram - * of a compressed extent to 128k. This is a crucial number - * because it also controls how easily we can spread reads across - * cpus for decompression. - * - * We also want to make sure the amount of IO required to do - * a random read is reasonably small, so we limit the size of - * a compressed extent to 128k. 
- */ - total_compressed = min(total_compressed, max_uncompressed); + total_compressed = min_t(unsigned long, total_compressed, + BTRFS_MAX_UNCOMPRESSED); num_bytes = ALIGN(end - start + 1, blocksize); num_bytes = max(blocksize, num_bytes); total_in = 0; @@ -516,16 +507,15 @@ again: redirty = 1; ret = btrfs_compress_pages(compress_type, inode->i_mapping, start, - total_compressed, pages, - nr_pages, &nr_pages_ret, + pages, + &nr_pages, &total_in, - &total_compressed, - max_compressed); + &total_compressed); if (!ret) { unsigned long offset = total_compressed & (PAGE_SIZE - 1); - struct page *page = pages[nr_pages_ret - 1]; + struct page *page = pages[nr_pages - 1]; char *kaddr; /* zero the tail end of the last page, we might be @@ -606,7 +596,7 @@ cont: * will submit them to the elevator. */ add_async_extent(async_cow, start, num_bytes, - total_compressed, pages, nr_pages_ret, + total_compressed, pages, nr_pages, compress_type); if (start + num_bytes < end) { @@ -623,14 +613,14 @@ cont: * the compression code ran but failed to make things smaller, * free any pages it allocated and our page pointer array */ - for (i = 0; i < nr_pages_ret; i++) { + for (i = 0; i < nr_pages; i++) { WARN_ON(pages[i]->mapping); put_page(pages[i]); } kfree(pages); pages = NULL; total_compressed = 0; - nr_pages_ret = 0; + nr_pages = 0; /* flag the file so we don't compress in the future */ if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && @@ -659,7 +649,7 @@ cleanup_and_bail_uncompressed: return; free_pages_out: - for (i = 0; i < nr_pages_ret; i++) { + for (i = 0; i < nr_pages; i++) { WARN_ON(pages[i]->mapping); put_page(pages[i]); } @@ -806,7 +796,8 @@ retry: BTRFS_ORDERED_COMPRESSED, async_extent->compress_type); if (ret) { - btrfs_drop_extent_cache(inode, async_extent->start, + btrfs_drop_extent_cache(BTRFS_I(inode), + async_extent->start, async_extent->start + async_extent->ram_size - 1, 0); goto out_free_reserve; @@ -933,7 +924,7 @@ static noinline int cow_file_range(struct inode *inode, struct extent_map *em; int ret = 0; - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { WARN_ON_ONCE(1); ret = -EINVAL; goto out_unlock; @@ -943,7 +934,7 @@ static noinline int cow_file_range(struct inode *inode, num_bytes = max(blocksize, num_bytes); disk_num_bytes = num_bytes; - inode_should_defrag(inode, start, end, num_bytes, SZ_64K); + inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K); if (start == 0) { /* lets try to make an inline extent */ @@ -971,7 +962,8 @@ static noinline int cow_file_range(struct inode *inode, btrfs_super_total_bytes(fs_info->super_copy)); alloc_hint = get_extent_allocation_hint(inode, start, num_bytes); - btrfs_drop_extent_cache(inode, start, start + num_bytes - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, + start + num_bytes - 1, 0); while (disk_num_bytes > 0) { unsigned long op; @@ -1039,7 +1031,7 @@ out: return ret; out_drop_extent_cache: - btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0); out_reserve: btrfs_dec_block_group_reservations(fs_info, ins.objectid); btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1); @@ -1231,7 +1223,7 @@ static noinline int run_delalloc_nocow(struct inode *inode, return -ENOMEM; } - nolock = btrfs_is_free_space_inode(inode); + nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); cow_start = (u64)-1; cur_offset = start; @@ -1331,10 +1323,16 @@ next_slot: * either valid or do not exist. 
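The compress_file_range() hunk above replaces the open-coded 128K limits with named constants and adds a BUILD_BUG_ON() that the compressed-extent cap is a whole number of pages, so the pages-per-extent division next to it is exact. A user-space sketch of the same compile-time check using C11 _Static_assert follows; the macro names echo the patch, but the 128K value and 4K page size are assumptions for the example only.

/* build: cc -std=c11 cap.c */
#include <stdio.h>

#define PAGE_SIZE        4096UL
#define MAX_COMPRESSED   (128UL * 1024)   /* assumed 128K, matching the old open-coded limit */

/* fails the build, not the run, if the cap stops being page aligned */
_Static_assert((MAX_COMPRESSED % PAGE_SIZE) == 0,
	       "compressed extent cap must be a whole number of pages");

int main(void)
{
	unsigned long nr_pages = MAX_COMPRESSED / PAGE_SIZE;

	printf("at most %lu pages per compressed extent\n", nr_pages);
	return 0;
}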
*/ if (csum_exist_in_range(fs_info, disk_bytenr, - num_bytes)) + num_bytes)) { + if (!nolock) + btrfs_end_write_no_snapshoting(root); goto out_check; - if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) + } + if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) { + if (!nolock) + btrfs_end_write_no_snapshoting(root); goto out_check; + } nocow = 1; } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { extent_end = found_key.offset + @@ -1629,15 +1627,15 @@ static void btrfs_add_delalloc_inodes(struct btrfs_root *root, } static void btrfs_del_delalloc_inode(struct btrfs_root *root, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); spin_lock(&root->delalloc_lock); - if (!list_empty(&BTRFS_I(inode)->delalloc_inodes)) { - list_del_init(&BTRFS_I(inode)->delalloc_inodes); + if (!list_empty(&inode->delalloc_inodes)) { + list_del_init(&inode->delalloc_inodes); clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); root->nr_delalloc_inodes--; if (!root->nr_delalloc_inodes) { spin_lock(&fs_info->delalloc_root_lock); @@ -1670,7 +1668,7 @@ static void btrfs_set_bit_hook(struct inode *inode, if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { struct btrfs_root *root = BTRFS_I(inode)->root; u64 len = state->end + 1 - state->start; - bool do_list = !btrfs_is_free_space_inode(inode); + bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode)); if (*bits & EXTENT_FIRST_DELALLOC) { *bits &= ~EXTENT_FIRST_DELALLOC; @@ -1700,18 +1698,18 @@ static void btrfs_set_bit_hook(struct inode *inode, /* * extent_io.c clear_bit_hook, see set_bit_hook for why */ -static void btrfs_clear_bit_hook(struct inode *inode, +static void btrfs_clear_bit_hook(struct btrfs_inode *inode, struct extent_state *state, unsigned *bits) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); u64 len = state->end + 1 - state->start; u32 num_extents = count_max_extents(len); - spin_lock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) - BTRFS_I(inode)->defrag_bytes -= len; - spin_unlock(&BTRFS_I(inode)->lock); + inode->defrag_bytes -= len; + spin_unlock(&inode->lock); /* * set_bit and clear bit hooks normally require _irqsave/restore @@ -1719,15 +1717,15 @@ static void btrfs_clear_bit_hook(struct inode *inode, * bit, which is only set or cleared with irqs on */ if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; bool do_list = !btrfs_is_free_space_inode(inode); if (*bits & EXTENT_FIRST_DELALLOC) { *bits &= ~EXTENT_FIRST_DELALLOC; } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { - spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents -= num_extents; - spin_unlock(&BTRFS_I(inode)->lock); + spin_lock(&inode->lock); + inode->outstanding_extents -= num_extents; + spin_unlock(&inode->lock); } /* @@ -1747,18 +1745,19 @@ static void btrfs_clear_bit_hook(struct inode *inode, && do_list && !(state->state & EXTENT_NORESERVE) && (*bits & (EXTENT_DO_ACCOUNTING | EXTENT_CLEAR_DATA_RESV))) - btrfs_free_reserved_data_space_noquota(inode, + btrfs_free_reserved_data_space_noquota( + &inode->vfs_inode, state->start, len); __percpu_counter_add(&fs_info->delalloc_bytes, -len, fs_info->delalloc_batch); - 
spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->delalloc_bytes -= len; - if (do_list && BTRFS_I(inode)->delalloc_bytes == 0 && + spin_lock(&inode->lock); + inode->delalloc_bytes -= len; + if (do_list && inode->delalloc_bytes == 0 && test_bit(BTRFS_INODE_IN_DELALLOC_LIST, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) btrfs_del_delalloc_inode(root, inode); - spin_unlock(&BTRFS_I(inode)->lock); + spin_unlock(&inode->lock); } } @@ -1854,7 +1853,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio, skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; - if (btrfs_is_free_space_inode(inode)) + if (btrfs_is_free_space_inode(BTRFS_I(inode))) metadata = BTRFS_WQ_ENDIO_FREE_SPACE; if (bio_op(bio) != REQ_OP_WRITE) { @@ -1963,7 +1962,7 @@ again: if (PagePrivate2(page)) goto out; - ordered = btrfs_lookup_ordered_range(inode, page_start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, PAGE_SIZE); if (ordered) { unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, @@ -2793,16 +2792,17 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) bool nolock; bool truncated = false; - nolock = btrfs_is_free_space_inode(inode); + nolock = btrfs_is_free_space_inode(BTRFS_I(inode)); if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) { ret = -EIO; goto out; } - btrfs_free_io_failure_record(inode, ordered_extent->file_offset, - ordered_extent->file_offset + - ordered_extent->len - 1); + btrfs_free_io_failure_record(BTRFS_I(inode), + ordered_extent->file_offset, + ordered_extent->file_offset + + ordered_extent->len - 1); if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) { truncated = true; @@ -2873,7 +2873,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) compress_type = ordered_extent->compress_type; if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) { BUG_ON(compress_type); - ret = btrfs_mark_extent_written(trans, inode, + ret = btrfs_mark_extent_written(trans, BTRFS_I(inode), ordered_extent->file_offset, ordered_extent->file_offset + logical_len); @@ -2914,7 +2914,8 @@ out_unlock: ordered_extent->len - 1, &cached_state, GFP_NOFS); out: if (root != fs_info->tree_root) - btrfs_delalloc_release_metadata(inode, ordered_extent->len); + btrfs_delalloc_release_metadata(BTRFS_I(inode), + ordered_extent->len); if (trans) btrfs_end_transaction(trans); @@ -2929,7 +2930,7 @@ out: clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS); /* Drop the cache for the part of the extent we didn't write. 
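The run_delalloc_nocow() hunk above is a genuine fix rather than a type conversion: the two early "goto out_check" exits now call btrfs_end_write_no_snapshoting() first, so the write blocker taken earlier in that branch (not visible in this excerpt) is no longer leaked. The general C shape is that anything acquired before an early exit must be released on every exit path, either inline as the patch does or via a cleanup label; a toy sketch of the label style follows, with every name invented.

/* build: cc -std=c11 cleanup.c */
#include <stdbool.h>
#include <stdio.h>

static int blockers;
static void block_snapshots(void)   { blockers++; }
static void unblock_snapshots(void) { blockers--; }

static bool csum_in_range(int x)    { return x % 2; }   /* arbitrary stand-in checks */
static bool inc_writers(int x)      { return x % 3; }

static int check_one(int x)
{
	int ok = 0;

	block_snapshots();                      /* acquired ... */
	if (csum_in_range(x))
		goto out;                       /* ... so every exit must release it */
	if (!inc_writers(x))
		goto out;
	ok = 1;
out:
	unblock_snapshots();
	printf("x=%d ok=%d blockers=%d\n", x, ok, blockers);
	return ok;
}

int main(void)
{
	check_one(3);
	check_one(4);
	return 0;
}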
*/ - btrfs_drop_extent_cache(inode, start, end, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); /* * If the ordered extent had an IOERR or something else went @@ -2977,7 +2978,7 @@ static void finish_ordered_fn(struct btrfs_work *work) btrfs_finish_ordered_io(ordered_extent); } -static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, +static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate) { struct inode *inode = page->mapping->host; @@ -2991,9 +2992,9 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, ClearPagePrivate2(page); if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, end - start + 1, uptodate)) - return 0; + return; - if (btrfs_is_free_space_inode(inode)) { + if (btrfs_is_free_space_inode(BTRFS_I(inode))) { wq = fs_info->endio_freespace_worker; func = btrfs_freespace_write_helper; } else { @@ -3004,8 +3005,6 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL, NULL); btrfs_queue_work(wq, &ordered_extent->work); - - return 0; } static int __readpage_endio_check(struct inode *inode, @@ -3028,7 +3027,7 @@ static int __readpage_endio_check(struct inode *inode, kunmap_atomic(kaddr); return 0; zeroit: - btrfs_print_data_csum_error(inode, start, csum, csum_expected, + btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected, io_bio->mirror_num); memset(kaddr + pgoff, 1, len); flush_dcache_page(page); @@ -3167,10 +3166,11 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans, * NOTE: caller of this function should reserve 5 units of metadata for * this function. */ -int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) +int btrfs_orphan_add(struct btrfs_trans_handle *trans, + struct btrfs_inode *inode) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); + struct btrfs_root *root = inode->root; struct btrfs_block_rsv *block_rsv = NULL; int reserve = 0; int insert = 0; @@ -3192,7 +3192,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) } if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags)) { + &inode->runtime_flags)) { #if 0 /* * For proper ENOSPC handling, we should do orphan @@ -3209,7 +3209,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) } if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) reserve = 1; spin_unlock(&root->orphan_lock); @@ -3220,28 +3220,27 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) if (ret) { atomic_dec(&root->orphan_inodes); clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); if (insert) clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); return ret; } } /* insert an orphan item to track this unlinked/truncated file */ if (insert >= 1) { - ret = btrfs_insert_orphan_item(trans, root, - btrfs_ino(BTRFS_I(inode))); + ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); if (ret) { atomic_dec(&root->orphan_inodes); if (reserve) { clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); 
btrfs_orphan_release_metadata(inode); } if (ret != -EEXIST) { clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); btrfs_abort_transaction(trans, ret); return ret; } @@ -3266,20 +3265,20 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) * item for this particular inode. */ static int btrfs_orphan_del(struct btrfs_trans_handle *trans, - struct inode *inode) + struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; int delete_item = 0; int release_rsv = 0; int ret = 0; spin_lock(&root->orphan_lock); if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) delete_item = 1; if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED, - &BTRFS_I(inode)->runtime_flags)) + &inode->runtime_flags)) release_rsv = 1; spin_unlock(&root->orphan_lock); @@ -3287,7 +3286,7 @@ static int btrfs_orphan_del(struct btrfs_trans_handle *trans, atomic_dec(&root->orphan_inodes); if (trans) ret = btrfs_del_orphan_item(trans, root, - btrfs_ino(BTRFS_I(inode))); + btrfs_ino(inode)); } if (release_rsv) @@ -3453,7 +3452,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) ret = PTR_ERR(trans); goto out; } - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); btrfs_end_transaction(trans); if (ret) { iput(inode); @@ -3462,7 +3461,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) ret = btrfs_truncate(inode); if (ret) - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); } else { nr_unlink++; } @@ -3617,7 +3616,7 @@ static int btrfs_read_locked_inode(struct inode *inode) set_nlink(inode, btrfs_inode_nlink(leaf, inode_item)); i_uid_write(inode, btrfs_inode_uid(leaf, inode_item)); i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); - btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); + btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item)); inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); @@ -3865,7 +3864,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, * The data relocation inode should also be directly updated * without delay */ - if (!btrfs_is_free_space_inode(inode) + if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { btrfs_update_root_times(trans, root); @@ -3988,8 +3987,7 @@ err: if (ret) goto out; - btrfs_i_size_write(&dir->vfs_inode, - dir->vfs_inode.i_size - name_len * 2); + btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2); inode_inc_iversion(&inode->vfs_inode); inode_inc_iversion(&dir->vfs_inode); inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime = @@ -4056,7 +4054,7 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry) goto out; if (inode->i_nlink == 0) { - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) goto out; } @@ -4137,7 +4135,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, goto out; } - btrfs_i_size_write(dir, dir->i_size - name_len * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2); inode_inc_iversion(dir); dir->i_mtime = dir->i_ctime = current_time(dir); ret = btrfs_update_inode_fallback(trans, root, dir); @@ -4173,7 +4171,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) 
goto out; } - err = btrfs_orphan_add(trans, inode); + err = btrfs_orphan_add(trans, BTRFS_I(inode)); if (err) goto out; @@ -4184,7 +4182,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) BTRFS_I(d_inode(dentry)), dentry->d_name.name, dentry->d_name.len); if (!err) { - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); /* * Propagate the last_unlink_trans value of the deleted dir to * its parent directory. This is to prevent an unrecoverable @@ -4320,7 +4318,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, * for non-free space inodes and ref cows, we want to back off from * time to time */ - if (!btrfs_is_free_space_inode(inode) && + if (!btrfs_is_free_space_inode(BTRFS_I(inode)) && test_bit(BTRFS_ROOT_REF_COWS, &root->state)) be_nice = 1; @@ -4336,7 +4334,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, */ if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) || root == fs_info->tree_root) - btrfs_drop_extent_cache(inode, ALIGN(new_size, + btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size, fs_info->sectorsize), (u64)-1, 0); @@ -4412,19 +4410,8 @@ search_again: if (found_type > min_type) { del_item = 1; } else { - if (item_end < new_size) { - /* - * With NO_HOLES mode, for the following mapping - * - * [0-4k][hole][8k-12k] - * - * if truncating isize down to 6k, it ends up - * isize being 8k. - */ - if (btrfs_fs_incompat(root->fs_info, NO_HOLES)) - last_size = new_size; + if (item_end < new_size) break; - } if (found_key.offset >= new_size) del_item = 1; else @@ -4607,8 +4594,12 @@ out: btrfs_abort_transaction(trans, ret); } error: - if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { + ASSERT(last_size >= new_size); + if (!err && last_size > new_size) + last_size = new_size; btrfs_ordered_update_i_size(inode, last_size, NULL); + } btrfs_free_path(path); @@ -4835,7 +4826,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) lock_extent_bits(io_tree, hole_start, block_end - 1, &cached_state); - ordered = btrfs_lookup_ordered_range(inode, hole_start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start, block_end - hole_start); if (!ordered) break; @@ -4847,7 +4838,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) cur_offset = hole_start; while (1) { - em = btrfs_get_extent(inode, NULL, 0, cur_offset, + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset, block_end - cur_offset, 0); if (IS_ERR(em)) { err = PTR_ERR(em); @@ -4864,7 +4855,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) hole_size); if (err) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + hole_size - 1, 0); hole_em = alloc_extent_map(); if (!hole_em) { @@ -4890,7 +4881,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) write_unlock(&em_tree->lock); if (err != -EEXIST) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), + cur_offset, cur_offset + hole_size - 1, 0); } @@ -4987,7 +4979,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) * so we need to guarantee from this point on that everything * will be consistent. 
*/ - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); btrfs_end_transaction(trans); if (ret) return ret; @@ -4996,9 +4988,9 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) truncate_setsize(inode, newsize); /* Disable nonlocked read DIO to avoid the end less truncate */ - btrfs_inode_block_unlocked_dio(inode); + btrfs_inode_block_unlocked_dio(BTRFS_I(inode)); inode_dio_wait(inode); - btrfs_inode_resume_unlocked_dio(inode); + btrfs_inode_resume_unlocked_dio(BTRFS_I(inode)); ret = btrfs_truncate(inode); if (ret && inode->i_nlink) { @@ -5007,7 +4999,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) /* To get a stable disk_i_size */ err = btrfs_wait_ordered_range(inode, 0, (u64)-1); if (err) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); return err; } @@ -5019,11 +5011,11 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) */ trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); return ret; } i_size_write(inode, BTRFS_I(inode)->disk_i_size); - err = btrfs_orphan_del(trans, inode); + err = btrfs_orphan_del(trans, BTRFS_I(inode)); if (err) btrfs_abort_transaction(trans, err); btrfs_end_transaction(trans); @@ -5181,18 +5173,18 @@ void btrfs_evict_inode(struct inode *inode) if (inode->i_nlink && ((btrfs_root_refs(&root->root_item) != 0 && root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) || - btrfs_is_free_space_inode(inode))) + btrfs_is_free_space_inode(BTRFS_I(inode)))) goto no_delete; if (is_bad_inode(inode)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } /* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? 
*/ if (!special_file(inode->i_mode)) btrfs_wait_ordered_range(inode, 0, (u64)-1); - btrfs_free_io_failure_record(inode, 0, (u64)-1); + btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1); if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) { BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM, @@ -5208,20 +5200,20 @@ void btrfs_evict_inode(struct inode *inode) ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode)); if (ret) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP); if (!rsv) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); goto no_delete; } rsv->size = min_size; rsv->failfast = 1; global_rsv = &fs_info->global_block_rsv; - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); /* * This is a bit simpler than btrfs_truncate since we've already @@ -5256,14 +5248,14 @@ void btrfs_evict_inode(struct inode *inode) btrfs_warn(fs_info, "Could not get space for a delete, will truncate on mount %d", ret); - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } trans = btrfs_join_transaction(root); if (IS_ERR(trans)) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } @@ -5289,7 +5281,7 @@ void btrfs_evict_inode(struct inode *inode) if (ret) { ret = btrfs_commit_transaction(trans); if (ret) { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); btrfs_free_block_rsv(fs_info, rsv); goto no_delete; } @@ -5318,9 +5310,9 @@ void btrfs_evict_inode(struct inode *inode) */ if (ret == 0) { trans->block_rsv = root->orphan_block_rsv; - btrfs_orphan_del(trans, inode); + btrfs_orphan_del(trans, BTRFS_I(inode)); } else { - btrfs_orphan_del(NULL, inode); + btrfs_orphan_del(NULL, BTRFS_I(inode)); } trans->block_rsv = &fs_info->trans_block_rsv; @@ -5898,7 +5890,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags)) return 0; - if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(inode)) + if (btrfs_fs_closing(root->fs_info) && + btrfs_is_free_space_inode(BTRFS_I(inode))) nolock = true; if (wbc->sync_mode == WB_SYNC_ALL) { @@ -5978,15 +5971,15 @@ static int btrfs_update_time(struct inode *inode, struct timespec *now, * and then set the in-memory index_cnt variable to reflect * free sequence numbers */ -static int btrfs_set_inode_index_count(struct inode *inode) +static int btrfs_set_inode_index_count(struct btrfs_inode *inode) { - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_key key, found_key; struct btrfs_path *path; struct extent_buffer *leaf; int ret; - key.objectid = btrfs_ino(BTRFS_I(inode)); + key.objectid = btrfs_ino(inode); key.type = BTRFS_DIR_INDEX_KEY; key.offset = (u64)-1; @@ -6009,7 +6002,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) * else has to start at 2 */ if (path->slots[0] == 0) { - BTRFS_I(inode)->index_cnt = 2; + inode->index_cnt = 2; goto out; } @@ -6018,13 +6011,13 @@ static int btrfs_set_inode_index_count(struct inode *inode) leaf = path->nodes[0]; btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); - if (found_key.objectid != btrfs_ino(BTRFS_I(inode)) || + if (found_key.objectid != btrfs_ino(inode) || found_key.type != BTRFS_DIR_INDEX_KEY) { - BTRFS_I(inode)->index_cnt = 2; + 
inode->index_cnt = 2; goto out; } - BTRFS_I(inode)->index_cnt = found_key.offset + 1; + inode->index_cnt = found_key.offset + 1; out: btrfs_free_path(path); return ret; @@ -6034,12 +6027,12 @@ out: * helper to find a free sequence number in a given directory. This current * code is very simple, later versions will do smarter things in the btree */ -int btrfs_set_inode_index(struct inode *dir, u64 *index) +int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index) { int ret = 0; - if (BTRFS_I(dir)->index_cnt == (u64)-1) { - ret = btrfs_inode_delayed_dir_index_count(BTRFS_I(dir)); + if (dir->index_cnt == (u64)-1) { + ret = btrfs_inode_delayed_dir_index_count(dir); if (ret) { ret = btrfs_set_inode_index_count(dir); if (ret) @@ -6047,8 +6040,8 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) } } - *index = BTRFS_I(dir)->index_cnt; - BTRFS_I(dir)->index_cnt++; + *index = dir->index_cnt; + dir->index_cnt++; return ret; } @@ -6109,7 +6102,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, if (dir && name) { trace_btrfs_inode_request(dir); - ret = btrfs_set_inode_index(dir, index); + ret = btrfs_set_inode_index(BTRFS_I(dir), index); if (ret) { btrfs_free_path(path); iput(inode); @@ -6244,18 +6237,18 @@ static inline u8 btrfs_inode_type(struct inode *inode) * inode to the parent directory. */ int btrfs_add_link(struct btrfs_trans_handle *trans, - struct inode *parent_inode, struct inode *inode, + struct btrfs_inode *parent_inode, struct btrfs_inode *inode, const char *name, int name_len, int add_backref, u64 index) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret = 0; struct btrfs_key key; - struct btrfs_root *root = BTRFS_I(parent_inode)->root; - u64 ino = btrfs_ino(BTRFS_I(inode)); - u64 parent_ino = btrfs_ino(BTRFS_I(parent_inode)); + struct btrfs_root *root = parent_inode->root; + u64 ino = btrfs_ino(inode); + u64 parent_ino = btrfs_ino(parent_inode); if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { - memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); + memcpy(&key, &inode->root->root_key, sizeof(key)); } else { key.objectid = ino; key.type = BTRFS_INODE_ITEM_KEY; @@ -6277,7 +6270,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, ret = btrfs_insert_dir_item(trans, root, name, name_len, parent_inode, &key, - btrfs_inode_type(inode), index); + btrfs_inode_type(&inode->vfs_inode), index); if (ret == -EEXIST || ret == -EOVERFLOW) goto fail_dir_item; else if (ret) { @@ -6285,12 +6278,12 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, return ret; } - btrfs_i_size_write(parent_inode, parent_inode->i_size + + btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size + name_len * 2); - inode_inc_iversion(parent_inode); - parent_inode->i_mtime = parent_inode->i_ctime = - current_time(parent_inode); - ret = btrfs_update_inode(trans, root, parent_inode); + inode_inc_iversion(&parent_inode->vfs_inode); + parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime = + current_time(&parent_inode->vfs_inode); + ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode); if (ret) btrfs_abort_transaction(trans, ret); return ret; @@ -6314,8 +6307,8 @@ fail_dir_item: } static int btrfs_add_nondir(struct btrfs_trans_handle *trans, - struct inode *dir, struct dentry *dentry, - struct inode *inode, int backref, u64 index) + struct btrfs_inode *dir, struct dentry *dentry, + struct btrfs_inode *inode, int backref, u64 index) { int err = 
btrfs_add_link(trans, dir, inode, dentry->d_name.name, dentry->d_name.len, @@ -6371,7 +6364,8 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, if (err) goto out_unlock_inode; - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 0, index); if (err) { goto out_unlock_inode; } else { @@ -6448,7 +6442,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, if (err) goto out_unlock_inode; - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 0, index); if (err) goto out_unlock_inode; @@ -6490,7 +6485,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, if (inode->i_nlink >= BTRFS_LINK_MAX) return -EMLINK; - err = btrfs_set_inode_index(dir, &index); + err = btrfs_set_inode_index(BTRFS_I(dir), &index); if (err) goto fail; @@ -6514,7 +6509,8 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, ihold(inode); set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags); - err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode), + 1, index); if (err) { drop_inode = 1; @@ -6528,7 +6524,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, * If new hard link count is 1, it's a file created * with open(2) O_TMPFILE flag. */ - err = btrfs_orphan_del(trans, inode); + err = btrfs_orphan_del(trans, BTRFS_I(inode)); if (err) goto fail; } @@ -6589,13 +6585,14 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) if (err) goto out_fail_inode; - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); err = btrfs_update_inode(trans, root, inode); if (err) goto out_fail_inode; - err = btrfs_add_link(trans, dir, inode, dentry->d_name.name, - dentry->d_name.len, 0, index); + err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), + dentry->d_name.name, + dentry->d_name.len, 0, index); if (err) goto out_fail_inode; @@ -6725,25 +6722,26 @@ static noinline int uncompress_inline(struct btrfs_path *path, * This also copies inline extents directly into the page. 
*/ -struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create) +struct extent_map *btrfs_get_extent(struct btrfs_inode *inode, + struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; int err = 0; u64 extent_start = 0; u64 extent_end = 0; - u64 objectid = btrfs_ino(BTRFS_I(inode)); + u64 objectid = btrfs_ino(inode); u32 found_type; struct btrfs_path *path = NULL; - struct btrfs_root *root = BTRFS_I(inode)->root; + struct btrfs_root *root = inode->root; struct btrfs_file_extent_item *item; struct extent_buffer *leaf; struct btrfs_key found_key; struct extent_map *em = NULL; - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; - struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; + struct extent_io_tree *io_tree = &inode->io_tree; struct btrfs_trans_handle *trans = NULL; const bool new_inline = !page || create; @@ -6856,7 +6854,8 @@ next: goto not_found_em; } - btrfs_extent_item_to_extent_map(inode, path, item, new_inline, em); + btrfs_extent_item_to_extent_map(inode, path, item, + new_inline, em); if (found_type == BTRFS_FILE_EXTENT_REG || found_type == BTRFS_FILE_EXTENT_PREALLOC) { @@ -6992,7 +6991,7 @@ insert: write_unlock(&em_tree->lock); out: - trace_btrfs_get_extent(root, BTRFS_I(inode), em); + trace_btrfs_get_extent(root, inode, em); btrfs_free_path(path); if (trans) { @@ -7008,9 +7007,10 @@ out: return em; } -struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, - size_t pg_offset, u64 start, u64 len, - int create) +struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode, + struct page *page, + size_t pg_offset, u64 start, u64 len, + int create) { struct extent_map *em; struct extent_map *hole_em = NULL; @@ -7047,7 +7047,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag em = NULL; /* ok, we didn't find anything, lets look for delalloc */ - found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, + found = count_range_bits(&inode->io_tree, &range_start, end, len, EXTENT_DELALLOC, 1); found_end = range_start + found; if (found_end < range_start) @@ -7162,7 +7162,7 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode, if (ret) { if (em) { free_extent_map(em); - btrfs_drop_extent_cache(inode, start, + btrfs_drop_extent_cache(BTRFS_I(inode), start, start + len - 1, 0); } em = ERR_PTR(ret); @@ -7423,7 +7423,7 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, * doing DIO to, so we need to make sure there's no ordered * extents in this range. 
*/ - ordered = btrfs_lookup_ordered_range(inode, lockstart, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart, lockend - lockstart + 1); /* @@ -7529,7 +7529,7 @@ static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len, } do { - btrfs_drop_extent_cache(inode, em->start, + btrfs_drop_extent_cache(BTRFS_I(inode), em->start, em->start + em->len - 1, 0); write_lock(&em_tree->lock); ret = add_extent_mapping(em_tree, em, 1); @@ -7617,7 +7617,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, goto err; } - em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); if (IS_ERR(em)) { ret = PTR_ERR(em); goto unlock_err; @@ -7854,7 +7854,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, ret = btrfs_check_dio_repairable(inode, failed_bio, failrec, failed_mirror); if (!ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } @@ -7868,7 +7868,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page, pgoff, isector, repair_endio, repair_arg); if (!bio) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); return -EIO; } bio_set_op_attrs(bio, REQ_OP_READ, read_mode); @@ -7879,7 +7879,7 @@ static int dio_read_error(struct inode *inode, struct bio *failed_bio, ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror); if (ret) { - free_io_failure(inode, failrec); + free_io_failure(BTRFS_I(inode), failrec); bio_put(bio); } @@ -7909,7 +7909,7 @@ static void btrfs_retry_endio_nocsum(struct bio *bio) done->uptodate = 1; bio_for_each_segment_all(bvec, bio, i) - clean_io_failure(done->inode, done->start, bvec->bv_page, 0); + clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, 0); end: complete(&done->done); bio_put(bio); @@ -7995,7 +7995,7 @@ static void btrfs_retry_endio(struct bio *bio) bvec->bv_page, bvec->bv_offset, done->start, bvec->bv_len); if (!ret) - clean_io_failure(done->inode, done->start, + clean_io_failure(BTRFS_I(done->inode), done->start, bvec->bv_page, bvec->bv_offset); else uptodate = 0; @@ -8796,7 +8796,7 @@ static void btrfs_invalidatepage(struct page *page, unsigned int offset, lock_extent_bits(tree, page_start, page_end, &cached_state); again: start = page_start; - ordered = btrfs_lookup_ordered_range(inode, start, + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start, page_end - start + 1); if (ordered) { end = min(page_end, ordered->file_offset + ordered->len - 1); @@ -8962,7 +8962,8 @@ again: * we can't set the delalloc bits if there are pending ordered * extents. 
Drop our locks and wait for them to finish */ - ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE); + ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start, + PAGE_SIZE); if (ordered) { unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS); @@ -9160,7 +9161,7 @@ static int btrfs_truncate(struct inode *inode) if (ret == 0 && inode->i_nlink > 0) { trans->block_rsv = root->orphan_block_rsv; - ret = btrfs_orphan_del(trans, inode); + ret = btrfs_orphan_del(trans, BTRFS_I(inode)); if (ret) err = ret; } @@ -9205,7 +9206,7 @@ int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, inode->i_fop = &btrfs_dir_file_operations; set_nlink(inode, 1); - btrfs_i_size_write(inode, 0); + btrfs_i_size_write(BTRFS_I(inode), 0); unlock_new_inode(inode); err = btrfs_subvol_inherit_props(trans, new_root, parent_root); @@ -9278,7 +9279,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS void btrfs_test_destroy_inode(struct inode *inode) { - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode)); } #endif @@ -9333,7 +9334,7 @@ void btrfs_destroy_inode(struct inode *inode) } btrfs_qgroup_check_reserved_leak(inode); inode_tree_del(inode); - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); free: call_rcu(&inode->i_rcu, btrfs_i_callback); } @@ -9480,10 +9481,10 @@ static int btrfs_rename_exchange(struct inode *old_dir, * We need to find a free sequence number both in the source and * in the destination directory for the exchange. */ - ret = btrfs_set_inode_index(new_dir, &old_idx); + ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx); if (ret) goto out_fail; - ret = btrfs_set_inode_index(old_dir, &new_idx); + ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx); if (ret) goto out_fail; @@ -9581,7 +9582,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_fail; } - ret = btrfs_add_link(trans, new_dir, old_inode, + ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode), new_dentry->d_name.name, new_dentry->d_name.len, 0, old_idx); if (ret) { @@ -9589,7 +9590,7 @@ static int btrfs_rename_exchange(struct inode *old_dir, goto out_fail; } - ret = btrfs_add_link(trans, old_dir, new_inode, + ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode), old_dentry->d_name.name, old_dentry->d_name.len, 0, new_idx); if (ret) { @@ -9691,8 +9692,8 @@ static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, if (ret) goto out; - ret = btrfs_add_nondir(trans, dir, dentry, - inode, 0, index); + ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, + BTRFS_I(inode), 0, index); if (ret) goto out; @@ -9791,7 +9792,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (dest != root) btrfs_record_root_in_trans(trans, dest); - ret = btrfs_set_inode_index(new_dir, &index); + ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index); if (ret) goto out_fail; @@ -9858,14 +9859,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, new_dentry->d_name.len); } if (!ret && new_inode->i_nlink == 0) - ret = btrfs_orphan_add(trans, d_inode(new_dentry)); + ret = btrfs_orphan_add(trans, + BTRFS_I(d_inode(new_dentry))); if (ret) { btrfs_abort_transaction(trans, ret); goto out_fail; } } - ret = btrfs_add_link(trans, new_dir, old_inode, + ret = btrfs_add_link(trans, BTRFS_I(new_dir), 
BTRFS_I(old_inode), new_dentry->d_name.name, new_dentry->d_name.len, 0, index); if (ret) { @@ -10232,7 +10234,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, inode_nohighmem(inode); inode->i_mapping->a_ops = &btrfs_symlink_aops; inode_set_bytes(inode, name_len); - btrfs_i_size_write(inode, name_len); + btrfs_i_size_write(BTRFS_I(inode), name_len); err = btrfs_update_inode(trans, root, inode); /* * Last step, add directory indexes for our symlink inode. This is the @@ -10240,7 +10242,8 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, * elsewhere above. */ if (!err) - err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); + err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, + BTRFS_I(inode), 0, index); if (err) { drop_inode = 1; goto out_unlock_inode; @@ -10326,7 +10329,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, break; } - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + ins.offset -1, 0); em = alloc_extent_map(); @@ -10353,7 +10356,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, write_unlock(&em_tree->lock); if (ret != -EEXIST) break; - btrfs_drop_extent_cache(inode, cur_offset, + btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset, cur_offset + ins.offset - 1, 0); } @@ -10475,7 +10478,7 @@ static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) ret = btrfs_update_inode(trans, root, inode); if (ret) goto out_inode; - ret = btrfs_orphan_add(trans, inode); + ret = btrfs_orphan_add(trans, BTRFS_I(inode)); if (ret) goto out_inode; @@ -10505,6 +10508,12 @@ out_inode: } +__attribute__((const)) +static int dummy_readpage_io_failed_hook(struct page *page, int failed_mirror) +{ + return 0; +} + static const struct inode_operations btrfs_dir_inode_operations = { .getattr = btrfs_getattr, .lookup = btrfs_lookup, @@ -10543,10 +10552,14 @@ static const struct file_operations btrfs_dir_file_operations = { }; static const struct extent_io_ops btrfs_extent_io_ops = { - .fill_delalloc = run_delalloc_range, + /* mandatory callbacks */ .submit_bio_hook = btrfs_submit_bio_hook, - .merge_bio_hook = btrfs_merge_bio_hook, .readpage_end_io_hook = btrfs_readpage_end_io_hook, + .merge_bio_hook = btrfs_merge_bio_hook, + .readpage_io_failed_hook = dummy_readpage_io_failed_hook, + + /* optional callbacks */ + .fill_delalloc = run_delalloc_range, .writepage_end_io_hook = btrfs_writepage_end_io_hook, .writepage_start_hook = btrfs_writepage_start_hook, .set_bit_hook = btrfs_set_bit_hook, diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d8539979b44f..dabfc7ac48a6 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -434,7 +434,7 @@ int btrfs_is_empty_uuid(u8 *uuid) static noinline int create_subvol(struct inode *dir, struct dentry *dentry, - char *name, int namelen, + const char *name, int namelen, u64 *async_transid, struct btrfs_qgroup_inherit *inherit) { @@ -580,21 +580,21 @@ static noinline int create_subvol(struct inode *dir, /* * insert the directory item */ - ret = btrfs_set_inode_index(dir, &index); + ret = btrfs_set_inode_index(BTRFS_I(dir), &index); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } ret = btrfs_insert_dir_item(trans, root, - name, namelen, dir, &key, + name, namelen, BTRFS_I(dir), &key, BTRFS_FT_DIR, index); if (ret) { btrfs_abort_transaction(trans, ret); goto fail; } - btrfs_i_size_write(dir, dir->i_size + namelen * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size + namelen * 2); 
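The btrfs_extent_io_ops hunk above regroups the table into mandatory and optional callbacks and wires a do-nothing readpage_io_failed_hook so the mandatory slots are never NULL. The convention that buys is shown in the standalone sketch below: the core calls mandatory hooks unconditionally and NULL-checks only the optional ones. All structure and function names in the sketch are invented.

/* build: cc -std=c11 ops.c */
#include <stdio.h>

struct io_ops {
	/* mandatory: must be non-NULL, called without checks */
	int (*submit)(int nr);
	int (*read_failed)(int nr);
	/* optional: may be NULL */
	void (*writeback_done)(int nr);
};

static int my_submit(int nr)         { printf("submit %d\n", nr); return 0; }
static int dummy_read_failed(int nr) { (void)nr; return 0; }   /* no-op default */

static const struct io_ops ops = {
	.submit      = my_submit,
	.read_failed = dummy_read_failed,
	/* .writeback_done intentionally left NULL */
};

int main(void)
{
	ops.submit(1);                  /* mandatory: no NULL check needed */
	ops.read_failed(1);
	if (ops.writeback_done)         /* optional: check before calling */
		ops.writeback_done(1);
	return 0;
}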
ret = btrfs_update_inode(trans, root, dir); BUG_ON(ret); @@ -832,7 +832,7 @@ static inline int btrfs_may_create(struct inode *dir, struct dentry *child) * inside this filesystem so it's quite a bit simpler. */ static noinline int btrfs_mksubvol(const struct path *parent, - char *name, int namelen, + const char *name, int namelen, struct btrfs_root *snap_src, u64 *async_transid, bool readonly, struct btrfs_qgroup_inherit *inherit) @@ -1009,7 +1009,7 @@ static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start) /* get the big lock and read metadata off disk */ lock_extent_bits(io_tree, start, end, &cached); - em = btrfs_get_extent(inode, NULL, 0, start, len, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0); unlock_extent_cached(io_tree, start, end, &cached, GFP_NOFS); if (IS_ERR(em)) @@ -1625,7 +1625,7 @@ out: } static noinline int btrfs_ioctl_snap_create_transid(struct file *file, - char *name, unsigned long fd, int subvol, + const char *name, unsigned long fd, int subvol, u64 *transid, bool readonly, struct btrfs_qgroup_inherit *inherit) { @@ -3298,7 +3298,7 @@ static int clone_finish_inode_update(struct btrfs_trans_handle *trans, if (endoff > destoff + olen) endoff = destoff + olen; if (endoff > inode->i_size) - btrfs_i_size_write(inode, endoff); + btrfs_i_size_write(BTRFS_I(inode), endoff); ret = btrfs_update_inode(trans, root, inode); if (ret) { @@ -3311,20 +3311,19 @@ out: return ret; } -static void clone_update_extent_map(struct inode *inode, +static void clone_update_extent_map(struct btrfs_inode *inode, const struct btrfs_trans_handle *trans, const struct btrfs_path *path, const u64 hole_offset, const u64 hole_len) { - struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; int ret; em = alloc_extent_map(); if (!em) { - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); return; } @@ -3338,7 +3337,7 @@ static void clone_update_extent_map(struct inode *inode, if (btrfs_file_extent_type(path->nodes[0], fi) == BTRFS_FILE_EXTENT_INLINE) set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + &inode->runtime_flags); } else { em->start = hole_offset; em->len = hole_len; @@ -3364,8 +3363,7 @@ static void clone_update_extent_map(struct inode *inode, } if (ret) - set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags); + set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags); } /* @@ -3791,11 +3789,12 @@ process_slot: /* If we have an implicit hole (NO_HOLES feature). 
*/ if (drop_start < new_key.offset) - clone_update_extent_map(inode, trans, + clone_update_extent_map(BTRFS_I(inode), trans, NULL, drop_start, new_key.offset - drop_start); - clone_update_extent_map(inode, trans, path, 0, 0); + clone_update_extent_map(BTRFS_I(inode), trans, + path, 0, 0); btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); @@ -3845,8 +3844,9 @@ process_slot: btrfs_end_transaction(trans); goto out; } - clone_update_extent_map(inode, trans, NULL, last_dest_end, - destoff + len - last_dest_end); + clone_update_extent_map(BTRFS_I(inode), trans, NULL, + last_dest_end, + destoff + len - last_dest_end); ret = clone_finish_inode_update(trans, inode, destoff + len, destoff, olen, no_time_update); } diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index 45d26980caf9..f48c8c14dc14 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c @@ -76,7 +76,7 @@ static inline void write_compress_length(char *buf, size_t len) memcpy(buf, &dlen, LZO_LEN); } -static inline size_t read_compress_length(char *buf) +static inline size_t read_compress_length(const char *buf) { __le32 dlen; @@ -86,13 +86,11 @@ static inline size_t read_compress_length(char *buf) static int lzo_compress_pages(struct list_head *ws, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret = 0; @@ -102,7 +100,9 @@ static int lzo_compress_pages(struct list_head *ws, struct page *in_page = NULL; struct page *out_page = NULL; unsigned long bytes_left; - + unsigned long len = *total_out; + unsigned long nr_dest_pages = *out_pages; + const unsigned long max_out = nr_dest_pages * PAGE_SIZE; size_t in_len; size_t out_len; char *buf; diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index bc2aba810629..9a46878ba60f 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -879,15 +879,14 @@ out: /* Since the DIO code tries to lock a wide area we need to look for any ordered * extents that exist in the range, rather than just the start of the range. 
*/ -struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, - u64 file_offset, - u64 len) +struct btrfs_ordered_extent *btrfs_lookup_ordered_range( + struct btrfs_inode *inode, u64 file_offset, u64 len) { struct btrfs_ordered_inode_tree *tree; struct rb_node *node; struct btrfs_ordered_extent *entry = NULL; - tree = &BTRFS_I(inode)->ordered_tree; + tree = &inode->ordered_tree; spin_lock_irq(&tree->lock); node = tree_search(tree, file_offset); if (!node) { @@ -923,7 +922,7 @@ bool btrfs_have_ordered_extents_in_range(struct inode *inode, { struct btrfs_ordered_extent *oe; - oe = btrfs_lookup_ordered_range(inode, file_offset, len); + oe = btrfs_lookup_ordered_range(BTRFS_I(inode), file_offset, len); if (oe) { btrfs_put_ordered_extent(oe); return true; diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index a8cb8efe6fae..195c93b67fe0 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -189,9 +189,10 @@ void btrfs_start_ordered_extent(struct inode *inode, int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len); struct btrfs_ordered_extent * btrfs_lookup_first_ordered_extent(struct inode * inode, u64 file_offset); -struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode, - u64 file_offset, - u64 len); +struct btrfs_ordered_extent *btrfs_lookup_ordered_range( + struct btrfs_inode *inode, + u64 file_offset, + u64 len); bool btrfs_have_ordered_extents_in_range(struct inode *inode, u64 file_offset, u64 len); diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index ddbde0f08365..d60df51959f7 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1714,8 +1714,8 @@ int replace_file_extents(struct btrfs_trans_handle *trans, if (!ret) continue; - btrfs_drop_extent_cache(inode, key.offset, end, - 1); + btrfs_drop_extent_cache(BTRFS_I(inode), + key.offset, end, 1); unlock_extent(&BTRFS_I(inode)->io_tree, key.offset, end); } @@ -2130,7 +2130,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, /* the lock_extent waits for readpage to complete */ lock_extent(&BTRFS_I(inode)->io_tree, start, end); - btrfs_drop_extent_cache(inode, start, end, 1); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 1); unlock_extent(&BTRFS_I(inode)->io_tree, start, end); } return 0; @@ -3161,7 +3161,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, free_extent_map(em); break; } - btrfs_drop_extent_cache(inode, start, end, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0); } unlock_extent(&BTRFS_I(inode)->io_tree, start, end); return ret; @@ -3203,7 +3203,8 @@ static int relocate_file_extent_cluster(struct inode *inode, index = (cluster->start - offset) >> PAGE_SHIFT; last_index = (cluster->end - offset) >> PAGE_SHIFT; while (index <= last_index) { - ret = btrfs_delalloc_reserve_metadata(inode, PAGE_SIZE); + ret = btrfs_delalloc_reserve_metadata(BTRFS_I(inode), + PAGE_SIZE); if (ret) goto out; @@ -3215,7 +3216,7 @@ static int relocate_file_extent_cluster(struct inode *inode, page = find_or_create_page(inode->i_mapping, index, mask); if (!page) { - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE); ret = -ENOMEM; goto out; @@ -3234,7 +3235,7 @@ static int relocate_file_extent_cluster(struct inode *inode, if (!PageUptodate(page)) { unlock_page(page); put_page(page); - btrfs_delalloc_release_metadata(inode, + btrfs_delalloc_release_metadata(BTRFS_I(inode), PAGE_SIZE); ret = -EIO; goto out; @@ -4245,7 +4246,7 @@ struct inode 
*create_reloc_inode(struct btrfs_fs_info *fs_info, BUG_ON(IS_ERR(inode) || is_bad_inode(inode)); BTRFS_I(inode)->index_cnt = group->key.objectid; - err = btrfs_orphan_add(trans, inode); + err = btrfs_orphan_add(trans, BTRFS_I(inode)); out: btrfs_end_transaction(trans); btrfs_btree_balance_dirty(fs_info); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index ff9a11c39f5e..b0251eb1239f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -731,7 +731,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) ret = -EIO; goto out; } - ret = repair_io_failure(inode, offset, PAGE_SIZE, + ret = repair_io_failure(BTRFS_I(inode), offset, PAGE_SIZE, fixup->logical, page, offset - page_offset(page), fixup->mirror_num); @@ -4236,7 +4236,7 @@ out: scrub_pending_trans_workers_dec(sctx); } -static int check_extent_to_block(struct inode *inode, u64 start, u64 len, +static int check_extent_to_block(struct btrfs_inode *inode, u64 start, u64 len, u64 logical) { struct extent_state *cached_state = NULL; @@ -4246,7 +4246,7 @@ static int check_extent_to_block(struct inode *inode, u64 start, u64 len, u64 lockstart = start, lockend = start + len - 1; int ret = 0; - io_tree = &BTRFS_I(inode)->io_tree; + io_tree = &inode->io_tree; lock_extent_bits(io_tree, lockstart, lockend, &cached_state); ordered = btrfs_lookup_ordered_range(inode, lockstart, len); @@ -4325,7 +4325,8 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, io_tree = &BTRFS_I(inode)->io_tree; nocow_ctx_logical = nocow_ctx->logical; - ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical); + ret = check_extent_to_block(BTRFS_I(inode), offset, len, + nocow_ctx_logical); if (ret) { ret = ret > 0 ? 0 : ret; goto out; @@ -4372,7 +4373,7 @@ again: } } - ret = check_extent_to_block(inode, offset, len, + ret = check_extent_to_block(BTRFS_I(inode), offset, len, nocow_ctx_logical); if (ret) { ret = ret > 0 ? 0 : ret; diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index d145ce804620..456c8901489b 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -1681,6 +1681,9 @@ static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen) { int ret; + if (ino == BTRFS_FIRST_FREE_OBJECTID) + return 1; + ret = get_cur_inode_state(sctx, ino, gen); if (ret < 0) goto out; @@ -1866,7 +1869,7 @@ static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, * not deleted and then re-created, if it was then we have no overwrite * and we can just unlink this entry. 
*/ - if (sctx->parent_root) { + if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) { ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, NULL, NULL); if (ret < 0 && ret != -ENOENT) @@ -1934,6 +1937,19 @@ static int did_overwrite_ref(struct send_ctx *sctx, if (ret <= 0) goto out; + if (dir != BTRFS_FIRST_FREE_OBJECTID) { + ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, + NULL, NULL, NULL); + if (ret < 0 && ret != -ENOENT) + goto out; + if (ret) { + ret = 0; + goto out; + } + if (gen != dir_gen) + goto out; + } + /* check if the ref was overwritten by another ref */ ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, &ow_inode, &other_type); @@ -3556,6 +3572,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, { int ret = 0; u64 ino = parent_ref->dir; + u64 ino_gen = parent_ref->dir_gen; u64 parent_ino_before, parent_ino_after; struct fs_path *path_before = NULL; struct fs_path *path_after = NULL; @@ -3576,6 +3593,8 @@ static int wait_for_parent_move(struct send_ctx *sctx, * at get_cur_path()). */ while (ino > BTRFS_FIRST_FREE_OBJECTID) { + u64 parent_ino_after_gen; + if (is_waiting_for_move(sctx, ino)) { /* * If the current inode is an ancestor of ino in the @@ -3598,7 +3617,7 @@ static int wait_for_parent_move(struct send_ctx *sctx, fs_path_reset(path_after); ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, - NULL, path_after); + &parent_ino_after_gen, path_after); if (ret < 0) goto out; ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, @@ -3615,10 +3634,20 @@ static int wait_for_parent_move(struct send_ctx *sctx, if (ino > sctx->cur_ino && (parent_ino_before != parent_ino_after || len1 != len2 || memcmp(path_before->start, path_after->start, len1))) { - ret = 1; - break; + u64 parent_ino_gen; + + ret = get_inode_info(sctx->parent_root, ino, NULL, + &parent_ino_gen, NULL, NULL, NULL, + NULL); + if (ret < 0) + goto out; + if (ino_gen == parent_ino_gen) { + ret = 1; + break; + } } ino = parent_ino_after; + ino_gen = parent_ino_after_gen; } out: @@ -5277,6 +5306,81 @@ out: return ret; } +static int range_is_hole_in_parent(struct send_ctx *sctx, + const u64 start, + const u64 end) +{ + struct btrfs_path *path; + struct btrfs_key key; + struct btrfs_root *root = sctx->parent_root; + u64 search_start = start; + int ret; + + path = alloc_path_for_send(); + if (!path) + return -ENOMEM; + + key.objectid = sctx->cur_ino; + key.type = BTRFS_EXTENT_DATA_KEY; + key.offset = search_start; + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); + if (ret < 0) + goto out; + if (ret > 0 && path->slots[0] > 0) + path->slots[0]--; + + while (search_start < end) { + struct extent_buffer *leaf = path->nodes[0]; + int slot = path->slots[0]; + struct btrfs_file_extent_item *fi; + u64 extent_end; + + if (slot >= btrfs_header_nritems(leaf)) { + ret = btrfs_next_leaf(root, path); + if (ret < 0) + goto out; + else if (ret > 0) + break; + continue; + } + + btrfs_item_key_to_cpu(leaf, &key, slot); + if (key.objectid < sctx->cur_ino || + key.type < BTRFS_EXTENT_DATA_KEY) + goto next; + if (key.objectid > sctx->cur_ino || + key.type > BTRFS_EXTENT_DATA_KEY || + key.offset >= end) + break; + + fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); + if (btrfs_file_extent_type(leaf, fi) == + BTRFS_FILE_EXTENT_INLINE) { + u64 size = btrfs_file_extent_inline_len(leaf, slot, fi); + + extent_end = ALIGN(key.offset + size, + root->fs_info->sectorsize); + } else { + extent_end = key.offset + + btrfs_file_extent_num_bytes(leaf, fi); + 
} + if (extent_end <= start) + goto next; + if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) { + search_start = extent_end; + goto next; + } + ret = 0; + goto out; +next: + path->slots[0]++; + } + ret = 1; +out: + btrfs_free_path(path); + return ret; +} + static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, struct btrfs_key *key) { @@ -5321,8 +5425,17 @@ static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, return ret; } - if (sctx->cur_inode_last_extent < key->offset) - ret = send_hole(sctx, key->offset); + if (sctx->cur_inode_last_extent < key->offset) { + ret = range_is_hole_in_parent(sctx, + sctx->cur_inode_last_extent, + key->offset); + if (ret < 0) + return ret; + else if (ret == 0) + ret = send_hole(sctx, key->offset); + else + ret = 0; + } sctx->cur_inode_last_extent = extent_end; return ret; } diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 4d0f038e14f1..8c91d03cc82d 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -278,7 +278,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) /* First with no extents */ BTRFS_I(inode)->root = root; - em = btrfs_get_extent(inode, NULL, 0, 0, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, sectorsize, 0); if (IS_ERR(em)) { em = NULL; test_msg("Got an error when we shouldn't have\n"); @@ -293,7 +293,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) goto out; } free_extent_map(em); - btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); + btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0); /* * All of the magic numbers are based on the mapping setup in @@ -302,7 +302,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) */ setup_file_extents(root, sectorsize); - em = btrfs_get_extent(inode, NULL, 0, 0, (u64)-1, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, (u64)-1, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -323,7 +323,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -350,7 +350,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -372,7 +372,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Regular extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -399,7 +399,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* The next 3 are split extents */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -428,7 +428,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) 
offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -450,7 +450,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -484,7 +484,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Prealloc extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -513,7 +513,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* The next 3 are a half written prealloc extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -543,7 +543,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -576,7 +576,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -611,7 +611,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Now for the compressed extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -645,7 +645,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* Split compressed extent */ - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -680,7 +680,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -707,7 +707,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -742,7 +742,8 @@ static noinline 
int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) free_extent_map(em); /* A hole between regular extents but no hole extent */ - em = btrfs_get_extent(inode, NULL, 0, offset + 6, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset + 6, + sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -769,7 +770,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, 4096 * 1024, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, 4096 * 1024, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -802,7 +803,7 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize) offset = em->start + em->len; free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, offset, sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, offset, sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -885,7 +886,7 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) insert_inode_item_key(root); insert_extent(root, sectorsize, sectorsize, sectorsize, 0, sectorsize, sectorsize, BTRFS_FILE_EXTENT_REG, 0, 1); - em = btrfs_get_extent(inode, NULL, 0, 0, 2 * sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, 0, 2 * sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; @@ -907,7 +908,8 @@ static int test_hole_first(u32 sectorsize, u32 nodesize) } free_extent_map(em); - em = btrfs_get_extent(inode, NULL, 0, sectorsize, 2 * sectorsize, 0); + em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, sectorsize, + 2 * sectorsize, 0); if (IS_ERR(em)) { test_msg("Got an error when we shouldn't have\n"); goto out; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 6b3e0fc2fe7a..61b807de3e16 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1505,7 +1505,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, /* * insert the directory item */ - ret = btrfs_set_inode_index(parent_inode, &index); + ret = btrfs_set_inode_index(BTRFS_I(parent_inode), &index); BUG_ON(ret); /* -ENOMEM */ /* check if there is a file/dir which has the same name. */ @@ -1644,7 +1644,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, - parent_inode, &key, + BTRFS_I(parent_inode), &key, BTRFS_FT_DIR, index); /* We have check then name at the beginning, so it is impossible. 
*/ BUG_ON(ret == -EEXIST || ret == -EOVERFLOW); @@ -1653,7 +1653,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - btrfs_i_size_write(parent_inode, parent_inode->i_size + + btrfs_i_size_write(BTRFS_I(parent_inode), parent_inode->i_size + dentry->d_name.len * 2); parent_inode->i_mtime = parent_inode->i_ctime = current_time(parent_inode); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 3806853cde08..a59674c3e69e 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -673,6 +673,10 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, unsigned long dest_offset; struct btrfs_key ins; + if (btrfs_file_extent_disk_bytenr(eb, item) == 0 && + btrfs_fs_incompat(fs_info, NO_HOLES)) + goto update_inode; + ret = btrfs_insert_empty_item(trans, root, path, key, sizeof(*item)); if (ret) @@ -825,6 +829,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, } inode_add_bytes(inode, nbytes); +update_inode: ret = btrfs_update_inode(trans, root, inode); out: if (inode) @@ -1322,8 +1327,9 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, } /* insert our name */ - ret = btrfs_add_link(trans, dir, inode, name, namelen, - 0, ref_index); + ret = btrfs_add_link(trans, BTRFS_I(dir), + BTRFS_I(inode), + name, namelen, 0, ref_index); if (ret) goto out; @@ -1641,7 +1647,8 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, return -EIO; } - ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); + ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, + name_len, 1, index); /* FIXME, put inode into FIXUP list */ @@ -1780,7 +1787,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, out: btrfs_release_path(path); if (!ret && update_size) { - btrfs_i_size_write(dir, dir->i_size + name_len * 2); + btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2); ret = btrfs_update_inode(trans, root, dir); } kfree(name); @@ -5045,14 +5052,14 @@ static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, * a full commit is required. */ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, - struct inode *inode, + struct btrfs_inode *inode, struct dentry *parent, struct super_block *sb, u64 last_committed) { int ret = 0; struct dentry *old_parent = NULL; - struct inode *orig_inode = inode; + struct btrfs_inode *orig_inode = inode; /* * for regular files, if its inode is already on disk, we don't @@ -5060,15 +5067,15 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, * we can use the last_unlink_trans field to record renames * and other fun in this file. */ - if (S_ISREG(inode->i_mode) && - BTRFS_I(inode)->generation <= last_committed && - BTRFS_I(inode)->last_unlink_trans <= last_committed) - goto out; + if (S_ISREG(inode->vfs_inode.i_mode) && + inode->generation <= last_committed && + inode->last_unlink_trans <= last_committed) + goto out; - if (!S_ISDIR(inode->i_mode)) { + if (!S_ISDIR(inode->vfs_inode.i_mode)) { if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) goto out; - inode = d_inode(parent); + inode = BTRFS_I(d_inode(parent)); } while (1) { @@ -5079,10 +5086,10 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, * think this inode has already been logged. 
*/ if (inode != orig_inode) - BTRFS_I(inode)->logged_trans = trans->transid; + inode->logged_trans = trans->transid; smp_mb(); - if (btrfs_must_commit_transaction(trans, BTRFS_I(inode))) { + if (btrfs_must_commit_transaction(trans, inode)) { ret = 1; break; } @@ -5091,8 +5098,8 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, break; if (IS_ROOT(parent)) { - inode = d_inode(parent); - if (btrfs_must_commit_transaction(trans, BTRFS_I(inode))) + inode = BTRFS_I(d_inode(parent)); + if (btrfs_must_commit_transaction(trans, inode)) ret = 1; break; } @@ -5100,7 +5107,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, parent = dget_parent(parent); dput(old_parent); old_parent = parent; - inode = d_inode(parent); + inode = BTRFS_I(d_inode(parent)); } dput(old_parent); @@ -5287,15 +5294,15 @@ next_dir_inode: } static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, - struct inode *inode, + struct btrfs_inode *inode, struct btrfs_log_ctx *ctx) { - struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); + struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb); int ret; struct btrfs_path *path; struct btrfs_key key; - struct btrfs_root *root = BTRFS_I(inode)->root; - const u64 ino = btrfs_ino(BTRFS_I(inode)); + struct btrfs_root *root = inode->root; + const u64 ino = btrfs_ino(inode); path = btrfs_alloc_path(); if (!path) @@ -5390,7 +5397,8 @@ out: * the last committed transaction */ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, - struct btrfs_root *root, struct inode *inode, + struct btrfs_root *root, + struct btrfs_inode *inode, struct dentry *parent, const loff_t start, const loff_t end, @@ -5404,9 +5412,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, int ret = 0; u64 last_committed = fs_info->last_trans_committed; bool log_dentries = false; - struct inode *orig_inode = inode; + struct btrfs_inode *orig_inode = inode; - sb = inode->i_sb; + sb = inode->vfs_inode.i_sb; if (btrfs_test_opt(fs_info, NOTREELOG)) { ret = 1; @@ -5423,18 +5431,17 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, goto end_no_trans; } - if (root != BTRFS_I(inode)->root || - btrfs_root_refs(&root->root_item) == 0) { + if (root != inode->root || btrfs_root_refs(&root->root_item) == 0) { ret = 1; goto end_no_trans; } - ret = check_parent_dirs_for_sync(trans, inode, parent, - sb, last_committed); + ret = check_parent_dirs_for_sync(trans, inode, parent, sb, + last_committed); if (ret) goto end_no_trans; - if (btrfs_inode_in_log(BTRFS_I(inode), trans->transid)) { + if (btrfs_inode_in_log(inode, trans->transid)) { ret = BTRFS_NO_LOG_SYNC; goto end_no_trans; } @@ -5443,8 +5450,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (ret) goto end_no_trans; - ret = btrfs_log_inode(trans, root, BTRFS_I(inode), inode_only, - start, end, ctx); + ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx); if (ret) goto end_trans; @@ -5454,14 +5460,14 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, * we can use the last_unlink_trans field to record renames * and other fun in this file. 
*/ - if (S_ISREG(inode->i_mode) && - BTRFS_I(inode)->generation <= last_committed && - BTRFS_I(inode)->last_unlink_trans <= last_committed) { + if (S_ISREG(inode->vfs_inode.i_mode) && + inode->generation <= last_committed && + inode->last_unlink_trans <= last_committed) { ret = 0; goto end_trans; } - if (S_ISDIR(inode->i_mode) && ctx && ctx->log_new_dentries) + if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries) log_dentries = true; /* @@ -5505,7 +5511,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, * but the file inode does not have a matching BTRFS_INODE_REF_KEY item * and has a link count of 2. */ - if (BTRFS_I(inode)->last_unlink_trans > last_committed) { + if (inode->last_unlink_trans > last_committed) { ret = btrfs_log_all_parents(trans, orig_inode, ctx); if (ret) goto end_trans; @@ -5515,14 +5521,13 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) break; - inode = d_inode(parent); - if (root != BTRFS_I(inode)->root) + inode = BTRFS_I(d_inode(parent)); + if (root != inode->root) break; - if (BTRFS_I(inode)->generation > last_committed) { - ret = btrfs_log_inode(trans, root, BTRFS_I(inode), - LOG_INODE_EXISTS, - 0, LLONG_MAX, ctx); + if (inode->generation > last_committed) { + ret = btrfs_log_inode(trans, root, inode, + LOG_INODE_EXISTS, 0, LLONG_MAX, ctx); if (ret) goto end_trans; } @@ -5534,7 +5539,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, old_parent = parent; } if (log_dentries) - ret = log_new_dir_dentries(trans, root, BTRFS_I(orig_inode), ctx); + ret = log_new_dir_dentries(trans, root, orig_inode, ctx); else ret = 0; end_trans: @@ -5566,8 +5571,8 @@ int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, struct dentry *parent = dget_parent(dentry); int ret; - ret = btrfs_log_inode_parent(trans, root, d_inode(dentry), parent, - start, end, 0, ctx); + ret = btrfs_log_inode_parent(trans, root, BTRFS_I(d_inode(dentry)), + parent, start, end, 0, ctx); dput(parent); return ret; @@ -5829,7 +5834,7 @@ int btrfs_log_new_name(struct btrfs_trans_handle *trans, (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed)) return 0; - return btrfs_log_inode_parent(trans, root, &inode->vfs_inode, parent, 0, + return btrfs_log_inode_parent(trans, root, inode, parent, 0, LLONG_MAX, 1, NULL); } diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 13e55d13045d..73d56eef5e60 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1725,7 +1725,7 @@ out: * Function to update ctime/mtime for a given device path. * Mainly used for ctime/mtime based probe like libblkid. 
*/ -static void update_dev_time(char *path_name) +static void update_dev_time(const char *path_name) { struct file *filp; @@ -1851,7 +1851,8 @@ void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info, fs_info->fs_devices->latest_bdev = next_device->bdev; } -int btrfs_rm_device(struct btrfs_fs_info *fs_info, char *device_path, u64 devid) +int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path, + u64 devid) { struct btrfs_device *device; struct btrfs_fs_devices *cur_devices; @@ -2091,7 +2092,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, } static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device) { int ret = 0; @@ -2118,7 +2119,7 @@ static int btrfs_find_device_by_path(struct btrfs_fs_info *fs_info, } int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device) { *device = NULL; @@ -2151,7 +2152,8 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, * Lookup a device given by device id, or the path if the id is 0. */ int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, - char *devpath, struct btrfs_device **device) + const char *devpath, + struct btrfs_device **device) { int ret; @@ -2307,7 +2309,7 @@ error: return ret; } -int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *device_path) +int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path) { struct btrfs_root *root = fs_info->dev_root; struct request_queue *q; @@ -2515,7 +2517,7 @@ error: } int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device *srcdev, struct btrfs_device **device_out) { @@ -6954,7 +6956,8 @@ static int update_dev_stat_item(struct btrfs_trans_handle *trans, key.offset = device->devid; path = btrfs_alloc_path(); - BUG_ON(!path); + if (!path) + return -ENOMEM; ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1); if (ret < 0) { btrfs_warn_in_rcu(fs_info, @@ -7102,7 +7105,7 @@ int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info, return 0; } -void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path) +void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path) { struct buffer_head *bh; struct btrfs_super_block *disk_super; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 24ba6bc3ec34..59be81206dd7 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -422,16 +422,16 @@ void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step); void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info, struct btrfs_device *device, struct btrfs_device *this_dev); int btrfs_find_device_missing_or_by_path(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device **device); int btrfs_find_device_by_devspec(struct btrfs_fs_info *fs_info, u64 devid, - char *devpath, + const char *devpath, struct btrfs_device **device); struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, const u64 *devid, const u8 *uuid); int btrfs_rm_device(struct btrfs_fs_info *fs_info, - char *device_path, u64 devid); + const char *device_path, u64 devid); void btrfs_cleanup_fs_uuids(void); int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len); int btrfs_grow_device(struct btrfs_trans_handle *trans, @@ -439,9 +439,9 @@ 
int btrfs_grow_device(struct btrfs_trans_handle *trans, struct btrfs_device *btrfs_find_device(struct btrfs_fs_info *fs_info, u64 devid, u8 *uuid, u8 *fsid); int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); -int btrfs_init_new_device(struct btrfs_fs_info *fs_info, char *path); +int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *path); int btrfs_init_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, - char *device_path, + const char *device_path, struct btrfs_device *srcdev, struct btrfs_device **device_out); int btrfs_balance(struct btrfs_balance_control *bctl, @@ -474,7 +474,7 @@ void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, struct btrfs_device *tgtdev); void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, struct btrfs_device *tgtdev); -void btrfs_scratch_superblocks(struct block_device *bdev, char *device_path); +void btrfs_scratch_superblocks(struct block_device *bdev, const char *device_path); int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len, int mirror_num); unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index da497f184ff4..135b10823c6d 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -73,13 +73,11 @@ fail: static int zlib_compress_pages(struct list_head *ws, struct address_space *mapping, - u64 start, unsigned long len, + u64 start, struct page **pages, - unsigned long nr_dest_pages, unsigned long *out_pages, unsigned long *total_in, - unsigned long *total_out, - unsigned long max_out) + unsigned long *total_out) { struct workspace *workspace = list_entry(ws, struct workspace, list); int ret; @@ -89,6 +87,9 @@ static int zlib_compress_pages(struct list_head *ws, struct page *in_page = NULL; struct page *out_page = NULL; unsigned long bytes_left; + unsigned long len = *total_out; + unsigned long nr_dest_pages = *out_pages; + const unsigned long max_out = nr_dest_pages * PAGE_SIZE; *out_pages = 0; *total_out = 0; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 9bf25be05636..3aa457f83214 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2456,7 +2456,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) } down_read(&key->sem); - upayload = user_key_payload(key); + upayload = user_key_payload_locked(key); if (IS_ERR_OR_NULL(upayload)) { rc = upayload ? 
PTR_ERR(upayload) : -EINVAL; goto out_key_put; diff --git a/fs/coda/file.c b/fs/coda/file.c index 6e0154eb6fcc..9d956cd6d46f 100644 --- a/fs/coda/file.c +++ b/fs/coda/file.c @@ -96,7 +96,7 @@ coda_file_mmap(struct file *coda_file, struct vm_area_struct *vma) cfi->cfi_mapcount++; spin_unlock(&cii->c_lock); - return host_file->f_op->mmap(host_file, vma); + return call_mmap(host_file, vma); } int coda_open(struct inode *coda_inode, struct file *coda_file) diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 02eb6b9e4438..d5d896fa5a71 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -103,7 +103,7 @@ static int validate_user_key(struct fscrypt_info *crypt_info, goto out; } down_read(&keyring_key->sem); - ukp = user_key_payload(keyring_key); + ukp = user_key_payload_locked(keyring_key); if (ukp->datalen != sizeof(struct fscrypt_key)) { res = -EINVAL; up_read(&keyring_key->sem); diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 599a29237cfe..95c1c8d34539 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -117,7 +117,7 @@ ecryptfs_get_key_payload_data(struct key *key) auth_tok = ecryptfs_get_encrypted_key_payload_data(key); if (!auth_tok) - return (struct ecryptfs_auth_tok *)user_key_payload(key)->data; + return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data; else return auth_tok; } diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c index 5d5ddaa84b21..67f940892ef8 100644 --- a/fs/fscache/object-list.c +++ b/fs/fscache/object-list.c @@ -329,7 +329,7 @@ static void fscache_objlist_config(struct fscache_objlist_data *data) config = 0; rcu_read_lock(); - confkey = user_key_payload(key); + confkey = user_key_payload_rcu(key); buf = confkey->data; for (len = confkey->datalen - 1; len >= 0; len--) { diff --git a/fs/namei.c b/fs/namei.c index da689c9c005e..d41fab78798b 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -672,17 +672,15 @@ static bool legitimize_links(struct nameidata *nd) /** * unlazy_walk - try to switch to ref-walk mode. * @nd: nameidata pathwalk data - * @dentry: child of nd->path.dentry or NULL - * @seq: seq number to check dentry against * Returns: 0 on success, -ECHILD on failure * - * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry - * for ref-walk mode. @dentry must be a path found by a do_lookup call on - * @nd or NULL. Must be called from rcu-walk context. + * unlazy_walk attempts to legitimize the current nd->path and nd->root + * for ref-walk mode. + * Must be called from rcu-walk context. * Nothing should touch nameidata between unlazy_walk() failure and * terminate_walk(). */ -static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq) +static int unlazy_walk(struct nameidata *nd) { struct dentry *parent = nd->path.dentry; @@ -691,33 +689,66 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq nd->flags &= ~LOOKUP_RCU; if (unlikely(!legitimize_links(nd))) goto out2; + if (unlikely(!legitimize_path(nd, &nd->path, nd->seq))) + goto out1; + if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { + if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) + goto out; + } + rcu_read_unlock(); + BUG_ON(nd->inode != parent->d_inode); + return 0; + +out2: + nd->path.mnt = NULL; + nd->path.dentry = NULL; +out1: + if (!(nd->flags & LOOKUP_ROOT)) + nd->root.mnt = NULL; +out: + rcu_read_unlock(); + return -ECHILD; +} + +/** + * unlazy_child - try to switch to ref-walk mode. 
+ * @nd: nameidata pathwalk data + * @dentry: child of nd->path.dentry + * @seq: seq number to check dentry against + * Returns: 0 on success, -ECHILD on failure + * + * unlazy_child attempts to legitimize the current nd->path, nd->root and dentry + * for ref-walk mode. @dentry must be a path found by a do_lookup call on + * @nd. Must be called from rcu-walk context. + * Nothing should touch nameidata between unlazy_child() failure and + * terminate_walk(). + */ +static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq) +{ + BUG_ON(!(nd->flags & LOOKUP_RCU)); + + nd->flags &= ~LOOKUP_RCU; + if (unlikely(!legitimize_links(nd))) + goto out2; if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq))) goto out2; - if (unlikely(!lockref_get_not_dead(&parent->d_lockref))) + if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref))) goto out1; /* - * For a negative lookup, the lookup sequence point is the parents - * sequence point, and it only needs to revalidate the parent dentry. - * - * For a positive lookup, we need to move both the parent and the - * dentry from the RCU domain to be properly refcounted. And the - * sequence number in the dentry validates *both* dentry counters, - * since we checked the sequence number of the parent after we got - * the child sequence number. So we know the parent must still - * be valid if the child sequence number is still valid. + * We need to move both the parent and the dentry from the RCU domain + * to be properly refcounted. And the sequence number in the dentry + * validates *both* dentry counters, since we checked the sequence + * number of the parent after we got the child sequence number. So we + * know the parent must still be valid if the child sequence number is */ - if (!dentry) { - if (read_seqcount_retry(&parent->d_seq, nd->seq)) - goto out; - BUG_ON(nd->inode != parent->d_inode); - } else { - if (!lockref_get_not_dead(&dentry->d_lockref)) - goto out; - if (read_seqcount_retry(&dentry->d_seq, seq)) - goto drop_dentry; + if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) + goto out; + if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) { + rcu_read_unlock(); + dput(dentry); + goto drop_root_mnt; } - /* * Sequence counts matched. Now make sure that the root is * still valid and get it if required. 
@@ -733,10 +764,6 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq rcu_read_unlock(); return 0; -drop_dentry: - rcu_read_unlock(); - dput(dentry); - goto drop_root_mnt; out2: nd->path.mnt = NULL; out1: @@ -749,27 +776,12 @@ drop_root_mnt: return -ECHILD; } -static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq) -{ - if (unlikely(!legitimize_path(nd, link, seq))) { - drop_links(nd); - nd->depth = 0; - nd->flags &= ~LOOKUP_RCU; - nd->path.mnt = NULL; - nd->path.dentry = NULL; - if (!(nd->flags & LOOKUP_ROOT)) - nd->root.mnt = NULL; - rcu_read_unlock(); - } else if (likely(unlazy_walk(nd, NULL, 0)) == 0) { - return 0; - } - path_put(link); - return -ECHILD; -} - static inline int d_revalidate(struct dentry *dentry, unsigned int flags) { - return dentry->d_op->d_revalidate(dentry, flags); + if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) + return dentry->d_op->d_revalidate(dentry, flags); + else + return 1; } /** @@ -790,7 +802,7 @@ static int complete_walk(struct nameidata *nd) if (nd->flags & LOOKUP_RCU) { if (!(nd->flags & LOOKUP_ROOT)) nd->root.mnt = NULL; - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return -ECHILD; } @@ -1016,7 +1028,7 @@ const char *get_link(struct nameidata *nd) touch_atime(&last->link); cond_resched(); } else if (atime_needs_update_rcu(&last->link, inode)) { - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return ERR_PTR(-ECHILD); touch_atime(&last->link); } @@ -1035,7 +1047,7 @@ const char *get_link(struct nameidata *nd) if (nd->flags & LOOKUP_RCU) { res = get(NULL, inode, &last->done); if (res == ERR_PTR(-ECHILD)) { - if (unlikely(unlazy_walk(nd, NULL, 0))) + if (unlikely(unlazy_walk(nd))) return ERR_PTR(-ECHILD); res = get(dentry, inode, &last->done); } @@ -1469,19 +1481,14 @@ static struct dentry *lookup_dcache(const struct qstr *name, struct dentry *dir, unsigned int flags) { - struct dentry *dentry; - int error; - - dentry = d_lookup(dir, name); + struct dentry *dentry = d_lookup(dir, name); if (dentry) { - if (dentry->d_flags & DCACHE_OP_REVALIDATE) { - error = d_revalidate(dentry, flags); - if (unlikely(error <= 0)) { - if (!error) - d_invalidate(dentry); - dput(dentry); - return ERR_PTR(error); - } + int error = d_revalidate(dentry, flags); + if (unlikely(error <= 0)) { + if (!error) + d_invalidate(dentry); + dput(dentry); + return ERR_PTR(error); } } return dentry; @@ -1546,7 +1553,7 @@ static int lookup_fast(struct nameidata *nd, bool negative; dentry = __d_lookup_rcu(parent, &nd->last, &seq); if (unlikely(!dentry)) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; return 0; } @@ -1571,14 +1578,8 @@ static int lookup_fast(struct nameidata *nd, return -ECHILD; *seqp = seq; - if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) - status = d_revalidate(dentry, nd->flags); - if (unlikely(status <= 0)) { - if (unlazy_walk(nd, dentry, seq)) - return -ECHILD; - if (status == -ECHILD) - status = d_revalidate(dentry, nd->flags); - } else { + status = d_revalidate(dentry, nd->flags); + if (likely(status > 0)) { /* * Note: do negative dentry check after revalidation in * case that drops it. 
@@ -1589,15 +1590,17 @@ static int lookup_fast(struct nameidata *nd, path->dentry = dentry; if (likely(__follow_mount_rcu(nd, path, inode, seqp))) return 1; - if (unlazy_walk(nd, dentry, seq)) - return -ECHILD; } + if (unlazy_child(nd, dentry, seq)) + return -ECHILD; + if (unlikely(status == -ECHILD)) + /* we'd been told to redo it in non-rcu mode */ + status = d_revalidate(dentry, nd->flags); } else { dentry = __d_lookup(parent, &nd->last); if (unlikely(!dentry)) return 0; - if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) - status = d_revalidate(dentry, nd->flags); + status = d_revalidate(dentry, nd->flags); } if (unlikely(status <= 0)) { if (!status) @@ -1636,8 +1639,7 @@ again: if (IS_ERR(dentry)) goto out; if (unlikely(!d_in_lookup(dentry))) { - if ((dentry->d_flags & DCACHE_OP_REVALIDATE) && - !(flags & LOOKUP_NO_REVAL)) { + if (!(flags & LOOKUP_NO_REVAL)) { int error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { if (!error) { @@ -1668,7 +1670,7 @@ static inline int may_lookup(struct nameidata *nd) int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK); if (err != -ECHILD) return err; - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } return inode_permission(nd->inode, MAY_EXEC); @@ -1703,9 +1705,17 @@ static int pick_link(struct nameidata *nd, struct path *link, error = nd_alloc_stack(nd); if (unlikely(error)) { if (error == -ECHILD) { - if (unlikely(unlazy_link(nd, link, seq))) - return -ECHILD; - error = nd_alloc_stack(nd); + if (unlikely(!legitimize_path(nd, link, seq))) { + drop_links(nd); + nd->depth = 0; + nd->flags &= ~LOOKUP_RCU; + nd->path.mnt = NULL; + nd->path.dentry = NULL; + if (!(nd->flags & LOOKUP_ROOT)) + nd->root.mnt = NULL; + rcu_read_unlock(); + } else if (likely(unlazy_walk(nd)) == 0) + error = nd_alloc_stack(nd); } if (error) { path_put(link); @@ -2122,7 +2132,7 @@ OK: } if (unlikely(!d_can_lookup(nd->path.dentry))) { if (nd->flags & LOOKUP_RCU) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } return -ENOTDIR; @@ -2579,7 +2589,7 @@ mountpoint_last(struct nameidata *nd) /* If we're in rcuwalk, drop out of it to handle last component */ if (nd->flags & LOOKUP_RCU) { - if (unlazy_walk(nd, NULL, 0)) + if (unlazy_walk(nd)) return -ECHILD; } @@ -3072,9 +3082,6 @@ static int lookup_open(struct nameidata *nd, struct path *path, if (d_in_lookup(dentry)) break; - if (!(dentry->d_flags & DCACHE_OP_REVALIDATE)) - break; - error = d_revalidate(dentry, nd->flags); if (likely(error > 0)) break; @@ -3356,13 +3363,50 @@ out: return error; } +struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag) +{ + static const struct qstr name = QSTR_INIT("/", 1); + struct dentry *child = NULL; + struct inode *dir = dentry->d_inode; + struct inode *inode; + int error; + + /* we want directory to be writable */ + error = inode_permission(dir, MAY_WRITE | MAY_EXEC); + if (error) + goto out_err; + error = -EOPNOTSUPP; + if (!dir->i_op->tmpfile) + goto out_err; + error = -ENOMEM; + child = d_alloc(dentry, &name); + if (unlikely(!child)) + goto out_err; + error = dir->i_op->tmpfile(dir, child, mode); + if (error) + goto out_err; + error = -ENOENT; + inode = child->d_inode; + if (unlikely(!inode)) + goto out_err; + if (!(open_flag & O_EXCL)) { + spin_lock(&inode->i_lock); + inode->i_state |= I_LINKABLE; + spin_unlock(&inode->i_lock); + } + return child; + +out_err: + dput(child); + return ERR_PTR(error); +} +EXPORT_SYMBOL(vfs_tmpfile); + static int do_tmpfile(struct nameidata *nd, unsigned flags, const 
struct open_flags *op, struct file *file, int *opened) { - static const struct qstr name = QSTR_INIT("/", 1); struct dentry *child; - struct inode *dir; struct path path; int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path); if (unlikely(error)) @@ -3370,25 +3414,12 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags, error = mnt_want_write(path.mnt); if (unlikely(error)) goto out; - dir = path.dentry->d_inode; - /* we want directory to be writable */ - error = inode_permission(dir, MAY_WRITE | MAY_EXEC); - if (error) + child = vfs_tmpfile(path.dentry, op->mode, op->open_flag); + error = PTR_ERR(child); + if (unlikely(IS_ERR(child))) goto out2; - if (!dir->i_op->tmpfile) { - error = -EOPNOTSUPP; - goto out2; - } - child = d_alloc(path.dentry, &name); - if (unlikely(!child)) { - error = -ENOMEM; - goto out2; - } dput(path.dentry); path.dentry = child; - error = dir->i_op->tmpfile(dir, child, op->mode); - if (error) - goto out2; audit_inode(nd->name, child, 0); /* Don't check for other permissions, the inode was just created */ error = may_open(&path, 0, op->open_flag); @@ -3399,14 +3430,8 @@ static int do_tmpfile(struct nameidata *nd, unsigned flags, if (error) goto out2; error = open_check_o_direct(file); - if (error) { + if (error) fput(file); - } else if (!(op->open_flag & O_EXCL)) { - struct inode *inode = file_inode(file); - spin_lock(&inode->i_lock); - inode->i_state |= I_LINKABLE; - spin_unlock(&inode->i_lock); - } out2: mnt_drop_write(path.mnt); out: diff --git a/fs/ncpfs/sock.c b/fs/ncpfs/sock.c index bdea177aa405..98b6db0ed63e 100644 --- a/fs/ncpfs/sock.c +++ b/fs/ncpfs/sock.c @@ -41,19 +41,12 @@ static int _recv(struct socket *sock, void *buf, int size, unsigned flags) return kernel_recvmsg(sock, &msg, &iov, 1, size, flags); } -static inline int do_send(struct socket *sock, struct kvec *vec, int count, - int len, unsigned flags) -{ - struct msghdr msg = { .msg_flags = flags }; - return kernel_sendmsg(sock, &msg, vec, count, len); -} - static int _send(struct socket *sock, const void *buff, int len) { - struct kvec vec; - vec.iov_base = (void *) buff; - vec.iov_len = len; - return do_send(sock, &vec, 1, len, 0); + struct msghdr msg = { .msg_flags = 0 }; + struct kvec vec = {.iov_base = (void *)buff, .iov_len = len}; + iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &vec, 1, len); + return sock_sendmsg(sock, &msg); } struct ncp_request_reply { @@ -64,9 +57,7 @@ struct ncp_request_reply { size_t datalen; int result; enum { RQ_DONE, RQ_INPROGRESS, RQ_QUEUED, RQ_IDLE, RQ_ABANDONED } status; - struct kvec* tx_ciov; - size_t tx_totallen; - size_t tx_iovlen; + struct iov_iter from; struct kvec tx_iov[3]; u_int16_t tx_type; u_int32_t sign[6]; @@ -206,28 +197,22 @@ static inline void __ncptcp_abort(struct ncp_server *server) static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) { - struct kvec vec[3]; - /* sock_sendmsg updates iov pointers for us :-( */ - memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0])); - return do_send(sock, vec, req->tx_iovlen, - req->tx_totallen, MSG_DONTWAIT); + struct msghdr msg = { .msg_iter = req->from, .msg_flags = MSG_DONTWAIT }; + return sock_sendmsg(sock, &msg); } static void __ncptcp_try_send(struct ncp_server *server) { struct ncp_request_reply *rq; - struct kvec *iov; - struct kvec iovc[3]; + struct msghdr msg = { .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT }; int result; rq = server->tx.creq; if (!rq) return; - /* sock_sendmsg updates iov pointers for us :-( */ - memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * 
sizeof(iov[0])); - result = do_send(server->ncp_sock, iovc, rq->tx_iovlen, - rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT); + msg.msg_iter = rq->from; + result = sock_sendmsg(server->ncp_sock, &msg); if (result == -EAGAIN) return; @@ -237,21 +222,12 @@ static void __ncptcp_try_send(struct ncp_server *server) __ncp_abort_request(server, rq, result); return; } - if (result >= rq->tx_totallen) { + if (!msg_data_left(&msg)) { server->rcv.creq = rq; server->tx.creq = NULL; return; } - rq->tx_totallen -= result; - iov = rq->tx_ciov; - while (iov->iov_len <= result) { - result -= iov->iov_len; - iov++; - rq->tx_iovlen--; - } - iov->iov_base += result; - iov->iov_len -= result; - rq->tx_ciov = iov; + rq->from = msg.msg_iter; } static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h) @@ -264,22 +240,21 @@ static inline void ncp_init_header(struct ncp_server *server, struct ncp_request static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req) { - size_t signlen; - struct ncp_request_header* h; + size_t signlen, len = req->tx_iov[1].iov_len; + struct ncp_request_header *h = req->tx_iov[1].iov_base; - req->tx_ciov = req->tx_iov + 1; - - h = req->tx_iov[1].iov_base; ncp_init_header(server, req, h); - signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, - req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, - cpu_to_le32(req->tx_totallen), req->sign); + signlen = sign_packet(server, + req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, + len - sizeof(struct ncp_request_header) + 1, + cpu_to_le32(len), req->sign); if (signlen) { - req->tx_ciov[1].iov_base = req->sign; - req->tx_ciov[1].iov_len = signlen; - req->tx_iovlen += 1; - req->tx_totallen += signlen; + /* NCP over UDP appends signature */ + req->tx_iov[2].iov_base = req->sign; + req->tx_iov[2].iov_len = signlen; } + iov_iter_kvec(&req->from, WRITE | ITER_KVEC, + req->tx_iov + 1, signlen ? 
2 : 1, len + signlen); server->rcv.creq = req; server->timeout_last = server->m.time_out; server->timeout_retries = server->m.retry_count; @@ -293,24 +268,23 @@ static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req) { - size_t signlen; - struct ncp_request_header* h; + size_t signlen, len = req->tx_iov[1].iov_len; + struct ncp_request_header *h = req->tx_iov[1].iov_base; - req->tx_ciov = req->tx_iov; - h = req->tx_iov[1].iov_base; ncp_init_header(server, req, h); signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, - req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, - cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16; + len - sizeof(struct ncp_request_header) + 1, + cpu_to_be32(len + 24), req->sign + 4) + 16; req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC); - req->sign[1] = htonl(req->tx_totallen + signlen); + req->sign[1] = htonl(len + signlen); req->sign[2] = htonl(NCP_TCP_XMIT_VERSION); req->sign[3] = htonl(req->datalen + 8); + /* NCP over TCP prepends signature */ req->tx_iov[0].iov_base = req->sign; req->tx_iov[0].iov_len = signlen; - req->tx_iovlen += 1; - req->tx_totallen += signlen; + iov_iter_kvec(&req->from, WRITE | ITER_KVEC, + req->tx_iov, 2, len + signlen); server->tx.creq = req; __ncptcp_try_send(server); @@ -365,18 +339,17 @@ static void __ncp_next_request(struct ncp_server *server) static void info_server(struct ncp_server *server, unsigned int id, const void * data, size_t len) { if (server->info_sock) { - struct kvec iov[2]; - __be32 hdr[2]; - - hdr[0] = cpu_to_be32(len + 8); - hdr[1] = cpu_to_be32(id); - - iov[0].iov_base = hdr; - iov[0].iov_len = 8; - iov[1].iov_base = (void *) data; - iov[1].iov_len = len; + struct msghdr msg = { .msg_flags = MSG_NOSIGNAL }; + __be32 hdr[2] = {cpu_to_be32(len + 8), cpu_to_be32(id)}; + struct kvec iov[2] = { + {.iov_base = hdr, .iov_len = 8}, + {.iov_base = (void *)data, .iov_len = len}, + }; + + iov_iter_kvec(&msg.msg_iter, ITER_KVEC | WRITE, + iov, 2, len + 8); - do_send(server->info_sock, iov, 2, len + 8, MSG_NOSIGNAL); + sock_sendmsg(server->info_sock, &msg); } } @@ -712,8 +685,6 @@ static int do_ncp_rpc_call(struct ncp_server *server, int size, req->datalen = max_reply_size; req->tx_iov[1].iov_base = server->packet; req->tx_iov[1].iov_len = size; - req->tx_iovlen = 1; - req->tx_totallen = size; req->tx_type = *(u_int16_t*)server->packet; result = ncp_add_request(server, req); diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index c444285bb1b1..835c163f61af 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -316,7 +316,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen, if (ret < 0) goto out_up; - payload = user_key_payload(rkey); + payload = user_key_payload_rcu(rkey); if (IS_ERR_OR_NULL(payload)) { ret = PTR_ERR(payload); goto out_up; diff --git a/fs/open.c b/fs/open.c index 9921f70bc5ca..949cef29c3bb 100644 --- a/fs/open.c +++ b/fs/open.c @@ -301,12 +301,10 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (S_ISFIFO(inode->i_mode)) return -ESPIPE; - /* - * Let individual file system decide if it supports preallocation - * for directories or not. 
- */ - if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) && - !S_ISBLK(inode->i_mode)) + if (S_ISDIR(inode->i_mode)) + return -EISDIR; + + if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) return -ENODEV; /* Check for wrap through zero too */ @@ -316,7 +314,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (!file->f_op->fallocate) return -EOPNOTSUPP; - sb_start_write(inode->i_sb); + file_start_write(file); ret = file->f_op->fallocate(file, mode, offset, len); /* @@ -329,7 +327,7 @@ int vfs_fallocate(struct file *file, int mode, loff_t offset, loff_t len) if (ret == 0) fsnotify_modify(file); - sb_end_write(inode->i_sb); + file_end_write(file); return ret; } EXPORT_SYMBOL_GPL(vfs_fallocate); diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index c48859f16e7b..67c24351a67f 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -115,6 +115,13 @@ static struct inode *orangefs_alloc_inode(struct super_block *sb) return &orangefs_inode->vfs_inode; } +static void orangefs_i_callback(struct rcu_head *head) +{ + struct inode *inode = container_of(head, struct inode, i_rcu); + struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); + kmem_cache_free(orangefs_inode_cache, orangefs_inode); +} + static void orangefs_destroy_inode(struct inode *inode) { struct orangefs_inode_s *orangefs_inode = ORANGEFS_I(inode); @@ -123,7 +130,7 @@ static void orangefs_destroy_inode(struct inode *inode) "%s: deallocated %p destroying inode %pU\n", __func__, orangefs_inode, get_khandle_from_ino(inode)); - kmem_cache_free(orangefs_inode_cache, orangefs_inode); + call_rcu(&inode->i_rcu, orangefs_i_callback); } /* diff --git a/fs/read_write.c b/fs/read_write.c index dc60075653a8..c4f88afbc67f 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -24,9 +24,6 @@ #include <linux/uaccess.h> #include <asm/unistd.h> -typedef ssize_t (*io_fn_t)(struct file *, char __user *, size_t, loff_t *); -typedef ssize_t (*iter_fn_t)(struct kiocb *, struct iov_iter *); - const struct file_operations generic_ro_fops = { .llseek = generic_file_llseek, .read_iter = generic_file_read_iter, @@ -371,7 +368,7 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos) kiocb.ki_pos = *ppos; iter->type |= READ; - ret = file->f_op->read_iter(&kiocb, iter); + ret = call_read_iter(file, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -391,7 +388,7 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) kiocb.ki_pos = *ppos; iter->type |= WRITE; - ret = file->f_op->write_iter(&kiocb, iter); + ret = call_write_iter(file, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -440,7 +437,7 @@ static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo kiocb.ki_pos = *ppos; iov_iter_init(&iter, READ, &iov, 1, len); - ret = filp->f_op->read_iter(&kiocb, &iter); + ret = call_read_iter(filp, &kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); *ppos = kiocb.ki_pos; return ret; @@ -497,7 +494,7 @@ static ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t kiocb.ki_pos = *ppos; iov_iter_init(&iter, WRITE, &iov, 1, len); - ret = filp->f_op->write_iter(&kiocb, &iter); + ret = call_write_iter(filp, &kiocb, &iter); BUG_ON(ret == -EIOCBQUEUED); if (ret > 0) *ppos = kiocb.ki_pos; @@ -676,7 +673,7 @@ unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to) EXPORT_SYMBOL(iov_shorten); static ssize_t do_iter_readv_writev(struct file *filp, struct 
iov_iter *iter, - loff_t *ppos, iter_fn_t fn, int flags) + loff_t *ppos, int type, int flags) { struct kiocb kiocb; ssize_t ret; @@ -693,7 +690,10 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC); kiocb.ki_pos = *ppos; - ret = fn(&kiocb, iter); + if (type == READ) + ret = call_read_iter(filp, &kiocb, iter); + else + ret = call_write_iter(filp, &kiocb, iter); BUG_ON(ret == -EIOCBQUEUED); *ppos = kiocb.ki_pos; return ret; @@ -701,7 +701,7 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, /* Do it by hand, with file-ops */ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, - loff_t *ppos, io_fn_t fn, int flags) + loff_t *ppos, int type, int flags) { ssize_t ret = 0; @@ -712,7 +712,13 @@ static ssize_t do_loop_readv_writev(struct file *filp, struct iov_iter *iter, struct iovec iovec = iov_iter_iovec(iter); ssize_t nr; - nr = fn(filp, iovec.iov_base, iovec.iov_len, ppos); + if (type == READ) { + nr = filp->f_op->read(filp, iovec.iov_base, + iovec.iov_len, ppos); + } else { + nr = filp->f_op->write(filp, iovec.iov_base, + iovec.iov_len, ppos); + } if (nr < 0) { if (!ret) @@ -835,50 +841,32 @@ out: return ret; } -static ssize_t do_readv_writev(int type, struct file *file, - const struct iovec __user * uvector, - unsigned long nr_segs, loff_t *pos, - int flags) +static ssize_t __do_readv_writev(int type, struct file *file, + struct iov_iter *iter, loff_t *pos, int flags) { size_t tot_len; - struct iovec iovstack[UIO_FASTIOV]; - struct iovec *iov = iovstack; - struct iov_iter iter; - ssize_t ret; - io_fn_t fn; - iter_fn_t iter_fn; - - ret = import_iovec(type, uvector, nr_segs, - ARRAY_SIZE(iovstack), &iov, &iter); - if (ret < 0) - return ret; + ssize_t ret = 0; - tot_len = iov_iter_count(&iter); + tot_len = iov_iter_count(iter); if (!tot_len) goto out; ret = rw_verify_area(type, file, pos, tot_len); if (ret < 0) goto out; - if (type == READ) { - fn = file->f_op->read; - iter_fn = file->f_op->read_iter; - } else { - fn = (io_fn_t)file->f_op->write; - iter_fn = file->f_op->write_iter; + if (type != READ) file_start_write(file); - } - if (iter_fn) - ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags); + if ((type == READ && file->f_op->read_iter) || + (type == WRITE && file->f_op->write_iter)) + ret = do_iter_readv_writev(file, iter, pos, type, flags); else - ret = do_loop_readv_writev(file, &iter, pos, fn, flags); + ret = do_loop_readv_writev(file, iter, pos, type, flags); if (type != READ) file_end_write(file); out: - kfree(iov); if ((ret + (type == READ)) > 0) { if (type == READ) fsnotify_access(file); @@ -888,6 +876,27 @@ out: return ret; } +static ssize_t do_readv_writev(int type, struct file *file, + const struct iovec __user *uvector, + unsigned long nr_segs, loff_t *pos, + int flags) +{ + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + ssize_t ret; + + ret = import_iovec(type, uvector, nr_segs, + ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) + return ret; + + ret = __do_readv_writev(type, file, &iter, pos, flags); + kfree(iov); + + return ret; +} + ssize_t vfs_readv(struct file *file, const struct iovec __user *vec, unsigned long vlen, loff_t *pos, int flags) { @@ -1065,51 +1074,19 @@ static ssize_t compat_do_readv_writev(int type, struct file *file, unsigned long nr_segs, loff_t *pos, int flags) { - compat_ssize_t tot_len; struct iovec iovstack[UIO_FASTIOV]; struct iovec *iov = iovstack; struct iov_iter 
iter; ssize_t ret; - io_fn_t fn; - iter_fn_t iter_fn; ret = compat_import_iovec(type, uvector, nr_segs, UIO_FASTIOV, &iov, &iter); if (ret < 0) return ret; - tot_len = iov_iter_count(&iter); - if (!tot_len) - goto out; - ret = rw_verify_area(type, file, pos, tot_len); - if (ret < 0) - goto out; - - if (type == READ) { - fn = file->f_op->read; - iter_fn = file->f_op->read_iter; - } else { - fn = (io_fn_t)file->f_op->write; - iter_fn = file->f_op->write_iter; - file_start_write(file); - } - - if (iter_fn) - ret = do_iter_readv_writev(file, &iter, pos, iter_fn, flags); - else - ret = do_loop_readv_writev(file, &iter, pos, fn, flags); - - if (type != READ) - file_end_write(file); - -out: + ret = __do_readv_writev(type, file, &iter, pos, flags); kfree(iov); - if ((ret + (type == READ)) > 0) { - if (type == READ) - fsnotify_access(file); - else - fsnotify_modify(file); - } + return ret; } @@ -1519,6 +1496,11 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, if (flags != 0) return -EINVAL; + if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode)) + return -EISDIR; + if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode)) + return -EINVAL; + ret = rw_verify_area(READ, file_in, &pos_in, len); if (unlikely(ret)) return ret; @@ -1539,7 +1521,7 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, if (len == 0) return 0; - sb_start_write(inode_out->i_sb); + file_start_write(file_out); /* * Try cloning first, this is supported by more file systems, and @@ -1575,7 +1557,7 @@ done: inc_syscr(current); inc_syscw(current); - sb_end_write(inode_out->i_sb); + file_end_write(file_out); return ret; } diff --git a/fs/splice.c b/fs/splice.c index e49336555739..006ba50f4ece 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -309,7 +309,7 @@ ssize_t generic_file_splice_read(struct file *in, loff_t *ppos, idx = to.idx; init_sync_kiocb(&kiocb, in); kiocb.ki_pos = *ppos; - ret = in->f_op->read_iter(&kiocb, &to); + ret = call_read_iter(in, &kiocb, &to); if (ret > 0) { *ppos = kiocb.ki_pos; file_accessed(in); diff --git a/fs/sync.c b/fs/sync.c index 2a54c1f22035..11ba023434b1 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -192,7 +192,7 @@ int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) spin_unlock(&inode->i_lock); mark_inode_dirty_sync(inode); } - return file->f_op->fsync(file, start, end, datasync); + return call_fsync(file, start, end, datasync); } EXPORT_SYMBOL(vfs_fsync_range);
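
The do_tmpfile() hunk above drops the open-coded directory-permission check, ->tmpfile() call and I_LINKABLE marking in favour of a single vfs_tmpfile() call; the helper itself is introduced elsewhere in this series and is not shown in this diff. A rough sketch of what it presumably contains, reassembled from the lines removed here (error handling and exact layout are approximate, not the authoritative definition):

#include <linux/dcache.h>
#include <linux/err.h>
#include <linux/fs.h>

struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, int open_flag)
{
	static const struct qstr name = QSTR_INIT("/", 1);
	struct inode *dir = dentry->d_inode;
	struct dentry *child;
	int error;

	/* the containing directory must be writable, as do_tmpfile() used to check */
	error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
	if (error)
		return ERR_PTR(error);
	if (!dir->i_op->tmpfile)
		return ERR_PTR(-EOPNOTSUPP);
	child = d_alloc(dentry, &name);
	if (unlikely(!child))
		return ERR_PTR(-ENOMEM);
	error = dir->i_op->tmpfile(dir, child, mode);
	if (error) {
		dput(child);
		return ERR_PTR(error);
	}
	/* without O_EXCL the tmpfile may later be linked in, so mark the inode */
	if (!(open_flag & O_EXCL)) {
		struct inode *inode = child->d_inode;

		spin_lock(&inode->i_lock);
		inode->i_state |= I_LINKABLE;
		spin_unlock(&inode->i_lock);
	}
	return child;
}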
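
The ncpfs changes replace the hand-copied kvec arrays and the manual "advance the iov after a short send" arithmetic with an iov_iter kept in the request: the iterator is built once with iov_iter_kvec(), sock_sendmsg() consumes it through msg.msg_iter, and on a partial send the advanced iterator is copied back so the next attempt resumes where the socket stopped. A minimal sketch of that pattern, using the 4.11-era iov_iter_kvec() calling convention (direction OR'ed with ITER_KVEC); struct my_req, my_req_prepare() and my_req_try_send() are illustrative names, not part of the patch:

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

struct my_req {				/* hypothetical request object */
	struct kvec vec[2];
	struct iov_iter from;
};

static void my_req_prepare(struct my_req *req, void *hdr, size_t hlen,
			   void *data, size_t dlen)
{
	req->vec[0].iov_base = hdr;
	req->vec[0].iov_len  = hlen;
	req->vec[1].iov_base = data;
	req->vec[1].iov_len  = dlen;
	/* build the iterator once; it tracks how much is left to send */
	iov_iter_kvec(&req->from, WRITE | ITER_KVEC, req->vec, 2, hlen + dlen);
}

static int my_req_try_send(struct socket *sock, struct my_req *req)
{
	struct msghdr msg = { .msg_iter = req->from,
			      .msg_flags = MSG_NOSIGNAL | MSG_DONTWAIT };
	int ret = sock_sendmsg(sock, &msg);

	if (ret < 0)
		return ret;
	/* short send: remember the advanced iterator and resume later */
	if (msg_data_left(&msg))
		req->from = msg.msg_iter;
	return ret;
}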
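
vfs_fallocate() and vfs_copy_file_range() switch from sb_start_write()/sb_end_write() on inode->i_sb to the file_start_write()/file_end_write() helpers. Those helpers already live in include/linux/fs.h and are not part of this diff; roughly, they take the same SB_FREEZE_WRITE protection but skip it for non-regular files (paraphrased for context, see fs.h for the exact definitions):

static inline void file_start_write(struct file *file)
{
	if (!S_ISREG(file_inode(file)->i_mode))
		return;
	__sb_start_write(file_inode(file)->i_sb, SB_FREEZE_WRITE, true);
}

static inline void file_end_write(struct file *file)
{
	if (!S_ISREG(file_inode(file)->i_mode))
		return;
	__sb_end_write(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}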
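
The remaining read_write.c, aio, splice and sync hunks replace direct ->read_iter/->write_iter/->fsync method calls with call_read_iter(), call_write_iter() and call_fsync(), so every caller goes through one helper instead of open-coding the indirect call. The helpers are added to include/linux/fs.h by this series; the definitions below show their expected shape and are not taken from this diff:

static inline ssize_t call_read_iter(struct file *file, struct kiocb *kio,
				     struct iov_iter *iter)
{
	return file->f_op->read_iter(kio, iter);
}

static inline ssize_t call_write_iter(struct file *file, struct kiocb *kio,
				      struct iov_iter *iter)
{
	return file->f_op->write_iter(kio, iter);
}

static inline int call_fsync(struct file *file, loff_t start, loff_t end,
			     int datasync)
{
	return file->f_op->fsync(file, start, end, datasync);
}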