Diffstat (limited to 'fs')
374 files changed, 7173 insertions, 6246 deletions
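Two API migrations recur across the hunks below: the ->direct_IO method loses its explicit loff_t argument (the start offset is now read from iocb->ki_pos, and blockdev_direct_IO()/dax_do_io() drop the offset parameter to match), and directory file_operations switch from .iterate to .iterate_shared. The sketch below shows how a filesystem would typically adapt to both changes; "examplefs" and its helpers (examplefs_get_block, examplefs_readdir) are hypothetical and only illustrate the pattern visible in the 9p, affs and block_dev hunks, not code from this diff.

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>

/* Hypothetical helpers -- not part of this patch set. */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh_result, int create);
static int examplefs_readdir(struct file *file, struct dir_context *ctx);

/*
 * ->direct_IO after this series: no loff_t argument.  The starting
 * offset comes from iocb->ki_pos, and blockdev_direct_IO() no longer
 * takes it either (compare the affs_direct_IO hunk below).
 */
static ssize_t examplefs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = file_inode(iocb->ki_filp);
	loff_t offset = iocb->ki_pos;	/* previously passed in as a parameter */
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	ret = blockdev_direct_IO(iocb, inode, iter, examplefs_get_block);
	if (ret < 0 && iov_iter_rw(iter) == WRITE)
		pr_debug("examplefs: failed DIO write at %lld, len %zu\n",
			 offset, count);
	return ret;
}

/*
 * Directory iterators that can run under a shared inode lock are now
 * registered via .iterate_shared instead of .iterate.
 */
static const struct file_operations examplefs_dir_operations = {
	.read		= generic_read_dir,
	.llseek		= generic_file_llseek,
	.iterate_shared	= examplefs_readdir,
};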
diff --git a/fs/9p/acl.c b/fs/9p/acl.c index 9da967f38387..eb3589edf485 100644 --- a/fs/9p/acl.c +++ b/fs/9p/acl.c @@ -93,7 +93,7 @@ static struct posix_acl *v9fs_get_cached_acl(struct inode *inode, int type) * instantiating the inode (v9fs_inode_from_fid) */ acl = get_cached_acl(inode, type); - BUG_ON(acl == ACL_NOT_CACHED); + BUG_ON(is_uncached_acl(acl)); return acl; } @@ -213,8 +213,8 @@ int v9fs_acl_mode(struct inode *dir, umode_t *modep, } static int v9fs_xattr_get_acl(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *dentry, struct inode *inode, + const char *name, void *buffer, size_t size) { struct v9fs_session_info *v9ses; struct posix_acl *acl; @@ -227,7 +227,7 @@ static int v9fs_xattr_get_acl(const struct xattr_handler *handler, if ((v9ses->flags & V9FS_ACCESS_MASK) != V9FS_ACCESS_CLIENT) return v9fs_xattr_get(dentry, handler->name, buffer, size); - acl = v9fs_get_cached_acl(d_inode(dentry), handler->flags); + acl = v9fs_get_cached_acl(inode, handler->flags); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index ac9225e86bf3..c37fb9c08970 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c @@ -245,9 +245,10 @@ static int v9fs_launder_page(struct page *page) * */ static ssize_t -v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) +v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; + loff_t pos = iocb->ki_pos; ssize_t n; int err = 0; if (iov_iter_rw(iter) == WRITE) { diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index 5cc00e56206e..b0405d6aac85 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -246,7 +246,7 @@ int v9fs_dir_release(struct inode *inode, struct file *filp) const struct file_operations v9fs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, - .iterate = v9fs_dir_readdir, + .iterate_shared = v9fs_dir_readdir, .open = v9fs_file_open, .release = v9fs_dir_release, }; @@ -254,7 +254,7 @@ const struct file_operations v9fs_dir_operations = { const struct file_operations v9fs_dir_operations_dotl = { .read = generic_read_dir, .llseek = generic_file_llseek, - .iterate = v9fs_dir_readdir_dotl, + .iterate_shared = v9fs_dir_readdir_dotl, .open = v9fs_file_open, .release = v9fs_dir_release, .fsync = v9fs_file_fsync_dotl, diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 3a08b3e6ff1d..f4645c515262 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -1071,7 +1071,7 @@ v9fs_vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, if (IS_ERR(st)) return PTR_ERR(st); - v9fs_stat2inode(st, d_inode(dentry), d_inode(dentry)->i_sb); + v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb); generic_fillattr(d_inode(dentry), stat); p9stat_free(st); diff --git a/fs/9p/xattr.c b/fs/9p/xattr.c index 9dd9b47a6c1a..18c62bae9591 100644 --- a/fs/9p/xattr.c +++ b/fs/9p/xattr.c @@ -138,8 +138,8 @@ ssize_t v9fs_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size) } static int v9fs_xattr_handler_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *dentry, struct inode *inode, + const char *name, void *buffer, size_t size) { const char *full_name = xattr_full_name(handler, name); diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index 2d0cbbd14cfc..72c03354c14b 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt @@ -1,6 +1,7 @@ config BINFMT_ELF bool "Kernel support for ELF 
binaries" depends on MMU && (BROKEN || !FRV) + select ELFCORE default y ---help--- ELF (Executable and Linkable Format) is a format for libraries and @@ -26,6 +27,7 @@ config BINFMT_ELF config COMPAT_BINFMT_ELF bool depends on COMPAT && BINFMT_ELF + select ELFCORE config ARCH_BINFMT_ELF_STATE bool @@ -34,6 +36,7 @@ config BINFMT_ELF_FDPIC bool "Kernel support for FDPIC ELF binaries" default y depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X) + select ELFCORE help ELF FDPIC binaries are based on ELF, but allow the individual load segments of a binary to be located in memory independently of each @@ -43,6 +46,11 @@ config BINFMT_ELF_FDPIC It is also possible to run FDPIC ELF binaries on MMU linux also. +config ELFCORE + bool + help + This option enables kernel/elfcore.o. + config CORE_DUMP_DEFAULT_ELF_HEADERS bool "Write ELF core dumps with partial segments" default y diff --git a/fs/affs/dir.c b/fs/affs/dir.c index ac4f318aafba..f1e7294381c5 100644 --- a/fs/affs/dir.c +++ b/fs/affs/dir.c @@ -20,7 +20,7 @@ static int affs_readdir(struct file *, struct dir_context *); const struct file_operations affs_dir_operations = { .read = generic_read_dir, .llseek = generic_file_llseek, - .iterate = affs_readdir, + .iterate_shared = affs_readdir, .fsync = affs_file_fsync, }; diff --git a/fs/affs/file.c b/fs/affs/file.c index 0cde550050e8..0deec9cc2362 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -389,12 +389,13 @@ static void affs_write_failed(struct address_space *mapping, loff_t to) } static ssize_t -affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) +affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); + loff_t offset = iocb->ki_pos; ssize_t ret; if (iov_iter_rw(iter) == WRITE) { @@ -404,7 +405,7 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) return 0; } - ret = blockdev_direct_IO(iocb, inode, iter, offset, affs_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block); if (ret < 0 && iov_iter_rw(iter) == WRITE) affs_write_failed(mapping, offset + count); return ret; diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 5fda2bc53cd7..eba541004d90 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -43,7 +43,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, const struct file_operations afs_dir_file_operations = { .open = afs_dir_open, .release = afs_release, - .iterate = afs_readdir, + .iterate_shared = afs_readdir, .lock = afs_lock, .llseek = generic_file_llseek, }; @@ -128,7 +128,7 @@ struct afs_lookup_cookie { /* * check that a directory page is valid */ -static inline void afs_dir_check_page(struct inode *dir, struct page *page) +static inline bool afs_dir_check_page(struct inode *dir, struct page *page) { struct afs_dir_page *dbuf; loff_t latter; @@ -168,11 +168,11 @@ static inline void afs_dir_check_page(struct inode *dir, struct page *page) } SetPageChecked(page); - return; + return true; error: - SetPageChecked(page); SetPageError(page); + return false; } /* @@ -196,10 +196,10 @@ static struct page *afs_dir_get_page(struct inode *dir, unsigned long index, page = read_cache_page(dir->i_mapping, index, afs_page_filler, key); if (!IS_ERR(page)) { kmap(page); - if (!PageChecked(page)) - afs_dir_check_page(dir, page); - if (PageError(page)) - goto fail; + if (unlikely(!PageChecked(page))) { + if (PageError(page) || !afs_dir_check_page(dir, 
page)) + goto fail; + } } return page; diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index b50642870a43..63cd9f939f19 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -65,6 +65,12 @@ static void afs_async_workfn(struct work_struct *work) call->async_workfn(call); } +static int afs_wait_atomic_t(atomic_t *p) +{ + schedule(); + return 0; +} + /* * open an RxRPC socket and bind it to be a server for callback notifications * - the socket is left in blocking mode and non-blocking ops use MSG_DONTWAIT @@ -126,13 +132,16 @@ void afs_close_socket(void) { _enter(""); + wait_on_atomic_t(&afs_outstanding_calls, afs_wait_atomic_t, + TASK_UNINTERRUPTIBLE); + _debug("no outstanding calls"); + sock_release(afs_socket); _debug("dework"); destroy_workqueue(afs_async_calls); ASSERTCMP(atomic_read(&afs_outstanding_skbs), ==, 0); - ASSERTCMP(atomic_read(&afs_outstanding_calls), ==, 0); _leave(""); } @@ -178,8 +187,6 @@ static void afs_free_call(struct afs_call *call) { _debug("DONE %p{%s} [%d]", call, call->type->name, atomic_read(&afs_outstanding_calls)); - if (atomic_dec_return(&afs_outstanding_calls) == -1) - BUG(); ASSERTCMP(call->rxcall, ==, NULL); ASSERT(!work_pending(&call->async_work)); @@ -188,6 +195,9 @@ static void afs_free_call(struct afs_call *call) kfree(call->request); kfree(call); + + if (atomic_dec_and_test(&afs_outstanding_calls)) + wake_up_atomic_t(&afs_outstanding_calls); } /* @@ -420,9 +430,11 @@ error_kill_call: } /* - * handles intercepted messages that were arriving in the socket's Rx queue - * - called with the socket receive queue lock held to ensure message ordering - * - called with softirqs disabled + * Handles intercepted messages that were arriving in the socket's Rx queue. + * + * Called from the AF_RXRPC call processor in waitqueue process context. For + * each call, it is guaranteed this will be called in order of packet to be + * delivered. 
*/ static void afs_rx_interceptor(struct sock *sk, unsigned long user_call_ID, struct sk_buff *skb) @@ -513,6 +525,12 @@ static void afs_deliver_to_call(struct afs_call *call) call->state = AFS_CALL_ABORTED; _debug("Rcv ABORT %u -> %d", abort_code, call->error); break; + case RXRPC_SKB_MARK_LOCAL_ABORT: + abort_code = rxrpc_kernel_get_abort_code(skb); + call->error = call->type->abort_to_error(abort_code); + call->state = AFS_CALL_ABORTED; + _debug("Loc ABORT %u -> %d", abort_code, call->error); + break; case RXRPC_SKB_MARK_NET_ERROR: call->error = -rxrpc_kernel_get_error_number(skb); call->state = AFS_CALL_ERROR; @@ -496,7 +496,12 @@ static int aio_setup_ring(struct kioctx *ctx) ctx->mmap_size = nr_pages * PAGE_SIZE; pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); - down_write(&mm->mmap_sem); + if (down_write_killable(&mm->mmap_sem)) { + ctx->mmap_size = 0; + aio_free_ring(ctx); + return -EINTR; + } + ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, 0, &unused); @@ -1447,8 +1452,6 @@ rw_common: return ret; } - len = ret; - if (rw == WRITE) file_start_write(file); diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 7ab923940d18..78bd80298528 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -39,7 +39,7 @@ const struct file_operations autofs4_root_operations = { .open = dcache_dir_open, .release = dcache_dir_close, .read = generic_read_dir, - .iterate = dcache_readdir, + .iterate_shared = dcache_readdir, .llseek = dcache_dir_lseek, .unlocked_ioctl = autofs4_root_ioctl, #ifdef CONFIG_COMPAT @@ -51,7 +51,7 @@ const struct file_operations autofs4_dir_operations = { .open = autofs4_dir_open, .release = dcache_dir_close, .read = generic_read_dir, - .iterate = dcache_readdir, + .iterate_shared = dcache_readdir, .llseek = dcache_dir_lseek, }; diff --git a/fs/bad_inode.c b/fs/bad_inode.c index 103f5d7c3083..72e35b721608 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -106,8 +106,8 @@ static int bad_inode_setxattr(struct dentry *dentry, const char *name, return -EIO; } -static ssize_t bad_inode_getxattr(struct dentry *dentry, const char *name, - void *buffer, size_t size) +static ssize_t bad_inode_getxattr(struct dentry *dentry, struct inode *inode, + const char *name, void *buffer, size_t size) { return -EIO; } diff --git a/fs/befs/befs.h b/fs/befs/befs.h index 35d19e8731e3..e0f59263a96d 100644 --- a/fs/befs/befs.h +++ b/fs/befs/befs.h @@ -116,7 +116,7 @@ BEFS_I(const struct inode *inode) } static inline befs_blocknr_t -iaddr2blockno(struct super_block *sb, befs_inode_addr * iaddr) +iaddr2blockno(struct super_block *sb, const befs_inode_addr *iaddr) { return ((iaddr->allocation_group << BEFS_SB(sb)->ag_shift) + iaddr->start); @@ -141,7 +141,7 @@ befs_iaddrs_per_block(struct super_block *sb) } static inline int -befs_iaddr_is_empty(befs_inode_addr * iaddr) +befs_iaddr_is_empty(const befs_inode_addr *iaddr) { return (!iaddr->allocation_group) && (!iaddr->start) && (!iaddr->len); } diff --git a/fs/befs/btree.c b/fs/befs/btree.c index 22c166280883..307645f9e284 100644 --- a/fs/befs/btree.c +++ b/fs/befs/btree.c @@ -88,15 +88,15 @@ struct befs_btree_node { static const befs_off_t befs_bt_inval = 0xffffffffffffffffULL; /* local functions */ -static int befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds, +static int befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds, befs_btree_super * bt_super, struct befs_btree_node *this_node, befs_off_t * node_off); -static int 
befs_bt_read_super(struct super_block *sb, befs_data_stream * ds, +static int befs_bt_read_super(struct super_block *sb, const befs_data_stream *ds, befs_btree_super * sup); -static int befs_bt_read_node(struct super_block *sb, befs_data_stream * ds, +static int befs_bt_read_node(struct super_block *sb, const befs_data_stream *ds, struct befs_btree_node *node, befs_off_t node_off); @@ -134,7 +134,7 @@ static int befs_compare_strings(const void *key1, int keylen1, * On failure, BEFS_ERR is returned. */ static int -befs_bt_read_super(struct super_block *sb, befs_data_stream * ds, +befs_bt_read_super(struct super_block *sb, const befs_data_stream *ds, befs_btree_super * sup) { struct buffer_head *bh; @@ -193,7 +193,7 @@ befs_bt_read_super(struct super_block *sb, befs_data_stream * ds, */ static int -befs_bt_read_node(struct super_block *sb, befs_data_stream * ds, +befs_bt_read_node(struct super_block *sb, const befs_data_stream *ds, struct befs_btree_node *node, befs_off_t node_off) { uint off = 0; @@ -247,7 +247,7 @@ befs_bt_read_node(struct super_block *sb, befs_data_stream * ds, * actuall value stored with the key. */ int -befs_btree_find(struct super_block *sb, befs_data_stream * ds, +befs_btree_find(struct super_block *sb, const befs_data_stream *ds, const char *key, befs_off_t * value) { struct befs_btree_node *this_node; @@ -416,7 +416,7 @@ befs_find_key(struct super_block *sb, struct befs_btree_node *node, * until the (key_no)th key is found or the tree is out of keys. */ int -befs_btree_read(struct super_block *sb, befs_data_stream * ds, +befs_btree_read(struct super_block *sb, const befs_data_stream *ds, loff_t key_no, size_t bufsize, char *keybuf, size_t * keysize, befs_off_t * value) { @@ -548,7 +548,7 @@ befs_btree_read(struct super_block *sb, befs_data_stream * ds, * Also checks for an empty tree. If there are no keys, returns BEFS_BT_EMPTY. */ static int -befs_btree_seekleaf(struct super_block *sb, befs_data_stream * ds, +befs_btree_seekleaf(struct super_block *sb, const befs_data_stream *ds, befs_btree_super *bt_super, struct befs_btree_node *this_node, befs_off_t * node_off) diff --git a/fs/befs/btree.h b/fs/befs/btree.h index 92e781e5f30e..f2a8f637e9e0 100644 --- a/fs/befs/btree.h +++ b/fs/befs/btree.h @@ -4,10 +4,10 @@ */ -int befs_btree_find(struct super_block *sb, befs_data_stream * ds, +int befs_btree_find(struct super_block *sb, const befs_data_stream *ds, const char *key, befs_off_t * value); -int befs_btree_read(struct super_block *sb, befs_data_stream * ds, +int befs_btree_read(struct super_block *sb, const befs_data_stream *ds, loff_t key_no, size_t bufsize, char *keybuf, size_t * keysize, befs_off_t * value); diff --git a/fs/befs/datastream.c b/fs/befs/datastream.c index ebd50718659f..af1bc19b7c85 100644 --- a/fs/befs/datastream.c +++ b/fs/befs/datastream.c @@ -21,16 +21,16 @@ const befs_inode_addr BAD_IADDR = { 0, 0, 0 }; static int befs_find_brun_direct(struct super_block *sb, - befs_data_stream * data, + const befs_data_stream *data, befs_blocknr_t blockno, befs_block_run * run); static int befs_find_brun_indirect(struct super_block *sb, - befs_data_stream * data, + const befs_data_stream *data, befs_blocknr_t blockno, befs_block_run * run); static int befs_find_brun_dblindirect(struct super_block *sb, - befs_data_stream * data, + const befs_data_stream *data, befs_blocknr_t blockno, befs_block_run * run); @@ -45,10 +45,10 @@ static int befs_find_brun_dblindirect(struct super_block *sb, * if you don't need to know offset just set @off = NULL. 
*/ struct buffer_head * -befs_read_datastream(struct super_block *sb, befs_data_stream * ds, +befs_read_datastream(struct super_block *sb, const befs_data_stream *ds, befs_off_t pos, uint * off) { - struct buffer_head *bh = NULL; + struct buffer_head *bh; befs_block_run run; befs_blocknr_t block; /* block coresponding to pos */ @@ -87,7 +87,7 @@ befs_read_datastream(struct super_block *sb, befs_data_stream * ds, * 2001-11-15 Will Dyson */ int -befs_fblock2brun(struct super_block *sb, befs_data_stream * data, +befs_fblock2brun(struct super_block *sb, const befs_data_stream *data, befs_blocknr_t fblock, befs_block_run * run) { int err; @@ -122,12 +122,12 @@ befs_fblock2brun(struct super_block *sb, befs_data_stream * data, * Returns the number of bytes read */ size_t -befs_read_lsymlink(struct super_block * sb, befs_data_stream * ds, void *buff, - befs_off_t len) +befs_read_lsymlink(struct super_block *sb, const befs_data_stream *ds, + void *buff, befs_off_t len) { befs_off_t bytes_read = 0; /* bytes readed */ u16 plen; - struct buffer_head *bh = NULL; + struct buffer_head *bh; befs_debug(sb, "---> %s length: %llu", __func__, len); while (bytes_read < len) { @@ -163,7 +163,7 @@ befs_read_lsymlink(struct super_block * sb, befs_data_stream * ds, void *buff, */ befs_blocknr_t -befs_count_blocks(struct super_block * sb, befs_data_stream * ds) +befs_count_blocks(struct super_block *sb, const befs_data_stream *ds) { befs_blocknr_t blocks; befs_blocknr_t datablocks; /* File data blocks */ @@ -243,11 +243,11 @@ befs_count_blocks(struct super_block * sb, befs_data_stream * ds) 2001-11-15 Will Dyson */ static int -befs_find_brun_direct(struct super_block *sb, befs_data_stream * data, +befs_find_brun_direct(struct super_block *sb, const befs_data_stream *data, befs_blocknr_t blockno, befs_block_run * run) { int i; - befs_block_run *array = data->direct; + const befs_block_run *array = data->direct; befs_blocknr_t sum; befs_blocknr_t max_block = data->max_direct_range >> BEFS_SB(sb)->block_shift; @@ -304,7 +304,8 @@ befs_find_brun_direct(struct super_block *sb, befs_data_stream * data, */ static int befs_find_brun_indirect(struct super_block *sb, - befs_data_stream * data, befs_blocknr_t blockno, + const befs_data_stream *data, + befs_blocknr_t blockno, befs_block_run * run) { int i, j; @@ -412,7 +413,8 @@ befs_find_brun_indirect(struct super_block *sb, */ static int befs_find_brun_dblindirect(struct super_block *sb, - befs_data_stream * data, befs_blocknr_t blockno, + const befs_data_stream *data, + befs_blocknr_t blockno, befs_block_run * run) { int dblindir_indx; @@ -427,7 +429,7 @@ befs_find_brun_dblindirect(struct super_block *sb, struct buffer_head *dbl_indir_block; struct buffer_head *indir_block; befs_block_run indir_run; - befs_disk_inode_addr *iaddr_array = NULL; + befs_disk_inode_addr *iaddr_array; struct befs_sb_info *befs_sb = BEFS_SB(sb); befs_blocknr_t indir_start_blk = @@ -486,7 +488,6 @@ befs_find_brun_dblindirect(struct super_block *sb, iaddr_array = (befs_disk_inode_addr *) dbl_indir_block->b_data; indir_run = fsrun_to_cpu(sb, iaddr_array[dbl_block_indx]); brelse(dbl_indir_block); - iaddr_array = NULL; /* Read indirect block */ which_block = indir_indx / befs_iaddrs_per_block(sb); @@ -511,7 +512,6 @@ befs_find_brun_dblindirect(struct super_block *sb, iaddr_array = (befs_disk_inode_addr *) indir_block->b_data; *run = fsrun_to_cpu(sb, iaddr_array[block_indx]); brelse(indir_block); - iaddr_array = NULL; blockno_at_run_start = indir_start_blk; blockno_at_run_start += diblklen * 
dblindir_indx; diff --git a/fs/befs/datastream.h b/fs/befs/datastream.h index 45e8a3c98249..91ba8203d83f 100644 --- a/fs/befs/datastream.h +++ b/fs/befs/datastream.h @@ -4,16 +4,17 @@ */ struct buffer_head *befs_read_datastream(struct super_block *sb, - befs_data_stream * ds, befs_off_t pos, - uint * off); + const befs_data_stream *ds, + befs_off_t pos, uint * off); -int befs_fblock2brun(struct super_block *sb, befs_data_stream * data, +int befs_fblock2brun(struct super_block *sb, const befs_data_stream *data, befs_blocknr_t fblock, befs_block_run * run); -size_t befs_read_lsymlink(struct super_block *sb, befs_data_stream * data, +size_t befs_read_lsymlink(struct super_block *sb, const befs_data_stream *data, void *buff, befs_off_t len); -befs_blocknr_t befs_count_blocks(struct super_block *sb, befs_data_stream * ds); +befs_blocknr_t befs_count_blocks(struct super_block *sb, + const befs_data_stream *ds); extern const befs_inode_addr BAD_IADDR; diff --git a/fs/befs/io.c b/fs/befs/io.c index 7a5b4ec21c56..523c8af2d770 100644 --- a/fs/befs/io.c +++ b/fs/befs/io.c @@ -26,7 +26,7 @@ struct buffer_head * befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr) { - struct buffer_head *bh = NULL; + struct buffer_head *bh; befs_blocknr_t block = 0; struct befs_sb_info *befs_sb = BEFS_SB(sb); @@ -63,7 +63,7 @@ befs_bread_iaddr(struct super_block *sb, befs_inode_addr iaddr) struct buffer_head * befs_bread(struct super_block *sb, befs_blocknr_t block) { - struct buffer_head *bh = NULL; + struct buffer_head *bh; befs_debug(sb, "---> Enter %s %lu", __func__, (unsigned long)block); diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index cc0e08252913..7da05b159ade 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -66,7 +66,7 @@ static struct kmem_cache *befs_inode_cachep; static const struct file_operations befs_dir_operations = { .read = generic_read_dir, - .iterate = befs_readdir, + .iterate_shared = befs_readdir, .llseek = generic_file_llseek, }; @@ -155,9 +155,9 @@ befs_get_block(struct inode *inode, sector_t block, static struct dentry * befs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { - struct inode *inode = NULL; + struct inode *inode; struct super_block *sb = dir->i_sb; - befs_data_stream *ds = &BEFS_I(dir)->i_data.ds; + const befs_data_stream *ds = &BEFS_I(dir)->i_data.ds; befs_off_t offset; int ret; int utfnamelen; @@ -207,7 +207,7 @@ befs_readdir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); struct super_block *sb = inode->i_sb; - befs_data_stream *ds = &BEFS_I(inode)->i_data.ds; + const befs_data_stream *ds = &BEFS_I(inode)->i_data.ds; befs_off_t value; int result; size_t keysize; @@ -294,10 +294,10 @@ static void init_once(void *foo) static struct inode *befs_iget(struct super_block *sb, unsigned long ino) { - struct buffer_head *bh = NULL; - befs_inode *raw_inode = NULL; + struct buffer_head *bh; + befs_inode *raw_inode; struct befs_sb_info *befs_sb = BEFS_SB(sb); - struct befs_inode_info *befs_ino = NULL; + struct befs_inode_info *befs_ino; struct inode *inode; long ret = -EIO; diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index 3ec6113146c0..34a5bc2f1290 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c @@ -70,7 +70,7 @@ static int bfs_readdir(struct file *f, struct dir_context *ctx) const struct file_operations bfs_dir_operations = { .read = generic_read_dir, - .iterate = bfs_readdir, + .iterate_shared = bfs_readdir, .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; diff --git a/fs/binfmt_aout.c 
b/fs/binfmt_aout.c index 4c556680fa74..2fab9f130e51 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c @@ -297,7 +297,10 @@ static int load_aout_binary(struct linux_binprm * bprm) } if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { - vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); + error = vm_brk(N_TXTADDR(ex), ex.a_text+ex.a_data); + if (IS_ERR_VALUE(error)) + return error; + read_code(bprm->file, N_TXTADDR(ex), fd_offset, ex.a_text + ex.a_data); goto beyond_if; @@ -378,8 +381,10 @@ static int load_aout_library(struct file *file) "N_TXTOFF is not page aligned. Please convert library: %pD\n", file); } - vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); - + retval = vm_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); + if (IS_ERR_VALUE(retval)) + goto out; + read_code(file, start_addr, N_TXTOFF(ex), ex.a_text + ex.a_data); retval = 0; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 81381cc0dd17..938fc4ede764 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1176,8 +1176,11 @@ static int load_elf_library(struct file *file) len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr + ELF_MIN_ALIGN - 1); bss = eppnt->p_memsz + eppnt->p_vaddr; - if (bss > len) - vm_brk(len, bss - len); + if (bss > len) { + error = vm_brk(len, bss - len); + if (BAD_ADDR(error)) + goto out_free_ph; + } error = 0; out_free_ph: @@ -2273,7 +2276,7 @@ static int elf_core_dump(struct coredump_params *cprm) goto end_coredump; /* Align to page */ - if (!dump_skip(cprm, dataoff - cprm->written)) + if (!dump_skip(cprm, dataoff - cprm->file->f_pos)) goto end_coredump; for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 083ea2bc60ab..71ade0e556b7 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -1787,7 +1787,7 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) goto end_coredump; } - if (!dump_skip(cprm, dataoff - cprm->written)) + if (!dump_skip(cprm, dataoff - cprm->file->f_pos)) goto end_coredump; if (!elf_fdpic_dump_segments(cprm)) diff --git a/fs/block_dev.c b/fs/block_dev.c index 20a2c02b77c4..1089dbf25925 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -29,6 +29,7 @@ #include <linux/log2.h> #include <linux/cleancache.h> #include <linux/dax.h> +#include <linux/badblocks.h> #include <asm/uaccess.h> #include "internal.h" @@ -162,15 +163,15 @@ static struct inode *bdev_file_inode(struct file *file) } static ssize_t -blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) +blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = bdev_file_inode(file); if (IS_DAX(inode)) - return dax_do_io(iocb, inode, iter, offset, blkdev_get_block, + return dax_do_io(iocb, inode, iter, blkdev_get_block, NULL, DIO_SKIP_DIO_COUNT); - return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset, + return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, blkdev_get_block, NULL, NULL, DIO_SKIP_DIO_COUNT); } @@ -1159,6 +1160,33 @@ void bd_set_size(struct block_device *bdev, loff_t size) } EXPORT_SYMBOL(bd_set_size); +static bool blkdev_dax_capable(struct block_device *bdev) +{ + struct gendisk *disk = bdev->bd_disk; + + if (!disk->fops->direct_access || !IS_ENABLED(CONFIG_FS_DAX)) + return false; + + /* + * If the partition is not aligned on a page boundary, we can't + * do dax I/O to it. 
+ */ + if ((bdev->bd_part->start_sect % (PAGE_SIZE / 512)) + || (bdev->bd_part->nr_sects % (PAGE_SIZE / 512))) + return false; + + /* + * If the device has known bad blocks, force all I/O through the + * driver / page cache. + * + * TODO: support finer grained dax error handling + */ + if (disk->bb && disk->bb->count) + return false; + + return true; +} + static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); /* @@ -1660,12 +1688,8 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from) blk_start_plug(&plug); ret = __generic_file_write_iter(iocb, from); - if (ret > 0) { - ssize_t err; - err = generic_write_sync(file, iocb->ki_pos - ret, ret); - if (err < 0) - ret = err; - } + if (ret > 0) + ret = generic_write_sync(iocb, ret); blk_finish_plug(&plug); return ret; } @@ -1724,79 +1748,13 @@ static const struct address_space_operations def_blk_aops = { .is_dirty_writeback = buffer_check_dirty_writeback, }; -#ifdef CONFIG_FS_DAX -/* - * In the raw block case we do not need to contend with truncation nor - * unwritten file extents. Without those concerns there is no need for - * additional locking beyond the mmap_sem context that these routines - * are already executing under. - * - * Note, there is no protection if the block device is dynamically - * resized (partition grow/shrink) during a fault. A stable block device - * size is already not enforced in the blkdev_direct_IO path. - * - * For DAX, it is the responsibility of the block device driver to - * ensure the whole-disk device size is stable while requests are in - * flight. - * - * Finally, unlike the filemap_page_mkwrite() case there is no - * filesystem superblock to sync against freezing. We still include a - * pfn_mkwrite callback for dax drivers to receive write fault - * notifications. 
- */ -static int blkdev_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf) -{ - return __dax_fault(vma, vmf, blkdev_get_block, NULL); -} - -static int blkdev_dax_pfn_mkwrite(struct vm_area_struct *vma, - struct vm_fault *vmf) -{ - return dax_pfn_mkwrite(vma, vmf); -} - -static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, unsigned int flags) -{ - return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL); -} - -static const struct vm_operations_struct blkdev_dax_vm_ops = { - .fault = blkdev_dax_fault, - .pmd_fault = blkdev_dax_pmd_fault, - .pfn_mkwrite = blkdev_dax_pfn_mkwrite, -}; - -static const struct vm_operations_struct blkdev_default_vm_ops = { - .fault = filemap_fault, - .map_pages = filemap_map_pages, -}; - -static int blkdev_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct inode *bd_inode = bdev_file_inode(file); - - file_accessed(file); - if (IS_DAX(bd_inode)) { - vma->vm_ops = &blkdev_dax_vm_ops; - vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE; - } else { - vma->vm_ops = &blkdev_default_vm_ops; - } - - return 0; -} -#else -#define blkdev_mmap generic_file_mmap -#endif - const struct file_operations def_blk_fops = { .open = blkdev_open, .release = blkdev_close, .llseek = block_llseek, .read_iter = blkdev_read_iter, .write_iter = blkdev_write_iter, - .mmap = blkdev_mmap, + .mmap = generic_file_mmap, .fsync = blkdev_fsync, .unlocked_ioctl = block_ioctl, #ifdef CONFIG_COMPAT diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 6d263bb1621c..67a607709d4f 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c @@ -63,9 +63,6 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type) } kfree(value); - if (!IS_ERR(acl)) - set_cached_acl(inode, type, acl); - return acl; } diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 80e8472d618b..d3090187fd76 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1991,7 +1991,7 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, ifp = kmalloc(sizeof(*ifp), GFP_NOFS); if (!ifp) { - kfree(fspath); + vfree(fspath); return ERR_PTR(-ENOMEM); } diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 61205e3bbefa..1da5753d886d 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -196,6 +196,16 @@ struct btrfs_inode { struct list_head delayed_iput; long delayed_iput_count; + /* + * To avoid races between lockless (i_mutex not held) direct IO writes + * and concurrent fsync requests. Direct IO writes must acquire read + * access on this semaphore for creating an extent map and its + * corresponding ordered extent. The fast fsync path must acquire write + * access on this semaphore before it collects ordered extents and + * extent maps. 
+ */ + struct rw_semaphore dio_sem; + struct inode vfs_inode; }; diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index ff61a41ac90b..658c39b70fba 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c @@ -743,8 +743,11 @@ out: static struct { struct list_head idle_ws; spinlock_t ws_lock; - int num_ws; - atomic_t alloc_ws; + /* Number of free workspaces */ + int free_ws; + /* Total number of allocated workspaces */ + atomic_t total_ws; + /* Waiters for a free workspace */ wait_queue_head_t ws_wait; } btrfs_comp_ws[BTRFS_COMPRESS_TYPES]; @@ -758,16 +761,34 @@ void __init btrfs_init_compress(void) int i; for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) { + struct list_head *workspace; + INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws); spin_lock_init(&btrfs_comp_ws[i].ws_lock); - atomic_set(&btrfs_comp_ws[i].alloc_ws, 0); + atomic_set(&btrfs_comp_ws[i].total_ws, 0); init_waitqueue_head(&btrfs_comp_ws[i].ws_wait); + + /* + * Preallocate one workspace for each compression type so + * we can guarantee forward progress in the worst case + */ + workspace = btrfs_compress_op[i]->alloc_workspace(); + if (IS_ERR(workspace)) { + printk(KERN_WARNING + "BTRFS: cannot preallocate compression workspace, will try later"); + } else { + atomic_set(&btrfs_comp_ws[i].total_ws, 1); + btrfs_comp_ws[i].free_ws = 1; + list_add(workspace, &btrfs_comp_ws[i].idle_ws); + } } } /* - * this finds an available workspace or allocates a new one - * ERR_PTR is returned if things go bad. + * This finds an available workspace or allocates a new one. + * If it's not possible to allocate a new one, waits until there's one. + * Preallocation makes a forward progress guarantees and we do not return + * errors. */ static struct list_head *find_workspace(int type) { @@ -777,36 +798,58 @@ static struct list_head *find_workspace(int type) struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws; spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock; - atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws; + atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws; wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait; - int *num_ws = &btrfs_comp_ws[idx].num_ws; + int *free_ws = &btrfs_comp_ws[idx].free_ws; again: spin_lock(ws_lock); if (!list_empty(idle_ws)) { workspace = idle_ws->next; list_del(workspace); - (*num_ws)--; + (*free_ws)--; spin_unlock(ws_lock); return workspace; } - if (atomic_read(alloc_ws) > cpus) { + if (atomic_read(total_ws) > cpus) { DEFINE_WAIT(wait); spin_unlock(ws_lock); prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE); - if (atomic_read(alloc_ws) > cpus && !*num_ws) + if (atomic_read(total_ws) > cpus && !*free_ws) schedule(); finish_wait(ws_wait, &wait); goto again; } - atomic_inc(alloc_ws); + atomic_inc(total_ws); spin_unlock(ws_lock); workspace = btrfs_compress_op[idx]->alloc_workspace(); if (IS_ERR(workspace)) { - atomic_dec(alloc_ws); + atomic_dec(total_ws); wake_up(ws_wait); + + /* + * Do not return the error but go back to waiting. There's a + * workspace preallocated for each type and the compression + * time is bounded so we get to a workspace eventually. This + * makes our caller's life easier. + * + * To prevent silent and low-probability deadlocks (when the + * initial preallocation fails), check if there are any + * workspaces at all. 
+ */ + if (atomic_read(total_ws) == 0) { + static DEFINE_RATELIMIT_STATE(_rs, + /* once per minute */ 60 * HZ, + /* no burst */ 1); + + if (__ratelimit(&_rs)) { + printk(KERN_WARNING + "no compression workspaces, low memory, retrying"); + } + } + goto again; } return workspace; } @@ -820,21 +863,21 @@ static void free_workspace(int type, struct list_head *workspace) int idx = type - 1; struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws; spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock; - atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws; + atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws; wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait; - int *num_ws = &btrfs_comp_ws[idx].num_ws; + int *free_ws = &btrfs_comp_ws[idx].free_ws; spin_lock(ws_lock); - if (*num_ws < num_online_cpus()) { + if (*free_ws < num_online_cpus()) { list_add(workspace, idle_ws); - (*num_ws)++; + (*free_ws)++; spin_unlock(ws_lock); goto wake; } spin_unlock(ws_lock); btrfs_compress_op[idx]->free_workspace(workspace); - atomic_dec(alloc_ws); + atomic_dec(total_ws); wake: /* * Make sure counter is updated before we wake up waiters. @@ -857,7 +900,7 @@ static void free_workspaces(void) workspace = btrfs_comp_ws[i].idle_ws.next; list_del(workspace); btrfs_compress_op[i]->free_workspace(workspace); - atomic_dec(&btrfs_comp_ws[i].alloc_ws); + atomic_dec(&btrfs_comp_ws[i].total_ws); } } } @@ -894,8 +937,6 @@ int btrfs_compress_pages(int type, struct address_space *mapping, int ret; workspace = find_workspace(type); - if (IS_ERR(workspace)) - return PTR_ERR(workspace); ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, start, len, pages, @@ -930,8 +971,6 @@ static int btrfs_decompress_biovec(int type, struct page **pages_in, int ret; workspace = find_workspace(type); - if (IS_ERR(workspace)) - return PTR_ERR(workspace); ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in, disk_start, @@ -952,8 +991,6 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, int ret; workspace = find_workspace(type); - if (IS_ERR(workspace)) - return PTR_ERR(workspace); ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, dest_page, start_byte, diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index ec7928a27aaa..decd0a3f5d61 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1011,7 +1011,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans, return ret; if (refs == 0) { ret = -EROFS; - btrfs_std_error(root->fs_info, ret, NULL); + btrfs_handle_fs_error(root->fs_info, ret, NULL); return ret; } } else { @@ -1928,7 +1928,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, child = read_node_slot(root, mid, 0); if (!child) { ret = -EROFS; - btrfs_std_error(root->fs_info, ret, NULL); + btrfs_handle_fs_error(root->fs_info, ret, NULL); goto enospc; } @@ -2031,7 +2031,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans, */ if (!left) { ret = -EROFS; - btrfs_std_error(root->fs_info, ret, NULL); + btrfs_handle_fs_error(root->fs_info, ret, NULL); goto enospc; } wret = balance_node_right(trans, root, mid, left); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 84a6a5b3384a..ddcc58f03c79 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -33,6 +33,7 @@ #include <asm/kmap_types.h> #include <linux/pagemap.h> #include <linux/btrfs.h> +#include <linux/btrfs_tree.h> #include <linux/workqueue.h> #include <linux/security.h> #include <linux/sizes.h> @@ -64,98 +65,6 @@ struct btrfs_ordered_sum; #define 
BTRFS_COMPAT_EXTENT_TREE_V0 -/* holds pointers to all of the tree roots */ -#define BTRFS_ROOT_TREE_OBJECTID 1ULL - -/* stores information about which extents are in use, and reference counts */ -#define BTRFS_EXTENT_TREE_OBJECTID 2ULL - -/* - * chunk tree stores translations from logical -> physical block numbering - * the super block points to the chunk tree - */ -#define BTRFS_CHUNK_TREE_OBJECTID 3ULL - -/* - * stores information about which areas of a given device are in use. - * one per device. The tree of tree roots points to the device tree - */ -#define BTRFS_DEV_TREE_OBJECTID 4ULL - -/* one per subvolume, storing files and directories */ -#define BTRFS_FS_TREE_OBJECTID 5ULL - -/* directory objectid inside the root tree */ -#define BTRFS_ROOT_TREE_DIR_OBJECTID 6ULL - -/* holds checksums of all the data extents */ -#define BTRFS_CSUM_TREE_OBJECTID 7ULL - -/* holds quota configuration and tracking */ -#define BTRFS_QUOTA_TREE_OBJECTID 8ULL - -/* for storing items that use the BTRFS_UUID_KEY* types */ -#define BTRFS_UUID_TREE_OBJECTID 9ULL - -/* tracks free space in block groups. */ -#define BTRFS_FREE_SPACE_TREE_OBJECTID 10ULL - -/* device stats in the device tree */ -#define BTRFS_DEV_STATS_OBJECTID 0ULL - -/* for storing balance parameters in the root tree */ -#define BTRFS_BALANCE_OBJECTID -4ULL - -/* orhpan objectid for tracking unlinked/truncated files */ -#define BTRFS_ORPHAN_OBJECTID -5ULL - -/* does write ahead logging to speed up fsyncs */ -#define BTRFS_TREE_LOG_OBJECTID -6ULL -#define BTRFS_TREE_LOG_FIXUP_OBJECTID -7ULL - -/* for space balancing */ -#define BTRFS_TREE_RELOC_OBJECTID -8ULL -#define BTRFS_DATA_RELOC_TREE_OBJECTID -9ULL - -/* - * extent checksums all have this objectid - * this allows them to share the logging tree - * for fsyncs - */ -#define BTRFS_EXTENT_CSUM_OBJECTID -10ULL - -/* For storing free space cache */ -#define BTRFS_FREE_SPACE_OBJECTID -11ULL - -/* - * The inode number assigned to the special inode for storing - * free ino cache - */ -#define BTRFS_FREE_INO_OBJECTID -12ULL - -/* dummy objectid represents multiple objectids */ -#define BTRFS_MULTIPLE_OBJECTIDS -255ULL - -/* - * All files have objectids in this range. - */ -#define BTRFS_FIRST_FREE_OBJECTID 256ULL -#define BTRFS_LAST_FREE_OBJECTID -256ULL -#define BTRFS_FIRST_CHUNK_TREE_OBJECTID 256ULL - - -/* - * the device items go into the chunk tree. The key is in the form - * [ 1 BTRFS_DEV_ITEM_KEY device_id ] - */ -#define BTRFS_DEV_ITEMS_OBJECTID 1ULL - -#define BTRFS_BTREE_INODE_OBJECTID 1 - -#define BTRFS_EMPTY_SUBVOL_DIR_OBJECTID 2 - -#define BTRFS_DEV_REPLACE_DEVID 0ULL - /* * the max metadata block size. This limit is somewhat artificial, * but the memmove costs go through the roof for larger blocks. 
@@ -175,12 +84,6 @@ struct btrfs_ordered_sum; */ #define BTRFS_LINK_MAX 65535U -/* 32 bytes in various csum fields */ -#define BTRFS_CSUM_SIZE 32 - -/* csum types */ -#define BTRFS_CSUM_TYPE_CRC32 0 - static const int btrfs_csum_sizes[] = { 4 }; /* four bytes for CRC32 */ @@ -189,17 +92,6 @@ static const int btrfs_csum_sizes[] = { 4 }; /* spefic to btrfs_map_block(), therefore not in include/linux/blk_types.h */ #define REQ_GET_READ_MIRRORS (1 << 30) -#define BTRFS_FT_UNKNOWN 0 -#define BTRFS_FT_REG_FILE 1 -#define BTRFS_FT_DIR 2 -#define BTRFS_FT_CHRDEV 3 -#define BTRFS_FT_BLKDEV 4 -#define BTRFS_FT_FIFO 5 -#define BTRFS_FT_SOCK 6 -#define BTRFS_FT_SYMLINK 7 -#define BTRFS_FT_XATTR 8 -#define BTRFS_FT_MAX 9 - /* ioprio of readahead is set to idle */ #define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)) @@ -207,138 +99,10 @@ static const int btrfs_csum_sizes[] = { 4 }; #define BTRFS_MAX_EXTENT_SIZE SZ_128M -/* - * The key defines the order in the tree, and so it also defines (optimal) - * block layout. - * - * objectid corresponds to the inode number. - * - * type tells us things about the object, and is a kind of stream selector. - * so for a given inode, keys with type of 1 might refer to the inode data, - * type of 2 may point to file data in the btree and type == 3 may point to - * extents. - * - * offset is the starting byte offset for this key in the stream. - * - * btrfs_disk_key is in disk byte order. struct btrfs_key is always - * in cpu native order. Otherwise they are identical and their sizes - * should be the same (ie both packed) - */ -struct btrfs_disk_key { - __le64 objectid; - u8 type; - __le64 offset; -} __attribute__ ((__packed__)); - -struct btrfs_key { - u64 objectid; - u8 type; - u64 offset; -} __attribute__ ((__packed__)); - struct btrfs_mapping_tree { struct extent_map_tree map_tree; }; -struct btrfs_dev_item { - /* the internal btrfs device id */ - __le64 devid; - - /* size of the device */ - __le64 total_bytes; - - /* bytes used */ - __le64 bytes_used; - - /* optimal io alignment for this device */ - __le32 io_align; - - /* optimal io width for this device */ - __le32 io_width; - - /* minimal io size for this device */ - __le32 sector_size; - - /* type and info about this device */ - __le64 type; - - /* expected generation for this device */ - __le64 generation; - - /* - * starting byte of this partition on the device, - * to allow for stripe alignment in the future - */ - __le64 start_offset; - - /* grouping information for allocation decisions */ - __le32 dev_group; - - /* seek speed 0-100 where 100 is fastest */ - u8 seek_speed; - - /* bandwidth 0-100 where 100 is fastest */ - u8 bandwidth; - - /* btrfs generated uuid for this device */ - u8 uuid[BTRFS_UUID_SIZE]; - - /* uuid of FS who owns this device */ - u8 fsid[BTRFS_UUID_SIZE]; -} __attribute__ ((__packed__)); - -struct btrfs_stripe { - __le64 devid; - __le64 offset; - u8 dev_uuid[BTRFS_UUID_SIZE]; -} __attribute__ ((__packed__)); - -struct btrfs_chunk { - /* size of this chunk in bytes */ - __le64 length; - - /* objectid of the root referencing this chunk */ - __le64 owner; - - __le64 stripe_len; - __le64 type; - - /* optimal io alignment for this chunk */ - __le32 io_align; - - /* optimal io width for this chunk */ - __le32 io_width; - - /* minimal io size for this chunk */ - __le32 sector_size; - - /* 2^16 stripes is quite a lot, a second limit is the size of a single - * item in the btree - */ - __le16 num_stripes; - - /* sub stripes only matter for raid10 */ - __le16 sub_stripes; - 
struct btrfs_stripe stripe; - /* additional stripes go here */ -} __attribute__ ((__packed__)); - -#define BTRFS_FREE_SPACE_EXTENT 1 -#define BTRFS_FREE_SPACE_BITMAP 2 - -struct btrfs_free_space_entry { - __le64 offset; - __le64 bytes; - u8 type; -} __attribute__ ((__packed__)); - -struct btrfs_free_space_header { - struct btrfs_disk_key location; - __le64 generation; - __le64 num_entries; - __le64 num_bitmaps; -} __attribute__ ((__packed__)); - static inline unsigned long btrfs_chunk_item_size(int num_stripes) { BUG_ON(num_stripes == 0); @@ -346,9 +110,6 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes) sizeof(struct btrfs_stripe) * (num_stripes - 1); } -#define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) -#define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) - /* * File system states */ @@ -357,13 +118,6 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes) #define BTRFS_FS_STATE_TRANS_ABORTED 2 #define BTRFS_FS_STATE_DEV_REPLACING 3 -/* Super block flags */ -/* Errors detected */ -#define BTRFS_SUPER_FLAG_ERROR (1ULL << 2) - -#define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32) -#define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33) - #define BTRFS_BACKREF_REV_MAX 256 #define BTRFS_BACKREF_REV_SHIFT 56 #define BTRFS_BACKREF_REV_MASK (((u64)BTRFS_BACKREF_REV_MAX - 1) << \ @@ -410,7 +164,6 @@ struct btrfs_header { * room to translate 14 chunks with 3 stripes each. */ #define BTRFS_SYSTEM_CHUNK_ARRAY_SIZE 2048 -#define BTRFS_LABEL_SIZE 256 /* * just in case we somehow lose the roots and are not able to mount, @@ -507,31 +260,6 @@ struct btrfs_super_block { * Compat flags that we support. If any incompat flags are set other than the * ones specified below then we will fail to mount */ -#define BTRFS_FEATURE_COMPAT_RO_FREE_SPACE_TREE (1ULL << 0) - -#define BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF (1ULL << 0) -#define BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL (1ULL << 1) -#define BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS (1ULL << 2) -#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO (1ULL << 3) -/* - * some patches floated around with a second compression method - * lets save that incompat here for when they do get in - * Note we don't actually support it, we're just reserving the - * number - */ -#define BTRFS_FEATURE_INCOMPAT_COMPRESS_LZOv2 (1ULL << 4) - -/* - * older kernels tried to do bigger metadata blocks, but the - * code was pretty buggy. Lets not let them try anymore. 
- */ -#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5) - -#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6) -#define BTRFS_FEATURE_INCOMPAT_RAID56 (1ULL << 7) -#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8) -#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9) - #define BTRFS_FEATURE_COMPAT_SUPP 0ULL #define BTRFS_FEATURE_COMPAT_SAFE_SET 0ULL #define BTRFS_FEATURE_COMPAT_SAFE_CLEAR 0ULL @@ -624,357 +352,8 @@ struct btrfs_path { unsigned int need_commit_sem:1; unsigned int skip_release_on_error:1; }; - -/* - * items in the extent btree are used to record the objectid of the - * owner of the block and the number of references - */ - -struct btrfs_extent_item { - __le64 refs; - __le64 generation; - __le64 flags; -} __attribute__ ((__packed__)); - -struct btrfs_extent_item_v0 { - __le32 refs; -} __attribute__ ((__packed__)); - #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r) >> 4) - \ sizeof(struct btrfs_item)) - -#define BTRFS_EXTENT_FLAG_DATA (1ULL << 0) -#define BTRFS_EXTENT_FLAG_TREE_BLOCK (1ULL << 1) - -/* following flags only apply to tree blocks */ - -/* use full backrefs for extent pointers in the block */ -#define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) - -/* - * this flag is only used internally by scrub and may be changed at any time - * it is only declared here to avoid collisions - */ -#define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) - -struct btrfs_tree_block_info { - struct btrfs_disk_key key; - u8 level; -} __attribute__ ((__packed__)); - -struct btrfs_extent_data_ref { - __le64 root; - __le64 objectid; - __le64 offset; - __le32 count; -} __attribute__ ((__packed__)); - -struct btrfs_shared_data_ref { - __le32 count; -} __attribute__ ((__packed__)); - -struct btrfs_extent_inline_ref { - u8 type; - __le64 offset; -} __attribute__ ((__packed__)); - -/* old style backrefs item */ -struct btrfs_extent_ref_v0 { - __le64 root; - __le64 generation; - __le64 objectid; - __le32 count; -} __attribute__ ((__packed__)); - - -/* dev extents record free space on individual devices. The owner - * field points back to the chunk allocation mapping tree that allocated - * the extent. 
The chunk tree uuid field is a way to double check the owner - */ -struct btrfs_dev_extent { - __le64 chunk_tree; - __le64 chunk_objectid; - __le64 chunk_offset; - __le64 length; - u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; -} __attribute__ ((__packed__)); - -struct btrfs_inode_ref { - __le64 index; - __le16 name_len; - /* name goes here */ -} __attribute__ ((__packed__)); - -struct btrfs_inode_extref { - __le64 parent_objectid; - __le64 index; - __le16 name_len; - __u8 name[0]; - /* name goes here */ -} __attribute__ ((__packed__)); - -struct btrfs_timespec { - __le64 sec; - __le32 nsec; -} __attribute__ ((__packed__)); - -struct btrfs_inode_item { - /* nfs style generation number */ - __le64 generation; - /* transid that last touched this inode */ - __le64 transid; - __le64 size; - __le64 nbytes; - __le64 block_group; - __le32 nlink; - __le32 uid; - __le32 gid; - __le32 mode; - __le64 rdev; - __le64 flags; - - /* modification sequence number for NFS */ - __le64 sequence; - - /* - * a little future expansion, for more than this we can - * just grow the inode item and version it - */ - __le64 reserved[4]; - struct btrfs_timespec atime; - struct btrfs_timespec ctime; - struct btrfs_timespec mtime; - struct btrfs_timespec otime; -} __attribute__ ((__packed__)); - -struct btrfs_dir_log_item { - __le64 end; -} __attribute__ ((__packed__)); - -struct btrfs_dir_item { - struct btrfs_disk_key location; - __le64 transid; - __le16 data_len; - __le16 name_len; - u8 type; -} __attribute__ ((__packed__)); - -#define BTRFS_ROOT_SUBVOL_RDONLY (1ULL << 0) - -/* - * Internal in-memory flag that a subvolume has been marked for deletion but - * still visible as a directory - */ -#define BTRFS_ROOT_SUBVOL_DEAD (1ULL << 48) - -struct btrfs_root_item { - struct btrfs_inode_item inode; - __le64 generation; - __le64 root_dirid; - __le64 bytenr; - __le64 byte_limit; - __le64 bytes_used; - __le64 last_snapshot; - __le64 flags; - __le32 refs; - struct btrfs_disk_key drop_progress; - u8 drop_level; - u8 level; - - /* - * The following fields appear after subvol_uuids+subvol_times - * were introduced. - */ - - /* - * This generation number is used to test if the new fields are valid - * and up to date while reading the root item. Every time the root item - * is written out, the "generation" field is copied into this field. If - * anyone ever mounted the fs with an older kernel, we will have - * mismatching generation values here and thus must invalidate the - * new fields. See btrfs_update_root and btrfs_find_last_root for - * details. - * the offset of generation_v2 is also used as the start for the memset - * when invalidating the fields. - */ - __le64 generation_v2; - u8 uuid[BTRFS_UUID_SIZE]; - u8 parent_uuid[BTRFS_UUID_SIZE]; - u8 received_uuid[BTRFS_UUID_SIZE]; - __le64 ctransid; /* updated when an inode changes */ - __le64 otransid; /* trans when created */ - __le64 stransid; /* trans when sent. non-zero for received subvol */ - __le64 rtransid; /* trans when received. 
non-zero for received subvol */ - struct btrfs_timespec ctime; - struct btrfs_timespec otime; - struct btrfs_timespec stime; - struct btrfs_timespec rtime; - __le64 reserved[8]; /* for future */ -} __attribute__ ((__packed__)); - -/* - * this is used for both forward and backward root refs - */ -struct btrfs_root_ref { - __le64 dirid; - __le64 sequence; - __le16 name_len; -} __attribute__ ((__packed__)); - -struct btrfs_disk_balance_args { - /* - * profiles to operate on, single is denoted by - * BTRFS_AVAIL_ALLOC_BIT_SINGLE - */ - __le64 profiles; - - /* - * usage filter - * BTRFS_BALANCE_ARGS_USAGE with a single value means '0..N' - * BTRFS_BALANCE_ARGS_USAGE_RANGE - range syntax, min..max - */ - union { - __le64 usage; - struct { - __le32 usage_min; - __le32 usage_max; - }; - }; - - /* devid filter */ - __le64 devid; - - /* devid subset filter [pstart..pend) */ - __le64 pstart; - __le64 pend; - - /* btrfs virtual address space subset filter [vstart..vend) */ - __le64 vstart; - __le64 vend; - - /* - * profile to convert to, single is denoted by - * BTRFS_AVAIL_ALLOC_BIT_SINGLE - */ - __le64 target; - - /* BTRFS_BALANCE_ARGS_* */ - __le64 flags; - - /* - * BTRFS_BALANCE_ARGS_LIMIT with value 'limit' - * BTRFS_BALANCE_ARGS_LIMIT_RANGE - the extend version can use minimum - * and maximum - */ - union { - __le64 limit; - struct { - __le32 limit_min; - __le32 limit_max; - }; - }; - - /* - * Process chunks that cross stripes_min..stripes_max devices, - * BTRFS_BALANCE_ARGS_STRIPES_RANGE - */ - __le32 stripes_min; - __le32 stripes_max; - - __le64 unused[6]; -} __attribute__ ((__packed__)); - -/* - * store balance parameters to disk so that balance can be properly - * resumed after crash or unmount - */ -struct btrfs_balance_item { - /* BTRFS_BALANCE_* */ - __le64 flags; - - struct btrfs_disk_balance_args data; - struct btrfs_disk_balance_args meta; - struct btrfs_disk_balance_args sys; - - __le64 unused[4]; -} __attribute__ ((__packed__)); - -#define BTRFS_FILE_EXTENT_INLINE 0 -#define BTRFS_FILE_EXTENT_REG 1 -#define BTRFS_FILE_EXTENT_PREALLOC 2 - -struct btrfs_file_extent_item { - /* - * transaction id that created this extent - */ - __le64 generation; - /* - * max number of bytes to hold this extent in ram - * when we split a compressed extent we can't know how big - * each of the resulting pieces will be. So, this is - * an upper limit on the size of the extent in ram instead of - * an exact limit. - */ - __le64 ram_bytes; - - /* - * 32 bits for the various ways we might encode the data, - * including compression and encryption. If any of these - * are set to something a given disk format doesn't understand - * it is treated like an incompat flag for reading and writing, - * but not for stat. - */ - u8 compression; - u8 encryption; - __le16 other_encoding; /* spare for later use */ - - /* are we inline data or a real extent? */ - u8 type; - - /* - * disk space consumed by the extent, checksum blocks are included - * in these numbers - * - * At this offset in the structure, the inline extent data start. - */ - __le64 disk_bytenr; - __le64 disk_num_bytes; - /* - * the logical offset in file blocks (no csums) - * this extent record is for. This allows a file extent to point - * into the middle of an existing extent on disk, sharing it - * between two snapshots (useful if some bytes in the middle of the - * extent have changed - */ - __le64 offset; - /* - * the logical number of file blocks (no csums included). This - * always reflects the size uncompressed and without encoding. 
- */ - __le64 num_bytes; - -} __attribute__ ((__packed__)); - -struct btrfs_csum_item { - u8 csum; -} __attribute__ ((__packed__)); - -struct btrfs_dev_stats_item { - /* - * grow this item struct at the end for future enhancements and keep - * the existing values unchanged - */ - __le64 values[BTRFS_DEV_STAT_VALUES_MAX]; -} __attribute__ ((__packed__)); - -#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_ALWAYS 0 -#define BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID 1 -#define BTRFS_DEV_REPLACE_ITEM_STATE_NEVER_STARTED 0 -#define BTRFS_DEV_REPLACE_ITEM_STATE_STARTED 1 -#define BTRFS_DEV_REPLACE_ITEM_STATE_SUSPENDED 2 -#define BTRFS_DEV_REPLACE_ITEM_STATE_FINISHED 3 -#define BTRFS_DEV_REPLACE_ITEM_STATE_CANCELED 4 - struct btrfs_dev_replace { u64 replace_state; /* see #define above */ u64 time_started; /* seconds since 1-Jan-1970 */ @@ -1005,175 +384,6 @@ struct btrfs_dev_replace { struct btrfs_scrub_progress scrub_progress; }; -struct btrfs_dev_replace_item { - /* - * grow this item struct at the end for future enhancements and keep - * the existing values unchanged - */ - __le64 src_devid; - __le64 cursor_left; - __le64 cursor_right; - __le64 cont_reading_from_srcdev_mode; - - __le64 replace_state; - __le64 time_started; - __le64 time_stopped; - __le64 num_write_errors; - __le64 num_uncorrectable_read_errors; -} __attribute__ ((__packed__)); - -/* different types of block groups (and chunks) */ -#define BTRFS_BLOCK_GROUP_DATA (1ULL << 0) -#define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1) -#define BTRFS_BLOCK_GROUP_METADATA (1ULL << 2) -#define BTRFS_BLOCK_GROUP_RAID0 (1ULL << 3) -#define BTRFS_BLOCK_GROUP_RAID1 (1ULL << 4) -#define BTRFS_BLOCK_GROUP_DUP (1ULL << 5) -#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6) -#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7) -#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8) -#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \ - BTRFS_SPACE_INFO_GLOBAL_RSV) - -enum btrfs_raid_types { - BTRFS_RAID_RAID10, - BTRFS_RAID_RAID1, - BTRFS_RAID_DUP, - BTRFS_RAID_RAID0, - BTRFS_RAID_SINGLE, - BTRFS_RAID_RAID5, - BTRFS_RAID_RAID6, - BTRFS_NR_RAID_TYPES -}; - -#define BTRFS_BLOCK_GROUP_TYPE_MASK (BTRFS_BLOCK_GROUP_DATA | \ - BTRFS_BLOCK_GROUP_SYSTEM | \ - BTRFS_BLOCK_GROUP_METADATA) - -#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \ - BTRFS_BLOCK_GROUP_RAID1 | \ - BTRFS_BLOCK_GROUP_RAID5 | \ - BTRFS_BLOCK_GROUP_RAID6 | \ - BTRFS_BLOCK_GROUP_DUP | \ - BTRFS_BLOCK_GROUP_RAID10) -#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \ - BTRFS_BLOCK_GROUP_RAID6) - -/* - * We need a bit for restriper to be able to tell when chunks of type - * SINGLE are available. This "extended" profile format is used in - * fs_info->avail_*_alloc_bits (in-memory) and balance item fields - * (on-disk). The corresponding on-disk bit in chunk.type is reserved - * to avoid remappings between two formats in future. - */ -#define BTRFS_AVAIL_ALLOC_BIT_SINGLE (1ULL << 48) - -/* - * A fake block group type that is used to communicate global block reserve - * size to userspace via the SPACE_INFO ioctl. 
- */ -#define BTRFS_SPACE_INFO_GLOBAL_RSV (1ULL << 49) - -#define BTRFS_EXTENDED_PROFILE_MASK (BTRFS_BLOCK_GROUP_PROFILE_MASK | \ - BTRFS_AVAIL_ALLOC_BIT_SINGLE) - -static inline u64 chunk_to_extended(u64 flags) -{ - if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0) - flags |= BTRFS_AVAIL_ALLOC_BIT_SINGLE; - - return flags; -} -static inline u64 extended_to_chunk(u64 flags) -{ - return flags & ~BTRFS_AVAIL_ALLOC_BIT_SINGLE; -} - -struct btrfs_block_group_item { - __le64 used; - __le64 chunk_objectid; - __le64 flags; -} __attribute__ ((__packed__)); - -struct btrfs_free_space_info { - __le32 extent_count; - __le32 flags; -} __attribute__ ((__packed__)); - -#define BTRFS_FREE_SPACE_USING_BITMAPS (1ULL << 0) - -#define BTRFS_QGROUP_LEVEL_SHIFT 48 -static inline u64 btrfs_qgroup_level(u64 qgroupid) -{ - return qgroupid >> BTRFS_QGROUP_LEVEL_SHIFT; -} - -/* - * is subvolume quota turned on? - */ -#define BTRFS_QGROUP_STATUS_FLAG_ON (1ULL << 0) -/* - * RESCAN is set during the initialization phase - */ -#define BTRFS_QGROUP_STATUS_FLAG_RESCAN (1ULL << 1) -/* - * Some qgroup entries are known to be out of date, - * either because the configuration has changed in a way that - * makes a rescan necessary, or because the fs has been mounted - * with a non-qgroup-aware version. - * Turning qouta off and on again makes it inconsistent, too. - */ -#define BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT (1ULL << 2) - -#define BTRFS_QGROUP_STATUS_VERSION 1 - -struct btrfs_qgroup_status_item { - __le64 version; - /* - * the generation is updated during every commit. As older - * versions of btrfs are not aware of qgroups, it will be - * possible to detect inconsistencies by checking the - * generation on mount time - */ - __le64 generation; - - /* flag definitions see above */ - __le64 flags; - - /* - * only used during scanning to record the progress - * of the scan. It contains a logical address - */ - __le64 rescan; -} __attribute__ ((__packed__)); - -struct btrfs_qgroup_info_item { - __le64 generation; - __le64 rfer; - __le64 rfer_cmpr; - __le64 excl; - __le64 excl_cmpr; -} __attribute__ ((__packed__)); - -/* flags definition for qgroup limits */ -#define BTRFS_QGROUP_LIMIT_MAX_RFER (1ULL << 0) -#define BTRFS_QGROUP_LIMIT_MAX_EXCL (1ULL << 1) -#define BTRFS_QGROUP_LIMIT_RSV_RFER (1ULL << 2) -#define BTRFS_QGROUP_LIMIT_RSV_EXCL (1ULL << 3) -#define BTRFS_QGROUP_LIMIT_RFER_CMPR (1ULL << 4) -#define BTRFS_QGROUP_LIMIT_EXCL_CMPR (1ULL << 5) - -struct btrfs_qgroup_limit_item { - /* - * only updated when any of the other values change - */ - __le64 flags; - __le64 max_rfer; - __le64 max_excl; - __le64 rsv_rfer; - __le64 rsv_excl; -} __attribute__ ((__packed__)); - /* For raid type sysfs entries */ struct raid_kobject { int raid_type; @@ -1408,6 +618,27 @@ struct btrfs_block_group_cache { struct btrfs_io_ctl io_ctl; + /* + * Incremented when doing extent allocations and holding a read lock + * on the space_info's groups_sem semaphore. + * Decremented when an ordered extent that represents an IO against this + * block group's range is created (after it's added to its inode's + * root's list of ordered extents) or immediately after the allocation + * if it's a metadata extent or fallocate extent (for these cases we + * don't create ordered extents). + */ + atomic_t reservations; + + /* + * Incremented while holding the spinlock *lock* by a task checking if + * it can perform a nocow write (incremented if the value for the *ro* + * field is 0). 
Decremented by such tasks once they create an ordered + * extent or before that if some error happens before reaching that step. + * This is to prevent races between block group relocation and nocow + * writes through direct IO. + */ + atomic_t nocow_writers; + /* Lock for free space tree operations. */ struct mutex free_space_lock; @@ -2026,228 +1257,6 @@ struct btrfs_root { atomic_t qgroup_meta_rsv; }; -struct btrfs_ioctl_defrag_range_args { - /* start of the defrag operation */ - __u64 start; - - /* number of bytes to defrag, use (u64)-1 to say all */ - __u64 len; - - /* - * flags for the operation, which can include turning - * on compression for this one defrag - */ - __u64 flags; - - /* - * any extent bigger than this will be considered - * already defragged. Use 0 to take the kernel default - * Use 1 to say every single extent must be rewritten - */ - __u32 extent_thresh; - - /* - * which compression method to use if turning on compression - * for this defrag operation. If unspecified, zlib will - * be used - */ - __u32 compress_type; - - /* spare for later */ - __u32 unused[4]; -}; - - -/* - * inode items have the data typically returned from stat and store other - * info about object characteristics. There is one for every file and dir in - * the FS - */ -#define BTRFS_INODE_ITEM_KEY 1 -#define BTRFS_INODE_REF_KEY 12 -#define BTRFS_INODE_EXTREF_KEY 13 -#define BTRFS_XATTR_ITEM_KEY 24 -#define BTRFS_ORPHAN_ITEM_KEY 48 -/* reserve 2-15 close to the inode for later flexibility */ - -/* - * dir items are the name -> inode pointers in a directory. There is one - * for every name in a directory. - */ -#define BTRFS_DIR_LOG_ITEM_KEY 60 -#define BTRFS_DIR_LOG_INDEX_KEY 72 -#define BTRFS_DIR_ITEM_KEY 84 -#define BTRFS_DIR_INDEX_KEY 96 -/* - * extent data is for file data - */ -#define BTRFS_EXTENT_DATA_KEY 108 - -/* - * extent csums are stored in a separate tree and hold csums for - * an entire extent on disk. - */ -#define BTRFS_EXTENT_CSUM_KEY 128 - -/* - * root items point to tree roots. They are typically in the root - * tree used by the super block to find all the other trees - */ -#define BTRFS_ROOT_ITEM_KEY 132 - -/* - * root backrefs tie subvols and snapshots to the directory entries that - * reference them - */ -#define BTRFS_ROOT_BACKREF_KEY 144 - -/* - * root refs make a fast index for listing all of the snapshots and - * subvolumes referenced by a given root. They point directly to the - * directory item in the root that references the subvol - */ -#define BTRFS_ROOT_REF_KEY 156 - -/* - * extent items are in the extent map tree. These record which blocks - * are used, and how many references there are to each block - */ -#define BTRFS_EXTENT_ITEM_KEY 168 - -/* - * The same as the BTRFS_EXTENT_ITEM_KEY, except it's metadata we already know - * the length, so we save the level in key->offset instead of the length. - */ -#define BTRFS_METADATA_ITEM_KEY 169 - -#define BTRFS_TREE_BLOCK_REF_KEY 176 - -#define BTRFS_EXTENT_DATA_REF_KEY 178 - -#define BTRFS_EXTENT_REF_V0_KEY 180 - -#define BTRFS_SHARED_BLOCK_REF_KEY 182 - -#define BTRFS_SHARED_DATA_REF_KEY 184 - -/* - * block groups give us hints into the extent allocation trees. Which - * blocks are free etc etc - */ -#define BTRFS_BLOCK_GROUP_ITEM_KEY 192 - -/* - * Every block group is represented in the free space tree by a free space info - * item, which stores some accounting information. It is keyed on - * (block_group_start, FREE_SPACE_INFO, block_group_length). 
- */ -#define BTRFS_FREE_SPACE_INFO_KEY 198 - -/* - * A free space extent tracks an extent of space that is free in a block group. - * It is keyed on (start, FREE_SPACE_EXTENT, length). - */ -#define BTRFS_FREE_SPACE_EXTENT_KEY 199 - -/* - * When a block group becomes very fragmented, we convert it to use bitmaps - * instead of extents. A free space bitmap is keyed on - * (start, FREE_SPACE_BITMAP, length); the corresponding item is a bitmap with - * (length / sectorsize) bits. - */ -#define BTRFS_FREE_SPACE_BITMAP_KEY 200 - -#define BTRFS_DEV_EXTENT_KEY 204 -#define BTRFS_DEV_ITEM_KEY 216 -#define BTRFS_CHUNK_ITEM_KEY 228 - -/* - * Records the overall state of the qgroups. - * There's only one instance of this key present, - * (0, BTRFS_QGROUP_STATUS_KEY, 0) - */ -#define BTRFS_QGROUP_STATUS_KEY 240 -/* - * Records the currently used space of the qgroup. - * One key per qgroup, (0, BTRFS_QGROUP_INFO_KEY, qgroupid). - */ -#define BTRFS_QGROUP_INFO_KEY 242 -/* - * Contains the user configured limits for the qgroup. - * One key per qgroup, (0, BTRFS_QGROUP_LIMIT_KEY, qgroupid). - */ -#define BTRFS_QGROUP_LIMIT_KEY 244 -/* - * Records the child-parent relationship of qgroups. For - * each relation, 2 keys are present: - * (childid, BTRFS_QGROUP_RELATION_KEY, parentid) - * (parentid, BTRFS_QGROUP_RELATION_KEY, childid) - */ -#define BTRFS_QGROUP_RELATION_KEY 246 - -/* - * Obsolete name, see BTRFS_TEMPORARY_ITEM_KEY. - */ -#define BTRFS_BALANCE_ITEM_KEY 248 - -/* - * The key type for tree items that are stored persistently, but do not need to - * exist for extended period of time. The items can exist in any tree. - * - * [subtype, BTRFS_TEMPORARY_ITEM_KEY, data] - * - * Existing items: - * - * - balance status item - * (BTRFS_BALANCE_OBJECTID, BTRFS_TEMPORARY_ITEM_KEY, 0) - */ -#define BTRFS_TEMPORARY_ITEM_KEY 248 - -/* - * Obsolete name, see BTRFS_PERSISTENT_ITEM_KEY - */ -#define BTRFS_DEV_STATS_KEY 249 - -/* - * The key type for tree items that are stored persistently and usually exist - * for a long period, eg. filesystem lifetime. The item kinds can be status - * information, stats or preference values. The item can exist in any tree. - * - * [subtype, BTRFS_PERSISTENT_ITEM_KEY, data] - * - * Existing items: - * - * - device statistics, store IO stats in the device tree, one key for all - * stats - * (BTRFS_DEV_STATS_OBJECTID, BTRFS_DEV_STATS_KEY, 0) - */ -#define BTRFS_PERSISTENT_ITEM_KEY 249 - -/* - * Persistantly stores the device replace state in the device tree. - * The key is built like this: (0, BTRFS_DEV_REPLACE_KEY, 0). - */ -#define BTRFS_DEV_REPLACE_KEY 250 - -/* - * Stores items that allow to quickly map UUIDs to something else. - * These items are part of the filesystem UUID tree. - * The key is built like this: - * (UUID_upper_64_bits, BTRFS_UUID_KEY*, UUID_lower_64_bits). - */ -#if BTRFS_UUID_SIZE != 16 -#error "UUID items require BTRFS_UUID_SIZE == 16!" -#endif -#define BTRFS_UUID_KEY_SUBVOL 251 /* for UUIDs assigned to subvols */ -#define BTRFS_UUID_KEY_RECEIVED_SUBVOL 252 /* for UUIDs assigned to - * received subvols */ - -/* - * string items are for debugging. They just store a short string of - * data in the FS - */ -#define BTRFS_STRING_ITEM_KEY 253 - /* * Flags for mount options. 
* @@ -3499,6 +2508,12 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root); int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root); +void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, + const u64 start); +void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg); +bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); +void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr); +void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg); void btrfs_put_block_group(struct btrfs_block_group_cache *cache); int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_root *root, unsigned long count); @@ -4122,6 +3137,7 @@ void btrfs_test_inode_set_ops(struct inode *inode); /* ioctl.c */ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); int btrfs_ioctl_get_supported_features(void __user *arg); void btrfs_update_iflags(struct inode *inode); void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); @@ -4326,10 +3342,9 @@ static inline void assfail(char *expr, char *file, int line) #define ASSERT(expr) ((void)0) #endif -#define btrfs_assert() __printf(5, 6) __cold -void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, +void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function, unsigned int line, int errno, const char *fmt, ...); const char *btrfs_decode_error(int errno); @@ -4339,6 +3354,46 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, struct btrfs_root *root, const char *function, unsigned int line, int errno); +/* + * Call btrfs_abort_transaction as early as possible when an error condition is + * detected, that way the exact line number is reported. + */ +#define btrfs_abort_transaction(trans, root, errno) \ +do { \ + /* Report first abort since mount */ \ + if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \ + &((root)->fs_info->fs_state))) { \ + WARN(1, KERN_DEBUG \ + "BTRFS: Transaction aborted (error %d)\n", \ + (errno)); \ + } \ + __btrfs_abort_transaction((trans), (root), __func__, \ + __LINE__, (errno)); \ +} while (0) + +#define btrfs_handle_fs_error(fs_info, errno, fmt, args...) \ +do { \ + __btrfs_handle_fs_error((fs_info), __func__, __LINE__, \ + (errno), fmt, ##args); \ +} while (0) + +__printf(5, 6) +__cold +void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, + unsigned int line, int errno, const char *fmt, ...); +/* + * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic + * will panic(). Otherwise we BUG() here. + */ +#define btrfs_panic(fs_info, errno, fmt, args...) \ +do { \ + __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \ + BUG(); \ +} while (0) + + +/* compatibility and incompatibility defines */ + #define btrfs_set_fs_incompat(__fs_info, opt) \ __btrfs_set_fs_incompat((__fs_info), BTRFS_FEATURE_INCOMPAT_##opt) @@ -4455,44 +3510,6 @@ static inline int __btrfs_fs_compat_ro(struct btrfs_fs_info *fs_info, u64 flag) return !!(btrfs_super_compat_ro_flags(disk_super) & flag); } -/* - * Call btrfs_abort_transaction as early as possible when an error condition is - * detected, that way the exact line number is reported. 
- */ -#define btrfs_abort_transaction(trans, root, errno) \ -do { \ - /* Report first abort since mount */ \ - if (!test_and_set_bit(BTRFS_FS_STATE_TRANS_ABORTED, \ - &((root)->fs_info->fs_state))) { \ - WARN(1, KERN_DEBUG \ - "BTRFS: Transaction aborted (error %d)\n", \ - (errno)); \ - } \ - __btrfs_abort_transaction((trans), (root), __func__, \ - __LINE__, (errno)); \ -} while (0) - -#define btrfs_std_error(fs_info, errno, fmt, args...) \ -do { \ - __btrfs_std_error((fs_info), __func__, __LINE__, \ - (errno), fmt, ##args); \ -} while (0) - -__printf(5, 6) -__cold -void __btrfs_panic(struct btrfs_fs_info *fs_info, const char *function, - unsigned int line, int errno, const char *fmt, ...); - -/* - * If BTRFS_MOUNT_PANIC_ON_FATAL_ERROR is in mount_opt, __btrfs_panic - * will panic(). Otherwise we BUG() here. - */ -#define btrfs_panic(fs_info, errno, fmt, args...) \ -do { \ - __btrfs_panic(fs_info, __func__, __LINE__, errno, fmt, ##args); \ - BUG(); \ -} while (0) - /* acl.c */ #ifdef CONFIG_BTRFS_FS_POSIX_ACL struct posix_acl *btrfs_get_acl(struct inode *inode, int type); diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 6cef0062f929..61561c2a3f96 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -134,7 +134,7 @@ again: /* cached in the btrfs inode and can be accessed */ atomic_add(2, &node->refs); - ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + ret = radix_tree_preload(GFP_NOFS); if (ret) { kmem_cache_free(delayed_node_cache, node); return ERR_PTR(ret); diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 26bcb487f958..85f12e6e28d2 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -44,9 +44,6 @@ static void btrfs_dev_replace_update_device_in_mapping_tree( struct btrfs_fs_info *fs_info, struct btrfs_device *srcdev, struct btrfs_device *tgtdev); -static int btrfs_dev_replace_find_srcdev(struct btrfs_root *root, u64 srcdevid, - char *srcdev_name, - struct btrfs_device **device); static u64 __btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info); static int btrfs_dev_replace_kthread(void *data); static int btrfs_dev_replace_continue_on_mount(struct btrfs_fs_info *fs_info); @@ -305,8 +302,8 @@ void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info) dev_replace->cursor_left_last_write_of_item; } -int btrfs_dev_replace_start(struct btrfs_root *root, - struct btrfs_ioctl_dev_replace_args *args) +int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name, + u64 srcdevid, char *srcdev_name, int read_src) { struct btrfs_trans_handle *trans; struct btrfs_fs_info *fs_info = root->fs_info; @@ -315,29 +312,16 @@ int btrfs_dev_replace_start(struct btrfs_root *root, struct btrfs_device *tgt_device = NULL; struct btrfs_device *src_device = NULL; - switch (args->start.cont_reading_from_srcdev_mode) { - case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS: - case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID: - break; - default: - return -EINVAL; - } - - if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') || - args->start.tgtdev_name[0] == '\0') - return -EINVAL; - /* the disk copy procedure reuses the scrub code */ mutex_lock(&fs_info->volume_mutex); - ret = btrfs_dev_replace_find_srcdev(root, args->start.srcdevid, - args->start.srcdev_name, - &src_device); + ret = btrfs_find_device_by_devspec(root, srcdevid, + srcdev_name, &src_device); if (ret) { mutex_unlock(&fs_info->volume_mutex); return ret; } - ret = btrfs_init_dev_replace_tgtdev(root, 
args->start.tgtdev_name, + ret = btrfs_init_dev_replace_tgtdev(root, tgtdev_name, src_device, &tgt_device); mutex_unlock(&fs_info->volume_mutex); if (ret) @@ -364,18 +348,17 @@ int btrfs_dev_replace_start(struct btrfs_root *root, break; case BTRFS_IOCTL_DEV_REPLACE_STATE_STARTED: case BTRFS_IOCTL_DEV_REPLACE_STATE_SUSPENDED: - args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED; + ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED; goto leave; } - dev_replace->cont_reading_from_srcdev_mode = - args->start.cont_reading_from_srcdev_mode; + dev_replace->cont_reading_from_srcdev_mode = read_src; WARN_ON(!src_device); dev_replace->srcdev = src_device; WARN_ON(!tgt_device); dev_replace->tgtdev = tgt_device; - btrfs_info_in_rcu(root->fs_info, + btrfs_info_in_rcu(fs_info, "dev_replace from %s (devid %llu) to %s started", src_device->missing ? "<missing disk>" : rcu_str_deref(src_device->name), @@ -396,14 +379,13 @@ int btrfs_dev_replace_start(struct btrfs_root *root, dev_replace->item_needs_writeback = 1; atomic64_set(&dev_replace->num_write_errors, 0); atomic64_set(&dev_replace->num_uncorrectable_read_errors, 0); - args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR; btrfs_dev_replace_unlock(dev_replace, 1); ret = btrfs_sysfs_add_device_link(tgt_device->fs_devices, tgt_device); if (ret) - btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret); + btrfs_err(fs_info, "kobj add dev failed %d\n", ret); - btrfs_wait_ordered_roots(root->fs_info, -1); + btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1); /* force writing the updated state information to disk */ trans = btrfs_start_transaction(root, 0); @@ -421,11 +403,9 @@ int btrfs_dev_replace_start(struct btrfs_root *root, btrfs_device_get_total_bytes(src_device), &dev_replace->scrub_progress, 0, 1); - ret = btrfs_dev_replace_finishing(root->fs_info, ret); - /* don't warn if EINPROGRESS, someone else might be running scrub */ + ret = btrfs_dev_replace_finishing(fs_info, ret); if (ret == -EINPROGRESS) { - args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS; - ret = 0; + ret = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS; } else { WARN_ON(ret); } @@ -440,6 +420,35 @@ leave: return ret; } +int btrfs_dev_replace_by_ioctl(struct btrfs_root *root, + struct btrfs_ioctl_dev_replace_args *args) +{ + int ret; + + switch (args->start.cont_reading_from_srcdev_mode) { + case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS: + case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID: + break; + default: + return -EINVAL; + } + + if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') || + args->start.tgtdev_name[0] == '\0') + return -EINVAL; + + ret = btrfs_dev_replace_start(root, args->start.tgtdev_name, + args->start.srcdevid, + args->start.srcdev_name, + args->start.cont_reading_from_srcdev_mode); + args->result = ret; + /* don't warn if EINPROGRESS, someone else might be running scrub */ + if (ret == BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS) + ret = 0; + + return ret; +} + /* * blocked until all flighting bios are finished. 
*/ @@ -495,7 +504,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); return ret; } - btrfs_wait_ordered_roots(root->fs_info, -1); + btrfs_wait_ordered_roots(root->fs_info, -1, 0, (u64)-1); trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { @@ -560,10 +569,9 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, ASSERT(list_empty(&src_device->resized_list)); tgt_device->commit_total_bytes = src_device->commit_total_bytes; tgt_device->commit_bytes_used = src_device->bytes_used; - if (fs_info->sb->s_bdev == src_device->bdev) - fs_info->sb->s_bdev = tgt_device->bdev; - if (fs_info->fs_devices->latest_bdev == src_device->bdev) - fs_info->fs_devices->latest_bdev = tgt_device->bdev; + + btrfs_assign_next_active_device(fs_info, src_device, tgt_device); + list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list); fs_info->fs_devices->rw_devices++; @@ -626,25 +634,6 @@ static void btrfs_dev_replace_update_device_in_mapping_tree( write_unlock(&em_tree->lock); } -static int btrfs_dev_replace_find_srcdev(struct btrfs_root *root, u64 srcdevid, - char *srcdev_name, - struct btrfs_device **device) -{ - int ret; - - if (srcdevid) { - ret = 0; - *device = btrfs_find_device(root->fs_info, srcdevid, NULL, - NULL); - if (!*device) - ret = -ENOENT; - } else { - ret = btrfs_find_device_missing_or_by_path(root, srcdev_name, - device); - } - return ret; -} - void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args) { diff --git a/fs/btrfs/dev-replace.h b/fs/btrfs/dev-replace.h index 29e3ef5f96bd..e922b42d91df 100644 --- a/fs/btrfs/dev-replace.h +++ b/fs/btrfs/dev-replace.h @@ -25,8 +25,10 @@ int btrfs_init_dev_replace(struct btrfs_fs_info *fs_info); int btrfs_run_dev_replace(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info); void btrfs_after_dev_replace_commit(struct btrfs_fs_info *fs_info); -int btrfs_dev_replace_start(struct btrfs_root *root, +int btrfs_dev_replace_by_ioctl(struct btrfs_root *root, struct btrfs_ioctl_dev_replace_args *args); +int btrfs_dev_replace_start(struct btrfs_root *root, char *tgtdev_name, + u64 srcdevid, char *srcdev_name, int read_src); void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info, struct btrfs_ioctl_dev_replace_args *args); int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4e47849d7427..91d123938cef 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1640,7 +1640,7 @@ int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, { int ret; - ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + ret = radix_tree_preload(GFP_NOFS); if (ret) return ret; @@ -2417,7 +2417,7 @@ static int btrfs_replay_log(struct btrfs_fs_info *fs_info, /* returns with log_tree_root freed on success */ ret = btrfs_recover_log_trees(log_tree_root); if (ret) { - btrfs_std_error(tree_root->fs_info, ret, + btrfs_handle_fs_error(tree_root->fs_info, ret, "Failed to recover log tree"); free_extent_buffer(log_tree_root->node); kfree(log_tree_root); @@ -2517,6 +2517,7 @@ int open_ctree(struct super_block *sb, int num_backups_tried = 0; int backup_index = 0; int max_active; + bool cleaner_mutex_locked = false; tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info, GFP_KERNEL); chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info, GFP_KERNEL); @@ -2713,7 +2714,7 @@ int open_ctree(struct super_block *sb, * Pass the whole disk block of size 
BTRFS_SUPER_INFO_SIZE (4k). */ if (btrfs_check_super_csum(bh->b_data)) { - printk(KERN_ERR "BTRFS: superblock checksum mismatch\n"); + btrfs_err(fs_info, "superblock checksum mismatch"); err = -EINVAL; brelse(bh); goto fail_alloc; @@ -2733,7 +2734,7 @@ int open_ctree(struct super_block *sb, ret = btrfs_check_super_valid(fs_info, sb->s_flags & MS_RDONLY); if (ret) { - printk(KERN_ERR "BTRFS: superblock contains fatal errors\n"); + btrfs_err(fs_info, "superblock contains fatal errors"); err = -EINVAL; goto fail_alloc; } @@ -2768,9 +2769,9 @@ int open_ctree(struct super_block *sb, features = btrfs_super_incompat_flags(disk_super) & ~BTRFS_FEATURE_INCOMPAT_SUPP; if (features) { - printk(KERN_ERR "BTRFS: couldn't mount because of " - "unsupported optional features (%Lx).\n", - features); + btrfs_err(fs_info, + "cannot mount because of unsupported optional features (%llx)", + features); err = -EINVAL; goto fail_alloc; } @@ -2781,7 +2782,7 @@ int open_ctree(struct super_block *sb, features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) - printk(KERN_INFO "BTRFS: has skinny extents\n"); + btrfs_info(fs_info, "has skinny extents"); /* * flag our filesystem as having big metadata blocks if @@ -2789,7 +2790,8 @@ int open_ctree(struct super_block *sb, */ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) { if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA)) - printk(KERN_INFO "BTRFS: flagging fs with big metadata feature\n"); + btrfs_info(fs_info, + "flagging fs with big metadata feature"); features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; } @@ -2805,9 +2807,9 @@ int open_ctree(struct super_block *sb, */ if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && (sectorsize != nodesize)) { - printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes " - "are not allowed for mixed block groups on %s\n", - sb->s_id); + btrfs_err(fs_info, +"unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups", + nodesize, sectorsize); goto fail_alloc; } @@ -2820,8 +2822,8 @@ int open_ctree(struct super_block *sb, features = btrfs_super_compat_ro_flags(disk_super) & ~BTRFS_FEATURE_COMPAT_RO_SUPP; if (!(sb->s_flags & MS_RDONLY) && features) { - printk(KERN_ERR "BTRFS: couldn't mount RDWR because of " - "unsupported option features (%Lx).\n", + btrfs_err(fs_info, + "cannot mount read-write because of unsupported optional features (%llx)", features); err = -EINVAL; goto fail_alloc; @@ -2850,8 +2852,7 @@ int open_ctree(struct super_block *sb, ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { - printk(KERN_ERR "BTRFS: failed to read the system " - "array on %s\n", sb->s_id); + btrfs_err(fs_info, "failed to read the system array: %d", ret); goto fail_sb_buffer; } @@ -2865,8 +2866,7 @@ int open_ctree(struct super_block *sb, generation); if (IS_ERR(chunk_root->node) || !extent_buffer_uptodate(chunk_root->node)) { - printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", - sb->s_id); + btrfs_err(fs_info, "failed to read chunk root"); if (!IS_ERR(chunk_root->node)) free_extent_buffer(chunk_root->node); chunk_root->node = NULL; @@ -2880,8 +2880,7 @@ int open_ctree(struct super_block *sb, ret = btrfs_read_chunk_tree(chunk_root); if (ret) { - printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n", - sb->s_id); + btrfs_err(fs_info, "failed to read chunk tree: %d", ret); goto fail_tree_roots; } @@ -2892,8 +2891,7 @@ int open_ctree(struct super_block *sb, btrfs_close_extra_devices(fs_devices, 0); if (!fs_devices->latest_bdev) 
{ - printk(KERN_ERR "BTRFS: failed to read devices on %s\n", - sb->s_id); + btrfs_err(fs_info, "failed to read devices"); goto fail_tree_roots; } @@ -2905,8 +2903,7 @@ retry_root_backup: generation); if (IS_ERR(tree_root->node) || !extent_buffer_uptodate(tree_root->node)) { - printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", - sb->s_id); + btrfs_warn(fs_info, "failed to read tree root"); if (!IS_ERR(tree_root->node)) free_extent_buffer(tree_root->node); tree_root->node = NULL; @@ -2938,20 +2935,19 @@ retry_root_backup: ret = btrfs_recover_balance(fs_info); if (ret) { - printk(KERN_ERR "BTRFS: failed to recover balance\n"); + btrfs_err(fs_info, "failed to recover balance: %d", ret); goto fail_block_groups; } ret = btrfs_init_dev_stats(fs_info); if (ret) { - printk(KERN_ERR "BTRFS: failed to init dev_stats: %d\n", - ret); + btrfs_err(fs_info, "failed to init dev_stats: %d", ret); goto fail_block_groups; } ret = btrfs_init_dev_replace(fs_info); if (ret) { - pr_err("BTRFS: failed to init dev_replace: %d\n", ret); + btrfs_err(fs_info, "failed to init dev_replace: %d", ret); goto fail_block_groups; } @@ -2959,31 +2955,33 @@ retry_root_backup: ret = btrfs_sysfs_add_fsid(fs_devices, NULL); if (ret) { - pr_err("BTRFS: failed to init sysfs fsid interface: %d\n", ret); + btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", + ret); goto fail_block_groups; } ret = btrfs_sysfs_add_device(fs_devices); if (ret) { - pr_err("BTRFS: failed to init sysfs device interface: %d\n", ret); + btrfs_err(fs_info, "failed to init sysfs device interface: %d", + ret); goto fail_fsdev_sysfs; } ret = btrfs_sysfs_add_mounted(fs_info); if (ret) { - pr_err("BTRFS: failed to init sysfs interface: %d\n", ret); + btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); goto fail_fsdev_sysfs; } ret = btrfs_init_space_info(fs_info); if (ret) { - printk(KERN_ERR "BTRFS: Failed to initial space info: %d\n", ret); + btrfs_err(fs_info, "failed to initialize space info: %d", ret); goto fail_sysfs; } ret = btrfs_read_block_groups(fs_info->extent_root); if (ret) { - printk(KERN_ERR "BTRFS: Failed to read block groups: %d\n", ret); + btrfs_err(fs_info, "failed to read block groups: %d", ret); goto fail_sysfs; } fs_info->num_tolerated_disk_barrier_failures = @@ -2991,12 +2989,20 @@ retry_root_backup: if (fs_info->fs_devices->missing_devices > fs_info->num_tolerated_disk_barrier_failures && !(sb->s_flags & MS_RDONLY)) { - pr_warn("BTRFS: missing devices(%llu) exceeds the limit(%d), writeable mount is not allowed\n", + btrfs_warn(fs_info, +"missing devices (%llu) exceeds the limit (%d), writeable mount is not allowed", fs_info->fs_devices->missing_devices, fs_info->num_tolerated_disk_barrier_failures); goto fail_sysfs; } + /* + * Hold the cleaner_mutex thread here so that we don't block + * for a long time on btrfs_recover_relocation. cleaner_kthread + * will wait for us to finish mounting the filesystem. 
+ */ + mutex_lock(&fs_info->cleaner_mutex); + cleaner_mutex_locked = true; fs_info->cleaner_kthread = kthread_run(cleaner_kthread, tree_root, "btrfs-cleaner"); if (IS_ERR(fs_info->cleaner_kthread)) @@ -3011,8 +3017,7 @@ retry_root_backup: if (!btrfs_test_opt(tree_root, SSD) && !btrfs_test_opt(tree_root, NOSSD) && !fs_info->fs_devices->rotating) { - printk(KERN_INFO "BTRFS: detected SSD devices, enabling SSD " - "mode\n"); + btrfs_info(fs_info, "detected SSD devices, enabling SSD mode"); btrfs_set_opt(fs_info->mount_opt, SSD); } @@ -3030,8 +3035,9 @@ retry_root_backup: 1 : 0, fs_info->check_integrity_print_mask); if (ret) - printk(KERN_WARNING "BTRFS: failed to initialize" - " integrity check module %s\n", sb->s_id); + btrfs_warn(fs_info, + "failed to initialize integrity check module: %d", + ret); } #endif ret = btrfs_read_qgroup_config(fs_info); @@ -3056,17 +3062,17 @@ retry_root_backup: ret = btrfs_cleanup_fs_roots(fs_info); if (ret) goto fail_qgroup; - - mutex_lock(&fs_info->cleaner_mutex); + /* We locked cleaner_mutex before creating cleaner_kthread. */ ret = btrfs_recover_relocation(tree_root); - mutex_unlock(&fs_info->cleaner_mutex); if (ret < 0) { - printk(KERN_WARNING - "BTRFS: failed to recover relocation\n"); + btrfs_warn(fs_info, "failed to recover relocation: %d", + ret); err = -EINVAL; goto fail_qgroup; } } + mutex_unlock(&fs_info->cleaner_mutex); + cleaner_mutex_locked = false; location.objectid = BTRFS_FS_TREE_OBJECTID; location.type = BTRFS_ROOT_ITEM_KEY; @@ -3083,11 +3089,11 @@ retry_root_backup: if (btrfs_test_opt(tree_root, FREE_SPACE_TREE) && !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { - pr_info("BTRFS: creating free space tree\n"); + btrfs_info(fs_info, "creating free space tree"); ret = btrfs_create_free_space_tree(fs_info); if (ret) { - pr_warn("BTRFS: failed to create free space tree %d\n", - ret); + btrfs_warn(fs_info, + "failed to create free space tree: %d", ret); close_ctree(tree_root); return ret; } @@ -3104,14 +3110,14 @@ retry_root_backup: ret = btrfs_resume_balance_async(fs_info); if (ret) { - printk(KERN_WARNING "BTRFS: failed to resume balance\n"); + btrfs_warn(fs_info, "failed to resume balance: %d", ret); close_ctree(tree_root); return ret; } ret = btrfs_resume_dev_replace_async(fs_info); if (ret) { - pr_warn("BTRFS: failed to resume dev_replace\n"); + btrfs_warn(fs_info, "failed to resume device replace: %d", ret); close_ctree(tree_root); return ret; } @@ -3120,33 +3126,33 @@ retry_root_backup: if (btrfs_test_opt(tree_root, CLEAR_CACHE) && btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { - pr_info("BTRFS: clearing free space tree\n"); + btrfs_info(fs_info, "clearing free space tree"); ret = btrfs_clear_free_space_tree(fs_info); if (ret) { - pr_warn("BTRFS: failed to clear free space tree %d\n", - ret); + btrfs_warn(fs_info, + "failed to clear free space tree: %d", ret); close_ctree(tree_root); return ret; } } if (!fs_info->uuid_root) { - pr_info("BTRFS: creating UUID tree\n"); + btrfs_info(fs_info, "creating UUID tree"); ret = btrfs_create_uuid_tree(fs_info); if (ret) { - pr_warn("BTRFS: failed to create the UUID tree %d\n", - ret); + btrfs_warn(fs_info, + "failed to create the UUID tree: %d", ret); close_ctree(tree_root); return ret; } } else if (btrfs_test_opt(tree_root, RESCAN_UUID_TREE) || fs_info->generation != btrfs_super_uuid_tree_generation(disk_super)) { - pr_info("BTRFS: checking UUID tree\n"); + btrfs_info(fs_info, "checking UUID tree"); ret = btrfs_check_uuid_tree(fs_info); if (ret) { - pr_warn("BTRFS: failed to check the UUID tree 
%d\n", - ret); + btrfs_warn(fs_info, + "failed to check the UUID tree: %d", ret); close_ctree(tree_root); return ret; } @@ -3180,6 +3186,10 @@ fail_cleaner: filemap_write_and_wait(fs_info->btree_inode->i_mapping); fail_sysfs: + if (cleaner_mutex_locked) { + mutex_unlock(&fs_info->cleaner_mutex); + cleaner_mutex_locked = false; + } btrfs_sysfs_remove_mounted(fs_info); fail_fsdev_sysfs: @@ -3646,7 +3656,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors) if (ret) { mutex_unlock( &root->fs_info->fs_devices->device_list_mutex); - btrfs_std_error(root->fs_info, ret, + btrfs_handle_fs_error(root->fs_info, ret, "errors while submitting device barriers."); return ret; } @@ -3686,7 +3696,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors) mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); /* FUA is masked off if unsupported and can't be the reason */ - btrfs_std_error(root->fs_info, -EIO, + btrfs_handle_fs_error(root->fs_info, -EIO, "%d errors while writing supers", total_errors); return -EIO; } @@ -3704,7 +3714,7 @@ static int write_all_supers(struct btrfs_root *root, int max_mirrors) } mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); if (total_errors > max_errors) { - btrfs_std_error(root->fs_info, -EIO, + btrfs_handle_fs_error(root->fs_info, -EIO, "%d errors while writing supers", total_errors); return -EIO; } diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 84e060eb0de8..9424864fd01a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3824,6 +3824,59 @@ int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) return readonly; } +bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) +{ + struct btrfs_block_group_cache *bg; + bool ret = true; + + bg = btrfs_lookup_block_group(fs_info, bytenr); + if (!bg) + return false; + + spin_lock(&bg->lock); + if (bg->ro) + ret = false; + else + atomic_inc(&bg->nocow_writers); + spin_unlock(&bg->lock); + + /* no put on block group, done by btrfs_dec_nocow_writers */ + if (!ret) + btrfs_put_block_group(bg); + + return ret; + +} + +void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr) +{ + struct btrfs_block_group_cache *bg; + + bg = btrfs_lookup_block_group(fs_info, bytenr); + ASSERT(bg); + if (atomic_dec_and_test(&bg->nocow_writers)) + wake_up_atomic_t(&bg->nocow_writers); + /* + * Once for our lookup and once for the lookup done by a previous call + * to btrfs_inc_nocow_writers() + */ + btrfs_put_block_group(bg); + btrfs_put_block_group(bg); +} + +static int btrfs_wait_nocow_writers_atomic_t(atomic_t *a) +{ + schedule(); + return 0; +} + +void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg) +{ + wait_on_atomic_t(&bg->nocow_writers, + btrfs_wait_nocow_writers_atomic_t, + TASK_UNINTERRUPTIBLE); +} + static const char *alloc_name(u64 flags) { switch (flags) { @@ -4141,7 +4194,7 @@ commit_trans: if (need_commit > 0) { btrfs_start_delalloc_roots(fs_info, 0, -1); - btrfs_wait_ordered_roots(fs_info, -1); + btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1); } trans = btrfs_join_transaction(root); @@ -4583,7 +4636,8 @@ static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root, */ btrfs_start_delalloc_roots(root->fs_info, 0, nr_items); if (!current->journal_info) - btrfs_wait_ordered_roots(root->fs_info, nr_items); + btrfs_wait_ordered_roots(root->fs_info, nr_items, + 0, (u64)-1); } } @@ -4620,7 +4674,7 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, /* Calc the number of 
the pages we need flush for space reservation */ items = calc_reclaim_items_nr(root, to_reclaim); - to_reclaim = items * EXTENT_SIZE_PER_ITEM; + to_reclaim = (u64)items * EXTENT_SIZE_PER_ITEM; trans = (struct btrfs_trans_handle *)current->journal_info; block_rsv = &root->fs_info->delalloc_block_rsv; @@ -4632,7 +4686,8 @@ static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig, if (trans) return; if (wait_ordered) - btrfs_wait_ordered_roots(root->fs_info, items); + btrfs_wait_ordered_roots(root->fs_info, items, + 0, (u64)-1); return; } @@ -4671,7 +4726,8 @@ skip_async: loops++; if (wait_ordered && !trans) { - btrfs_wait_ordered_roots(root->fs_info, items); + btrfs_wait_ordered_roots(root->fs_info, items, + 0, (u64)-1); } else { time_left = schedule_timeout_killable(1); if (time_left) @@ -6172,6 +6228,57 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log, return 0; } +static void +btrfs_inc_block_group_reservations(struct btrfs_block_group_cache *bg) +{ + atomic_inc(&bg->reservations); +} + +void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info, + const u64 start) +{ + struct btrfs_block_group_cache *bg; + + bg = btrfs_lookup_block_group(fs_info, start); + ASSERT(bg); + if (atomic_dec_and_test(&bg->reservations)) + wake_up_atomic_t(&bg->reservations); + btrfs_put_block_group(bg); +} + +static int btrfs_wait_bg_reservations_atomic_t(atomic_t *a) +{ + schedule(); + return 0; +} + +void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg) +{ + struct btrfs_space_info *space_info = bg->space_info; + + ASSERT(bg->ro); + + if (!(bg->flags & BTRFS_BLOCK_GROUP_DATA)) + return; + + /* + * Our block group is read only but before we set it to read only, + * some task might have had allocated an extent from it already, but it + * has not yet created a respective ordered extent (and added it to a + * root's list of ordered extents). + * Therefore wait for any task currently allocating extents, since the + * block group's reservations counter is incremented while a read lock + * on the groups' semaphore is held and decremented after releasing + * the read access on that semaphore and creating the ordered extent. 
+ */ + down_write(&space_info->groups_sem); + up_write(&space_info->groups_sem); + + wait_on_atomic_t(&bg->reservations, + btrfs_wait_bg_reservations_atomic_t, + TASK_UNINTERRUPTIBLE); +} + /** * btrfs_update_reserved_bytes - update the block_group and space info counters * @cache: The cache we are manipulating @@ -7025,36 +7132,35 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group, int delalloc) { struct btrfs_block_group_cache *used_bg = NULL; - bool locked = false; -again: + spin_lock(&cluster->refill_lock); - if (locked) { - if (used_bg == cluster->block_group) + while (1) { + used_bg = cluster->block_group; + if (!used_bg) + return NULL; + + if (used_bg == block_group) return used_bg; - up_read(&used_bg->data_rwsem); - btrfs_put_block_group(used_bg); - } + btrfs_get_block_group(used_bg); - used_bg = cluster->block_group; - if (!used_bg) - return NULL; + if (!delalloc) + return used_bg; - if (used_bg == block_group) - return used_bg; + if (down_read_trylock(&used_bg->data_rwsem)) + return used_bg; - btrfs_get_block_group(used_bg); + spin_unlock(&cluster->refill_lock); - if (!delalloc) - return used_bg; + down_read(&used_bg->data_rwsem); - if (down_read_trylock(&used_bg->data_rwsem)) - return used_bg; + spin_lock(&cluster->refill_lock); + if (used_bg == cluster->block_group) + return used_bg; - spin_unlock(&cluster->refill_lock); - down_read(&used_bg->data_rwsem); - locked = true; - goto again; + up_read(&used_bg->data_rwsem); + btrfs_put_block_group(used_bg); + } } static inline void @@ -7431,6 +7537,7 @@ checks: btrfs_add_free_space(block_group, offset, num_bytes); goto loop; } + btrfs_inc_block_group_reservations(block_group); /* we are all good, lets return */ ins->objectid = search_start; @@ -7612,8 +7719,10 @@ again: WARN_ON(num_bytes < root->sectorsize); ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins, flags, delalloc); - - if (ret == -ENOSPC) { + if (!ret && !is_data) { + btrfs_dec_block_group_reservations(root->fs_info, + ins->objectid); + } else if (ret == -ENOSPC) { if (!final_tried && ins->offset) { num_bytes = min(num_bytes >> 1, ins->offset); num_bytes = round_down(num_bytes, root->sectorsize); @@ -9058,7 +9167,7 @@ out: if (!for_reloc && root_dropped == false) btrfs_add_dead_root(root); if (err && err != -EAGAIN) - btrfs_std_error(root->fs_info, err, NULL); + btrfs_handle_fs_error(root->fs_info, err, NULL); return err; } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index d247fc0eea19..2f83448d34fe 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3200,14 +3200,10 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page, return ret; } -static noinline void update_nr_written(struct page *page, - struct writeback_control *wbc, - unsigned long nr_written) +static void update_nr_written(struct page *page, struct writeback_control *wbc, + unsigned long nr_written) { wbc->nr_to_write -= nr_written; - if (wbc->range_cyclic || (wbc->nr_to_write > 0 && - wbc->range_start == 0 && wbc->range_end == LLONG_MAX)) - page->mapping->writeback_index = page->index + nr_written; } /* @@ -3368,6 +3364,8 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, while (cur <= end) { u64 em_end; + unsigned long max_nr; + if (cur >= i_size) { if (tree->ops && tree->ops->writepage_end_io_hook) tree->ops->writepage_end_io_hook(page, cur, @@ -3423,32 +3421,23 @@ static noinline_for_stack int __extent_writepage_io(struct inode *inode, continue; } - if (tree->ops && tree->ops->writepage_io_hook) { - ret = 
tree->ops->writepage_io_hook(page, cur, - cur + iosize - 1); - } else { - ret = 0; + max_nr = (i_size >> PAGE_SHIFT) + 1; + + set_range_writeback(tree, cur, cur + iosize - 1); + if (!PageWriteback(page)) { + btrfs_err(BTRFS_I(inode)->root->fs_info, + "page %lu not writeback, cur %llu end %llu", + page->index, cur, end); } - if (ret) { - SetPageError(page); - } else { - unsigned long max_nr = (i_size >> PAGE_SHIFT) + 1; - set_range_writeback(tree, cur, cur + iosize - 1); - if (!PageWriteback(page)) { - btrfs_err(BTRFS_I(inode)->root->fs_info, - "page %lu not writeback, cur %llu end %llu", - page->index, cur, end); - } + ret = submit_extent_page(write_flags, tree, wbc, page, + sector, iosize, pg_offset, + bdev, &epd->bio, max_nr, + end_bio_extent_writepage, + 0, 0, 0, false); + if (ret) + SetPageError(page); - ret = submit_extent_page(write_flags, tree, wbc, page, - sector, iosize, pg_offset, - bdev, &epd->bio, max_nr, - end_bio_extent_writepage, - 0, 0, 0, false); - if (ret) - SetPageError(page); - } cur = cur + iosize; pg_offset += iosize; nr++; @@ -3920,12 +3909,13 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, struct inode *inode = mapping->host; int ret = 0; int done = 0; - int err = 0; int nr_to_write_done = 0; struct pagevec pvec; int nr_pages; pgoff_t index; pgoff_t end; /* Inclusive */ + pgoff_t done_index; + int range_whole = 0; int scanned = 0; int tag; @@ -3948,6 +3938,8 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, } else { index = wbc->range_start >> PAGE_SHIFT; end = wbc->range_end >> PAGE_SHIFT; + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) + range_whole = 1; scanned = 1; } if (wbc->sync_mode == WB_SYNC_ALL) @@ -3957,6 +3949,7 @@ static int extent_write_cache_pages(struct extent_io_tree *tree, retry: if (wbc->sync_mode == WB_SYNC_ALL) tag_pages_for_writeback(mapping, index, end); + done_index = index; while (!done && !nr_to_write_done && (index <= end) && (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) { @@ -3966,6 +3959,7 @@ retry: for (i = 0; i < nr_pages; i++) { struct page *page = pvec.pages[i]; + done_index = page->index; /* * At this point we hold neither mapping->tree_lock nor * lock on the page itself: the page may be truncated or @@ -4007,8 +4001,20 @@ retry: unlock_page(page); ret = 0; } - if (!err && ret < 0) - err = ret; + if (ret < 0) { + /* + * done_index is set past this page, + * so media errors will not choke + * background writeout for the entire + * file. This has consequences for + * range_cyclic semantics (ie. it may + * not be suitable for data integrity + * writeout). + */ + done_index = page->index + 1; + done = 1; + break; + } /* * the filesystem may choose to bump up nr_to_write. 
@@ -4020,7 +4026,7 @@ retry: pagevec_release(&pvec); cond_resched(); } - if (!scanned && !done && !err) { + if (!scanned && !done) { /* * We hit the last page and there is more work to be done: wrap * back to the start of the file @@ -4029,8 +4035,12 @@ retry: index = 0; goto retry; } + + if (wbc->range_cyclic || (wbc->nr_to_write > 0 && range_whole)) + mapping->writeback_index = done_index; + btrfs_add_delayed_iput(inode); - return err; + return ret; } static void flush_epd_write_bio(struct extent_page_data *epd) @@ -4822,7 +4832,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, return NULL; eb->fs_info = fs_info; again: - ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + ret = radix_tree_preload(GFP_NOFS); if (ret) goto free_eb; spin_lock(&fs_info->buffer_lock); @@ -4923,7 +4933,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, if (uptodate) set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); again: - ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); + ret = radix_tree_preload(GFP_NOFS); if (ret) goto free_eb; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index b5e0ade90e88..981f402bf754 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -71,7 +71,6 @@ struct extent_io_ops { u64 start, u64 end, int *page_started, unsigned long *nr_written); int (*writepage_start_hook)(struct page *page, u64 start, u64 end); - int (*writepage_io_hook)(struct page *page, u64 start, u64 end); extent_submit_bio_hook_t *submit_bio_hook; int (*merge_bio_hook)(int rw, struct page *page, unsigned long offset, size_t size, struct bio *bio, diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 8d7b5a45c005..c98805c35bab 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1696,25 +1696,26 @@ again: btrfs_end_write_no_snapshoting(root); btrfs_delalloc_release_metadata(inode, release_bytes); } else { - btrfs_delalloc_release_space(inode, pos, release_bytes); + btrfs_delalloc_release_space(inode, + round_down(pos, root->sectorsize), + release_bytes); } } return num_written ? 
num_written : ret; } -static ssize_t __btrfs_direct_write(struct kiocb *iocb, - struct iov_iter *from, - loff_t pos) +static ssize_t __btrfs_direct_write(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); + loff_t pos = iocb->ki_pos; ssize_t written; ssize_t written_buffered; loff_t endbyte; int err; - written = generic_file_direct_write(iocb, from, pos); + written = generic_file_direct_write(iocb, from); if (written < 0 || !iov_iter_count(from)) return written; @@ -1832,7 +1833,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, atomic_inc(&BTRFS_I(inode)->sync_writers); if (iocb->ki_flags & IOCB_DIRECT) { - num_written = __btrfs_direct_write(iocb, from, pos); + num_written = __btrfs_direct_write(iocb, from); } else { num_written = __btrfs_buffered_write(file, from, pos); if (num_written > 0) @@ -1852,11 +1853,8 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->last_sub_trans = root->log_transid; spin_unlock(&BTRFS_I(inode)->lock); - if (num_written > 0) { - err = generic_write_sync(file, pos, num_written); - if (err < 0) - num_written = err; - } + if (num_written > 0) + num_written = generic_write_sync(iocb, num_written); if (sync) atomic_dec(&BTRFS_I(inode)->sync_writers); @@ -2956,7 +2954,7 @@ const struct file_operations btrfs_file_operations = { .fallocate = btrfs_fallocate, .unlocked_ioctl = btrfs_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = btrfs_ioctl, + .compat_ioctl = btrfs_compat_ioctl, #endif .copy_file_range = btrfs_copy_file_range, .clone_file_range = btrfs_clone_file_range, diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index be4d22a5022f..b8acc07ac6c2 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -157,7 +157,7 @@ static int btrfs_del_inode_extref(struct btrfs_trans_handle *trans, */ if (!btrfs_find_name_in_ext_backref(path, ref_objectid, name, name_len, &extref)) { - btrfs_std_error(root->fs_info, -ENOENT, NULL); + btrfs_handle_fs_error(root->fs_info, -ENOENT, NULL); ret = -EROFS; goto out; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2aaba58b4856..91419ef79b00 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -824,6 +824,7 @@ retry: async_extent->ram_size - 1, 0); goto out_free_reserve; } + btrfs_dec_block_group_reservations(root->fs_info, ins.objectid); /* * clear dirty, set writeback and unlock the pages. 
@@ -861,6 +862,7 @@ retry: } return; out_free_reserve: + btrfs_dec_block_group_reservations(root->fs_info, ins.objectid); btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); out_free: extent_clear_unlock_delalloc(inode, async_extent->start, @@ -1038,6 +1040,8 @@ static noinline int cow_file_range(struct inode *inode, goto out_drop_extent_cache; } + btrfs_dec_block_group_reservations(root->fs_info, ins.objectid); + if (disk_num_bytes < cur_alloc_size) break; @@ -1066,6 +1070,7 @@ out: out_drop_extent_cache: btrfs_drop_extent_cache(inode, start, start + ram_size - 1, 0); out_reserve: + btrfs_dec_block_group_reservations(root->fs_info, ins.objectid); btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); out_unlock: extent_clear_unlock_delalloc(inode, start, end, locked_page, @@ -1377,6 +1382,9 @@ next_slot: */ if (csum_exist_in_range(root, disk_bytenr, num_bytes)) goto out_check; + if (!btrfs_inc_nocow_writers(root->fs_info, + disk_bytenr)) + goto out_check; nocow = 1; } else if (extent_type == BTRFS_FILE_EXTENT_INLINE) { extent_end = found_key.offset + @@ -1391,6 +1399,9 @@ out_check: path->slots[0]++; if (!nolock && nocow) btrfs_end_write_no_snapshoting(root); + if (nocow) + btrfs_dec_nocow_writers(root->fs_info, + disk_bytenr); goto next_slot; } if (!nocow) { @@ -1411,6 +1422,9 @@ out_check: if (ret) { if (!nolock && nocow) btrfs_end_write_no_snapshoting(root); + if (nocow) + btrfs_dec_nocow_writers(root->fs_info, + disk_bytenr); goto error; } cow_start = (u64)-1; @@ -1453,6 +1467,8 @@ out_check: ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr, num_bytes, num_bytes, type); + if (nocow) + btrfs_dec_nocow_writers(root->fs_info, disk_bytenr); BUG_ON(ret); /* -ENOMEM */ if (root->root_key.objectid == @@ -7129,6 +7145,43 @@ out: return em; } +static struct extent_map *btrfs_create_dio_extent(struct inode *inode, + const u64 start, + const u64 len, + const u64 orig_start, + const u64 block_start, + const u64 block_len, + const u64 orig_block_len, + const u64 ram_bytes, + const int type) +{ + struct extent_map *em = NULL; + int ret; + + down_read(&BTRFS_I(inode)->dio_sem); + if (type != BTRFS_ORDERED_NOCOW) { + em = create_pinned_em(inode, start, len, orig_start, + block_start, block_len, orig_block_len, + ram_bytes, type); + if (IS_ERR(em)) + goto out; + } + ret = btrfs_add_ordered_extent_dio(inode, start, block_start, + len, block_len, type); + if (ret) { + if (em) { + free_extent_map(em); + btrfs_drop_extent_cache(inode, start, + start + len - 1, 0); + } + em = ERR_PTR(ret); + } + out: + up_read(&BTRFS_I(inode)->dio_sem); + + return em; +} + static struct extent_map *btrfs_new_extent_direct(struct inode *inode, u64 start, u64 len) { @@ -7144,41 +7197,13 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, if (ret) return ERR_PTR(ret); - /* - * Create the ordered extent before the extent map. This is to avoid - * races with the fast fsync path that would lead to it logging file - * extent items that point to disk extents that were not yet written to. - * The fast fsync path collects ordered extents into a local list and - * then collects all the new extent maps, so we must create the ordered - * extent first and make sure the fast fsync path collects any new - * ordered extents after collecting new extent maps as well. - * The fsync path simply can not rely on inode_dio_wait() because it - * causes deadlock with AIO. 
- */ - ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid, - ins.offset, ins.offset, 0); - if (ret) { + em = btrfs_create_dio_extent(inode, start, ins.offset, start, + ins.objectid, ins.offset, ins.offset, + ins.offset, 0); + btrfs_dec_block_group_reservations(root->fs_info, ins.objectid); + if (IS_ERR(em)) btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); - return ERR_PTR(ret); - } - - em = create_pinned_em(inode, start, ins.offset, start, ins.objectid, - ins.offset, ins.offset, ins.offset, 0); - if (IS_ERR(em)) { - struct btrfs_ordered_extent *oe; - btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); - oe = btrfs_lookup_ordered_extent(inode, start); - ASSERT(oe); - if (WARN_ON(!oe)) - return em; - set_bit(BTRFS_ORDERED_IOERR, &oe->flags); - set_bit(BTRFS_ORDERED_IO_DONE, &oe->flags); - btrfs_remove_ordered_extent(inode, oe); - /* Once for our lookup and once for the ordered extents tree. */ - btrfs_put_ordered_extent(oe); - btrfs_put_ordered_extent(oe); - } return em; } @@ -7650,24 +7675,21 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, block_start = em->block_start + (start - em->start); if (can_nocow_extent(inode, start, &len, &orig_start, - &orig_block_len, &ram_bytes) == 1) { + &orig_block_len, &ram_bytes) == 1 && + btrfs_inc_nocow_writers(root->fs_info, block_start)) { + struct extent_map *em2; + + em2 = btrfs_create_dio_extent(inode, start, len, + orig_start, block_start, + len, orig_block_len, + ram_bytes, type); + btrfs_dec_nocow_writers(root->fs_info, block_start); if (type == BTRFS_ORDERED_PREALLOC) { free_extent_map(em); - em = create_pinned_em(inode, start, len, - orig_start, - block_start, len, - orig_block_len, - ram_bytes, type); - if (IS_ERR(em)) { - ret = PTR_ERR(em); - goto unlock_err; - } + em = em2; } - - ret = btrfs_add_ordered_extent_dio(inode, start, - block_start, len, len, type); - if (ret) { - free_extent_map(em); + if (em2 && IS_ERR(em2)) { + ret = PTR_ERR(em2); goto unlock_err; } goto unlock; @@ -8541,13 +8563,13 @@ out: return retval; } -static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_dio_data dio_data = { 0 }; + loff_t offset = iocb->ki_pos; size_t count = 0; int flags = 0; bool wakeup = true; @@ -8607,7 +8629,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, ret = __blockdev_direct_IO(iocb, inode, BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, - iter, offset, btrfs_get_blocks_direct, NULL, + iter, btrfs_get_blocks_direct, NULL, btrfs_submit_direct, flags); if (iov_iter_rw(iter) == WRITE) { current->journal_info = NULL; @@ -9230,6 +9252,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) INIT_LIST_HEAD(&ei->delalloc_inodes); INIT_LIST_HEAD(&ei->delayed_iput); RB_CLEAR_NODE(&ei->rb_node); + init_rwsem(&ei->dio_sem); return inode; } @@ -9387,10 +9410,281 @@ static int btrfs_getattr(struct vfsmount *mnt, return 0; } +static int btrfs_rename_exchange(struct inode *old_dir, + struct dentry *old_dentry, + struct inode *new_dir, + struct dentry *new_dentry) +{ + struct btrfs_trans_handle *trans; + struct btrfs_root *root = BTRFS_I(old_dir)->root; + struct btrfs_root *dest = BTRFS_I(new_dir)->root; + struct inode *new_inode = new_dentry->d_inode; + struct inode *old_inode = old_dentry->d_inode; + 
struct timespec ctime = CURRENT_TIME; + struct dentry *parent; + u64 old_ino = btrfs_ino(old_inode); + u64 new_ino = btrfs_ino(new_inode); + u64 old_idx = 0; + u64 new_idx = 0; + u64 root_objectid; + int ret; + bool root_log_pinned = false; + bool dest_log_pinned = false; + + /* we only allow rename subvolume link between subvolumes */ + if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) + return -EXDEV; + + /* close the race window with snapshot create/destroy ioctl */ + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) + down_read(&root->fs_info->subvol_sem); + if (new_ino == BTRFS_FIRST_FREE_OBJECTID) + down_read(&dest->fs_info->subvol_sem); + + /* + * We want to reserve the absolute worst case amount of items. So if + * both inodes are subvols and we need to unlink them then that would + * require 4 item modifications, but if they are both normal inodes it + * would require 5 item modifications, so we'll assume their normal + * inodes. So 5 * 2 is 10, plus 2 for the new links, so 12 total items + * should cover the worst case number of items we'll modify. + */ + trans = btrfs_start_transaction(root, 12); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_notrans; + } + + /* + * We need to find a free sequence number both in the source and + * in the destination directory for the exchange. + */ + ret = btrfs_set_inode_index(new_dir, &old_idx); + if (ret) + goto out_fail; + ret = btrfs_set_inode_index(old_dir, &new_idx); + if (ret) + goto out_fail; + + BTRFS_I(old_inode)->dir_index = 0ULL; + BTRFS_I(new_inode)->dir_index = 0ULL; + + /* Reference for the source. */ + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { + /* force full log commit if subvolume involved. */ + btrfs_set_log_full_commit(root->fs_info, trans); + } else { + btrfs_pin_log_trans(root); + root_log_pinned = true; + ret = btrfs_insert_inode_ref(trans, dest, + new_dentry->d_name.name, + new_dentry->d_name.len, + old_ino, + btrfs_ino(new_dir), old_idx); + if (ret) + goto out_fail; + } + + /* And now for the dest. */ + if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { + /* force full log commit if subvolume involved. */ + btrfs_set_log_full_commit(dest->fs_info, trans); + } else { + btrfs_pin_log_trans(dest); + dest_log_pinned = true; + ret = btrfs_insert_inode_ref(trans, root, + old_dentry->d_name.name, + old_dentry->d_name.len, + new_ino, + btrfs_ino(old_dir), new_idx); + if (ret) + goto out_fail; + } + + /* Update inode version and ctime/mtime. 
*/ + inode_inc_iversion(old_dir); + inode_inc_iversion(new_dir); + inode_inc_iversion(old_inode); + inode_inc_iversion(new_inode); + old_dir->i_ctime = old_dir->i_mtime = ctime; + new_dir->i_ctime = new_dir->i_mtime = ctime; + old_inode->i_ctime = ctime; + new_inode->i_ctime = ctime; + + if (old_dentry->d_parent != new_dentry->d_parent) { + btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); + btrfs_record_unlink_dir(trans, new_dir, new_inode, 1); + } + + /* src is a subvolume */ + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) { + root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; + ret = btrfs_unlink_subvol(trans, root, old_dir, + root_objectid, + old_dentry->d_name.name, + old_dentry->d_name.len); + } else { /* src is an inode */ + ret = __btrfs_unlink_inode(trans, root, old_dir, + old_dentry->d_inode, + old_dentry->d_name.name, + old_dentry->d_name.len); + if (!ret) + ret = btrfs_update_inode(trans, root, old_inode); + } + if (ret) { + btrfs_abort_transaction(trans, root, ret); + goto out_fail; + } + + /* dest is a subvolume */ + if (new_ino == BTRFS_FIRST_FREE_OBJECTID) { + root_objectid = BTRFS_I(new_inode)->root->root_key.objectid; + ret = btrfs_unlink_subvol(trans, dest, new_dir, + root_objectid, + new_dentry->d_name.name, + new_dentry->d_name.len); + } else { /* dest is an inode */ + ret = __btrfs_unlink_inode(trans, dest, new_dir, + new_dentry->d_inode, + new_dentry->d_name.name, + new_dentry->d_name.len); + if (!ret) + ret = btrfs_update_inode(trans, dest, new_inode); + } + if (ret) { + btrfs_abort_transaction(trans, root, ret); + goto out_fail; + } + + ret = btrfs_add_link(trans, new_dir, old_inode, + new_dentry->d_name.name, + new_dentry->d_name.len, 0, old_idx); + if (ret) { + btrfs_abort_transaction(trans, root, ret); + goto out_fail; + } + + ret = btrfs_add_link(trans, old_dir, new_inode, + old_dentry->d_name.name, + old_dentry->d_name.len, 0, new_idx); + if (ret) { + btrfs_abort_transaction(trans, root, ret); + goto out_fail; + } + + if (old_inode->i_nlink == 1) + BTRFS_I(old_inode)->dir_index = old_idx; + if (new_inode->i_nlink == 1) + BTRFS_I(new_inode)->dir_index = new_idx; + + if (root_log_pinned) { + parent = new_dentry->d_parent; + btrfs_log_new_name(trans, old_inode, old_dir, parent); + btrfs_end_log_trans(root); + root_log_pinned = false; + } + if (dest_log_pinned) { + parent = old_dentry->d_parent; + btrfs_log_new_name(trans, new_inode, new_dir, parent); + btrfs_end_log_trans(dest); + dest_log_pinned = false; + } +out_fail: + /* + * If we have pinned a log and an error happened, we unpin tasks + * trying to sync the log and force them to fallback to a transaction + * commit if the log currently contains any of the inodes involved in + * this rename operation (to ensure we do not persist a log with an + * inconsistent state for any of these inodes or leading to any + * inconsistencies when replayed). If the transaction was aborted, the + * abortion reason is propagated to userspace when attempting to commit + * the transaction. If the log does not contain any of these inodes, we + * allow the tasks to sync it. 
+ */ + if (ret && (root_log_pinned || dest_log_pinned)) { + if (btrfs_inode_in_log(old_dir, root->fs_info->generation) || + btrfs_inode_in_log(new_dir, root->fs_info->generation) || + btrfs_inode_in_log(old_inode, root->fs_info->generation) || + (new_inode && + btrfs_inode_in_log(new_inode, root->fs_info->generation))) + btrfs_set_log_full_commit(root->fs_info, trans); + + if (root_log_pinned) { + btrfs_end_log_trans(root); + root_log_pinned = false; + } + if (dest_log_pinned) { + btrfs_end_log_trans(dest); + dest_log_pinned = false; + } + } + ret = btrfs_end_transaction(trans, root); +out_notrans: + if (new_ino == BTRFS_FIRST_FREE_OBJECTID) + up_read(&dest->fs_info->subvol_sem); + if (old_ino == BTRFS_FIRST_FREE_OBJECTID) + up_read(&root->fs_info->subvol_sem); + + return ret; +} + +static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans, + struct btrfs_root *root, + struct inode *dir, + struct dentry *dentry) +{ + int ret; + struct inode *inode; + u64 objectid; + u64 index; + + ret = btrfs_find_free_ino(root, &objectid); + if (ret) + return ret; + + inode = btrfs_new_inode(trans, root, dir, + dentry->d_name.name, + dentry->d_name.len, + btrfs_ino(dir), + objectid, + S_IFCHR | WHITEOUT_MODE, + &index); + + if (IS_ERR(inode)) { + ret = PTR_ERR(inode); + return ret; + } + + inode->i_op = &btrfs_special_inode_operations; + init_special_inode(inode, inode->i_mode, + WHITEOUT_DEV); + + ret = btrfs_init_inode_security(trans, inode, dir, + &dentry->d_name); + if (ret) + goto out; + + ret = btrfs_add_nondir(trans, dir, dentry, + inode, 0, index); + if (ret) + goto out; + + ret = btrfs_update_inode(trans, root, inode); +out: + unlock_new_inode(inode); + if (ret) + inode_dec_link_count(inode); + iput(inode); + + return ret; +} + static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) { struct btrfs_trans_handle *trans; + unsigned int trans_num_items; struct btrfs_root *root = BTRFS_I(old_dir)->root; struct btrfs_root *dest = BTRFS_I(new_dir)->root; struct inode *new_inode = d_inode(new_dentry); @@ -9399,6 +9693,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, u64 root_objectid; int ret; u64 old_ino = btrfs_ino(old_inode); + bool log_pinned = false; if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) return -EPERM; @@ -9449,15 +9744,21 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, * We want to reserve the absolute worst case amount of items. So if * both inodes are subvols and we need to unlink them then that would * require 4 item modifications, but if they are both normal inodes it - * would require 5 item modifications, so we'll assume their normal + * would require 5 item modifications, so we'll assume they are normal * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items * should cover the worst case number of items we'll modify. + * If our rename has the whiteout flag, we need more 5 units for the + * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item + * when selinux is enabled). 
*/ - trans = btrfs_start_transaction(root, 11); + trans_num_items = 11; + if (flags & RENAME_WHITEOUT) + trans_num_items += 5; + trans = btrfs_start_transaction(root, trans_num_items); if (IS_ERR(trans)) { - ret = PTR_ERR(trans); - goto out_notrans; - } + ret = PTR_ERR(trans); + goto out_notrans; + } if (dest != root) btrfs_record_root_in_trans(trans, dest); @@ -9471,6 +9772,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, /* force full log commit if subvolume involved. */ btrfs_set_log_full_commit(root->fs_info, trans); } else { + btrfs_pin_log_trans(root); + log_pinned = true; ret = btrfs_insert_inode_ref(trans, dest, new_dentry->d_name.name, new_dentry->d_name.len, @@ -9478,14 +9781,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, btrfs_ino(new_dir), index); if (ret) goto out_fail; - /* - * this is an ugly little race, but the rename is required - * to make sure that if we crash, the inode is either at the - * old name or the new one. pinning the log transaction lets - * us make sure we don't allow a log commit to come in after - * we unlink the name but before we add the new name back in. - */ - btrfs_pin_log_trans(root); } inode_inc_iversion(old_dir); @@ -9552,12 +9847,46 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, if (old_inode->i_nlink == 1) BTRFS_I(old_inode)->dir_index = index; - if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { + if (log_pinned) { struct dentry *parent = new_dentry->d_parent; + btrfs_log_new_name(trans, old_inode, old_dir, parent); btrfs_end_log_trans(root); + log_pinned = false; + } + + if (flags & RENAME_WHITEOUT) { + ret = btrfs_whiteout_for_rename(trans, root, old_dir, + old_dentry); + + if (ret) { + btrfs_abort_transaction(trans, root, ret); + goto out_fail; + } } out_fail: + /* + * If we have pinned the log and an error happened, we unpin tasks + * trying to sync the log and force them to fallback to a transaction + * commit if the log currently contains any of the inodes involved in + * this rename operation (to ensure we do not persist a log with an + * inconsistent state for any of these inodes or leading to any + * inconsistencies when replayed). If the transaction was aborted, the + * abortion reason is propagated to userspace when attempting to commit + * the transaction. If the log does not contain any of these inodes, we + * allow the tasks to sync it. 
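The hunks above and the btrfs_rename2() dispatch just below wire RENAME_EXCHANGE and RENAME_WHITEOUT into btrfs. For orientation only, here is a minimal user-space sketch that exercises both flags through renameat2(2) using the raw syscall (older glibc ships no wrapper); the file names are hypothetical and both paths must live on the same btrfs mount.

/*
 * Illustrative sketch only, not part of the patch.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_EXCHANGE
#define RENAME_EXCHANGE	(1 << 1)	/* atomically swap source and target */
#endif
#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT	(1 << 2)	/* leave a whiteout at the source */
#endif

int main(void)
{
	/* Atomically exchange two existing names "a" and "b". */
	if (syscall(SYS_renameat2, AT_FDCWD, "a", AT_FDCWD, "b",
		    RENAME_EXCHANGE) < 0)
		perror("RENAME_EXCHANGE");

	/* Move "b" to "c" and leave a whiteout behind (needs CAP_MKNOD). */
	if (syscall(SYS_renameat2, AT_FDCWD, "b", AT_FDCWD, "c",
		    RENAME_WHITEOUT) < 0)
		perror("RENAME_WHITEOUT");

	return 0;
}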
+ */ + if (ret && log_pinned) { + if (btrfs_inode_in_log(old_dir, root->fs_info->generation) || + btrfs_inode_in_log(new_dir, root->fs_info->generation) || + btrfs_inode_in_log(old_inode, root->fs_info->generation) || + (new_inode && + btrfs_inode_in_log(new_inode, root->fs_info->generation))) + btrfs_set_log_full_commit(root->fs_info, trans); + + btrfs_end_log_trans(root); + log_pinned = false; + } btrfs_end_transaction(trans, root); out_notrans: if (old_ino == BTRFS_FIRST_FREE_OBJECTID) @@ -9570,10 +9899,14 @@ static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { - if (flags & ~RENAME_NOREPLACE) + if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT)) return -EINVAL; - return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry); + if (flags & RENAME_EXCHANGE) + return btrfs_rename_exchange(old_dir, old_dentry, new_dir, + new_dentry); + + return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags); } static void btrfs_run_delalloc_work(struct btrfs_work *work) @@ -9942,6 +10275,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode, btrfs_end_transaction(trans, root); break; } + btrfs_dec_block_group_reservations(root->fs_info, ins.objectid); last_alloc = ins.offset; ret = insert_reserved_file_extent(trans, inode, @@ -10160,10 +10494,10 @@ static const struct inode_operations btrfs_dir_inode_operations = { .symlink = btrfs_symlink, .setattr = btrfs_setattr, .mknod = btrfs_mknod, - .setxattr = btrfs_setxattr, + .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = btrfs_listxattr, - .removexattr = btrfs_removexattr, + .removexattr = generic_removexattr, .permission = btrfs_permission, .get_acl = btrfs_get_acl, .set_acl = btrfs_set_acl, @@ -10184,7 +10518,7 @@ static const struct file_operations btrfs_dir_file_operations = { .iterate = btrfs_real_readdir, .unlocked_ioctl = btrfs_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = btrfs_ioctl, + .compat_ioctl = btrfs_compat_ioctl, #endif .release = btrfs_release_file, .fsync = btrfs_sync_file, @@ -10237,10 +10571,10 @@ static const struct address_space_operations btrfs_symlink_aops = { static const struct inode_operations btrfs_file_inode_operations = { .getattr = btrfs_getattr, .setattr = btrfs_setattr, - .setxattr = btrfs_setxattr, + .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = btrfs_listxattr, - .removexattr = btrfs_removexattr, + .removexattr = generic_removexattr, .permission = btrfs_permission, .fiemap = btrfs_fiemap, .get_acl = btrfs_get_acl, @@ -10251,10 +10585,10 @@ static const struct inode_operations btrfs_special_inode_operations = { .getattr = btrfs_getattr, .setattr = btrfs_setattr, .permission = btrfs_permission, - .setxattr = btrfs_setxattr, + .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = btrfs_listxattr, - .removexattr = btrfs_removexattr, + .removexattr = generic_removexattr, .get_acl = btrfs_get_acl, .set_acl = btrfs_set_acl, .update_time = btrfs_update_time, @@ -10265,10 +10599,10 @@ static const struct inode_operations btrfs_symlink_inode_operations = { .getattr = btrfs_getattr, .setattr = btrfs_setattr, .permission = btrfs_permission, - .setxattr = btrfs_setxattr, + .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = btrfs_listxattr, - .removexattr = btrfs_removexattr, + .removexattr = generic_removexattr, .update_time = btrfs_update_time, }; diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 
5a23806ae418..4e700694b741 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -125,10 +125,10 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags) if (flags & BTRFS_INODE_NODATACOW) iflags |= FS_NOCOW_FL; - if ((flags & BTRFS_INODE_COMPRESS) && !(flags & BTRFS_INODE_NOCOMPRESS)) - iflags |= FS_COMPR_FL; - else if (flags & BTRFS_INODE_NOCOMPRESS) + if (flags & BTRFS_INODE_NOCOMPRESS) iflags |= FS_NOCOMP_FL; + else if (flags & BTRFS_INODE_COMPRESS) + iflags |= FS_COMPR_FL; return iflags; } @@ -439,7 +439,7 @@ static noinline int create_subvol(struct inode *dir, { struct btrfs_trans_handle *trans; struct btrfs_key key; - struct btrfs_root_item root_item; + struct btrfs_root_item *root_item; struct btrfs_inode_item *inode_item; struct extent_buffer *leaf; struct btrfs_root *root = BTRFS_I(dir)->root; @@ -455,16 +455,22 @@ static noinline int create_subvol(struct inode *dir, u64 qgroup_reserved; uuid_le new_uuid; + root_item = kzalloc(sizeof(*root_item), GFP_KERNEL); + if (!root_item) + return -ENOMEM; + ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); if (ret) - return ret; + goto fail_free; /* * Don't create subvolume whose level is not zero. Or qgroup will be * screwed up since it assume subvolme qgroup's level to be 0. */ - if (btrfs_qgroup_level(objectid)) - return -ENOSPC; + if (btrfs_qgroup_level(objectid)) { + ret = -ENOSPC; + goto fail_free; + } btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP); /* @@ -474,14 +480,14 @@ static noinline int create_subvol(struct inode *dir, ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 8, &qgroup_reserved, false); if (ret) - return ret; + goto fail_free; trans = btrfs_start_transaction(root, 0); if (IS_ERR(trans)) { ret = PTR_ERR(trans); btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved); - return ret; + goto fail_free; } trans->block_rsv = &block_rsv; trans->bytes_reserved = block_rsv.size; @@ -509,47 +515,45 @@ static noinline int create_subvol(struct inode *dir, BTRFS_UUID_SIZE); btrfs_mark_buffer_dirty(leaf); - memset(&root_item, 0, sizeof(root_item)); - - inode_item = &root_item.inode; + inode_item = &root_item->inode; btrfs_set_stack_inode_generation(inode_item, 1); btrfs_set_stack_inode_size(inode_item, 3); btrfs_set_stack_inode_nlink(inode_item, 1); btrfs_set_stack_inode_nbytes(inode_item, root->nodesize); btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755); - btrfs_set_root_flags(&root_item, 0); - btrfs_set_root_limit(&root_item, 0); + btrfs_set_root_flags(root_item, 0); + btrfs_set_root_limit(root_item, 0); btrfs_set_stack_inode_flags(inode_item, BTRFS_INODE_ROOT_ITEM_INIT); - btrfs_set_root_bytenr(&root_item, leaf->start); - btrfs_set_root_generation(&root_item, trans->transid); - btrfs_set_root_level(&root_item, 0); - btrfs_set_root_refs(&root_item, 1); - btrfs_set_root_used(&root_item, leaf->len); - btrfs_set_root_last_snapshot(&root_item, 0); + btrfs_set_root_bytenr(root_item, leaf->start); + btrfs_set_root_generation(root_item, trans->transid); + btrfs_set_root_level(root_item, 0); + btrfs_set_root_refs(root_item, 1); + btrfs_set_root_used(root_item, leaf->len); + btrfs_set_root_last_snapshot(root_item, 0); - btrfs_set_root_generation_v2(&root_item, - btrfs_root_generation(&root_item)); + btrfs_set_root_generation_v2(root_item, + btrfs_root_generation(root_item)); uuid_le_gen(&new_uuid); - memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE); - btrfs_set_stack_timespec_sec(&root_item.otime, cur_time.tv_sec); - btrfs_set_stack_timespec_nsec(&root_item.otime, 
cur_time.tv_nsec); - root_item.ctime = root_item.otime; - btrfs_set_root_ctransid(&root_item, trans->transid); - btrfs_set_root_otransid(&root_item, trans->transid); + memcpy(root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE); + btrfs_set_stack_timespec_sec(&root_item->otime, cur_time.tv_sec); + btrfs_set_stack_timespec_nsec(&root_item->otime, cur_time.tv_nsec); + root_item->ctime = root_item->otime; + btrfs_set_root_ctransid(root_item, trans->transid); + btrfs_set_root_otransid(root_item, trans->transid); btrfs_tree_unlock(leaf); free_extent_buffer(leaf); leaf = NULL; - btrfs_set_root_dirid(&root_item, new_dirid); + btrfs_set_root_dirid(root_item, new_dirid); key.objectid = objectid; key.offset = 0; key.type = BTRFS_ROOT_ITEM_KEY; ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key, - &root_item); + root_item); if (ret) goto fail; @@ -601,12 +605,13 @@ static noinline int create_subvol(struct inode *dir, BUG_ON(ret); ret = btrfs_uuid_tree_add(trans, root->fs_info->uuid_root, - root_item.uuid, BTRFS_UUID_KEY_SUBVOL, + root_item->uuid, BTRFS_UUID_KEY_SUBVOL, objectid); if (ret) btrfs_abort_transaction(trans, root, ret); fail: + kfree(root_item); trans->block_rsv = NULL; trans->bytes_reserved = 0; btrfs_subvolume_release_metadata(root, &block_rsv, qgroup_reserved); @@ -629,6 +634,10 @@ fail: d_instantiate(dentry, inode); } return ret; + +fail_free: + kfree(root_item); + return ret; } static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root) @@ -681,7 +690,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, if (ret) goto dec_and_free; - btrfs_wait_ordered_extents(root, -1); + btrfs_wait_ordered_extents(root, -1, 0, (u64)-1); btrfs_init_block_rsv(&pending_snapshot->block_rsv, BTRFS_BLOCK_RSV_TEMP); @@ -837,9 +846,11 @@ static noinline int btrfs_mksubvol(struct path *parent, struct dentry *dentry; int error; - error = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); - if (error == -EINTR) - return error; + inode_lock_nested(dir, I_MUTEX_PARENT); + // XXX: should've been + // mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); + // if (error == -EINTR) + // return error; dentry = lookup_one_len(name, parent->dentry, namelen); error = PTR_ERR(dentry); @@ -2366,9 +2377,11 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, goto out; - err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); - if (err == -EINTR) - goto out_drop_write; + inode_lock_nested(dir, I_MUTEX_PARENT); + // XXX: should've been + // err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT); + // if (err == -EINTR) + // goto out_drop_write; dentry = lookup_one_len(vol_args->name, parent, namelen); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); @@ -2558,7 +2571,7 @@ out_dput: dput(dentry); out_unlock_dir: inode_unlock(dir); -out_drop_write: +//out_drop_write: mnt_drop_write_file(file); out: kfree(vol_args); @@ -2667,10 +2680,10 @@ out: return ret; } -static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) +static long btrfs_ioctl_rm_dev_v2(struct file *file, void __user *arg) { struct btrfs_root *root = BTRFS_I(file_inode(file))->root; - struct btrfs_ioctl_vol_args *vol_args; + struct btrfs_ioctl_vol_args_v2 *vol_args; int ret; if (!capable(CAP_SYS_ADMIN)) @@ -2686,7 +2699,9 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) goto err_drop; } - vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; + /* Check for compatibility reject unknown flags */ + if (vol_args->flags & ~BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED) + return 
-EOPNOTSUPP; if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, 1)) { @@ -2695,13 +2710,23 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) } mutex_lock(&root->fs_info->volume_mutex); - ret = btrfs_rm_device(root, vol_args->name); + if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) { + ret = btrfs_rm_device(root, NULL, vol_args->devid); + } else { + vol_args->name[BTRFS_SUBVOL_NAME_MAX] = '\0'; + ret = btrfs_rm_device(root, vol_args->name, 0); + } mutex_unlock(&root->fs_info->volume_mutex); atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); - if (!ret) - btrfs_info(root->fs_info, "disk deleted %s",vol_args->name); - + if (!ret) { + if (vol_args->flags & BTRFS_DEVICE_SPEC_BY_ID) + btrfs_info(root->fs_info, "device deleted: id %llu", + vol_args->devid); + else + btrfs_info(root->fs_info, "device deleted: %s", + vol_args->name); + } out: kfree(vol_args); err_drop: @@ -2709,6 +2734,47 @@ err_drop: return ret; } +static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) +{ + struct btrfs_root *root = BTRFS_I(file_inode(file))->root; + struct btrfs_ioctl_vol_args *vol_args; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + ret = mnt_want_write_file(file); + if (ret) + return ret; + + if (atomic_xchg(&root->fs_info->mutually_exclusive_operation_running, + 1)) { + ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; + goto out_drop_write; + } + + vol_args = memdup_user(arg, sizeof(*vol_args)); + if (IS_ERR(vol_args)) { + ret = PTR_ERR(vol_args); + goto out; + } + + vol_args->name[BTRFS_PATH_NAME_MAX] = '\0'; + mutex_lock(&root->fs_info->volume_mutex); + ret = btrfs_rm_device(root, vol_args->name, 0); + mutex_unlock(&root->fs_info->volume_mutex); + + if (!ret) + btrfs_info(root->fs_info, "disk deleted %s",vol_args->name); + kfree(vol_args); +out: + atomic_set(&root->fs_info->mutually_exclusive_operation_running, 0); +out_drop_write: + mnt_drop_write_file(file); + + return ret; +} + static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) { struct btrfs_ioctl_fs_info_args *fi_args; @@ -3468,13 +3534,16 @@ static int btrfs_clone(struct inode *src, struct inode *inode, u64 last_dest_end = destoff; ret = -ENOMEM; - buf = vmalloc(root->nodesize); - if (!buf) - return ret; + buf = kmalloc(root->nodesize, GFP_KERNEL | __GFP_NOWARN); + if (!buf) { + buf = vmalloc(root->nodesize); + if (!buf) + return ret; + } path = btrfs_alloc_path(); if (!path) { - vfree(buf); + kvfree(buf); return ret; } @@ -3775,7 +3844,7 @@ process_slot: out: btrfs_free_path(path); - vfree(buf); + kvfree(buf); return ret; } @@ -4376,7 +4445,7 @@ static long btrfs_ioctl_dev_replace(struct btrfs_root *root, void __user *arg) 1)) { ret = BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS; } else { - ret = btrfs_dev_replace_start(root, p); + ret = btrfs_dev_replace_by_ioctl(root, p); atomic_set( &root->fs_info->mutually_exclusive_operation_running, 0); @@ -4847,8 +4916,8 @@ static long btrfs_ioctl_qgroup_assign(struct file *file, void __user *arg) /* update qgroup status and info */ err = btrfs_run_qgroups(trans, root->fs_info); if (err < 0) - btrfs_std_error(root->fs_info, ret, - "failed to update qgroup status and info\n"); + btrfs_handle_fs_error(root->fs_info, err, + "failed to update qgroup status and info"); err = btrfs_end_transaction(trans, root); if (err && !ret) ret = err; @@ -5394,9 +5463,15 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg) if (ret) return ret; + ret = mnt_want_write_file(file); + if (ret) + return ret; + trans 
= btrfs_start_transaction(root, 0); - if (IS_ERR(trans)) - return PTR_ERR(trans); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + goto out_drop_write; + } spin_lock(&root->fs_info->super_lock); newflags = btrfs_super_compat_flags(super_block); @@ -5415,7 +5490,11 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg) btrfs_set_super_incompat_flags(super_block, newflags); spin_unlock(&root->fs_info->super_lock); - return btrfs_commit_transaction(trans, root); + ret = btrfs_commit_transaction(trans, root); +out_drop_write: + mnt_drop_write_file(file); + + return ret; } long btrfs_ioctl(struct file *file, unsigned int @@ -5459,6 +5538,8 @@ long btrfs_ioctl(struct file *file, unsigned int return btrfs_ioctl_add_dev(root, argp); case BTRFS_IOC_RM_DEV: return btrfs_ioctl_rm_dev(file, argp); + case BTRFS_IOC_RM_DEV_V2: + return btrfs_ioctl_rm_dev_v2(file, argp); case BTRFS_IOC_FS_INFO: return btrfs_ioctl_fs_info(root, argp); case BTRFS_IOC_DEV_INFO: @@ -5552,3 +5633,24 @@ long btrfs_ioctl(struct file *file, unsigned int return -ENOTTY; } + +#ifdef CONFIG_COMPAT +long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case FS_IOC32_GETFLAGS: + cmd = FS_IOC_GETFLAGS; + break; + case FS_IOC32_SETFLAGS: + cmd = FS_IOC_SETFLAGS; + break; + case FS_IOC32_GETVERSION: + cmd = FS_IOC_GETVERSION; + break; + default: + return -ENOIOCTLCMD; + } + + return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg)); +} +#endif diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c index 0de7da5a610d..559170464d7c 100644 --- a/fs/btrfs/ordered-data.c +++ b/fs/btrfs/ordered-data.c @@ -661,14 +661,15 @@ static void btrfs_run_ordered_extent_work(struct btrfs_work *work) * wait for all the ordered extents in a root. This is done when balancing * space between drives. 
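As background for the BTRFS_IOC_RM_DEV_V2 case added to btrfs_ioctl() above: deleting a device by numeric id is driven from user space roughly as in the sketch below. Illustrative only; it assumes a uapi <linux/btrfs.h> new enough to export the v2 ioctl, BTRFS_DEVICE_SPEC_BY_ID and the devid member, CAP_SYS_ADMIN, and a hypothetical mount point and device id.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/btrfs.h>

int main(void)
{
	struct btrfs_ioctl_vol_args_v2 args;
	int fd = open("/mnt/btrfs", O_RDONLY);	/* hypothetical mount point */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&args, 0, sizeof(args));
	args.flags = BTRFS_DEVICE_SPEC_BY_ID;	/* select device by id, not by path */
	args.devid = 2;				/* hypothetical device id */

	if (ioctl(fd, BTRFS_IOC_RM_DEV_V2, &args) < 0)
		perror("BTRFS_IOC_RM_DEV_V2");

	close(fd);
	return 0;
}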
*/ -int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr) +int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, + const u64 range_start, const u64 range_len) { - struct list_head splice, works; + LIST_HEAD(splice); + LIST_HEAD(skipped); + LIST_HEAD(works); struct btrfs_ordered_extent *ordered, *next; int count = 0; - - INIT_LIST_HEAD(&splice); - INIT_LIST_HEAD(&works); + const u64 range_end = range_start + range_len; mutex_lock(&root->ordered_extent_mutex); spin_lock(&root->ordered_extent_lock); @@ -676,6 +677,14 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr) while (!list_empty(&splice) && nr) { ordered = list_first_entry(&splice, struct btrfs_ordered_extent, root_extent_list); + + if (range_end <= ordered->start || + ordered->start + ordered->disk_len <= range_start) { + list_move_tail(&ordered->root_extent_list, &skipped); + cond_resched_lock(&root->ordered_extent_lock); + continue; + } + list_move_tail(&ordered->root_extent_list, &root->ordered_extents); atomic_inc(&ordered->refs); @@ -694,6 +703,7 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr) nr--; count++; } + list_splice_tail(&skipped, &root->ordered_extents); list_splice_tail(&splice, &root->ordered_extents); spin_unlock(&root->ordered_extent_lock); @@ -708,7 +718,8 @@ int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr) return count; } -void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr) +void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, + const u64 range_start, const u64 range_len) { struct btrfs_root *root; struct list_head splice; @@ -728,7 +739,8 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr) &fs_info->ordered_roots); spin_unlock(&fs_info->ordered_root_lock); - done = btrfs_wait_ordered_extents(root, nr); + done = btrfs_wait_ordered_extents(root, nr, + range_start, range_len); btrfs_put_fs_root(root); spin_lock(&fs_info->ordered_root_lock); diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h index 23c96059cef2..8ef12623d65c 100644 --- a/fs/btrfs/ordered-data.h +++ b/fs/btrfs/ordered-data.h @@ -197,8 +197,10 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset, struct btrfs_ordered_extent *ordered); int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr, u32 *sum, int len); -int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr); -void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr); +int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr, + const u64 range_start, const u64 range_len); +void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr, + const u64 range_start, const u64 range_len); void btrfs_get_logged_extents(struct inode *inode, struct list_head *logged_list, const loff_t start, diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 08ef890deca6..1cfd35cfac76 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2418,7 +2418,7 @@ again: } out: if (ret) { - btrfs_std_error(root->fs_info, ret, NULL); + btrfs_handle_fs_error(root->fs_info, ret, NULL); if (!list_empty(&reloc_roots)) free_reloc_roots(&reloc_roots); @@ -4254,12 +4254,11 @@ int btrfs_relocate_block_group(struct btrfs_root *extent_root, u64 group_start) btrfs_info(extent_root->fs_info, "relocating block group %llu flags %llu", rc->block_group->key.objectid, rc->block_group->flags); - ret = btrfs_start_delalloc_roots(fs_info, 0, -1); - if (ret < 0) { - err = ret; - goto out; - } - btrfs_wait_ordered_roots(fs_info, 
-1); + btrfs_wait_block_group_reservations(rc->block_group); + btrfs_wait_nocow_writers(rc->block_group); + btrfs_wait_ordered_roots(fs_info, -1, + rc->block_group->key.objectid, + rc->block_group->key.offset); while (1) { mutex_lock(&fs_info->cleaner_mutex); diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 9fcd6dfc3266..b2b14e7115f1 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c @@ -284,7 +284,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) trans = btrfs_join_transaction(tree_root); if (IS_ERR(trans)) { err = PTR_ERR(trans); - btrfs_std_error(tree_root->fs_info, err, + btrfs_handle_fs_error(tree_root->fs_info, err, "Failed to start trans to delete " "orphan item"); break; @@ -293,7 +293,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) root_key.objectid); btrfs_end_transaction(trans, tree_root); if (err) { - btrfs_std_error(tree_root->fs_info, err, + btrfs_handle_fs_error(tree_root->fs_info, err, "Failed to delete root orphan " "item"); break; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 4678f03e878e..fa35cdc46494 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1350,7 +1350,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock, recover->bbio = bbio; recover->map_length = mapped_length; - BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); + BUG_ON(page_index >= SCRUB_MAX_PAGES_PER_BLOCK); nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); @@ -2127,6 +2127,8 @@ static void scrub_missing_raid56_end_io(struct bio *bio) if (bio->bi_error) sblock->no_io_error_seen = 0; + bio_put(bio); + btrfs_queue_work(fs_info->scrub_workers, &sblock->work); } @@ -2860,7 +2862,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, int extent_mirror_num; int stop_loop = 0; - nsectors = map->stripe_len / root->sectorsize; + nsectors = div_u64(map->stripe_len, root->sectorsize); bitmap_len = scrub_calc_parity_bitmap_len(nsectors); sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len, GFP_NOFS); @@ -3070,7 +3072,6 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, int slot; u64 nstripes; struct extent_buffer *l; - struct btrfs_key key; u64 physical; u64 logical; u64 logic_end; @@ -3079,7 +3080,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, int mirror_num; struct reada_control *reada1; struct reada_control *reada2; - struct btrfs_key key_start; + struct btrfs_key key; struct btrfs_key key_end; u64 increment = map->stripe_len; u64 offset; @@ -3158,21 +3159,21 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, scrub_blocked_if_needed(fs_info); /* FIXME it might be better to start readahead at commit root */ - key_start.objectid = logical; - key_start.type = BTRFS_EXTENT_ITEM_KEY; - key_start.offset = (u64)0; + key.objectid = logical; + key.type = BTRFS_EXTENT_ITEM_KEY; + key.offset = (u64)0; key_end.objectid = logic_end; key_end.type = BTRFS_METADATA_ITEM_KEY; key_end.offset = (u64)-1; - reada1 = btrfs_reada_add(root, &key_start, &key_end); + reada1 = btrfs_reada_add(root, &key, &key_end); - key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID; - key_start.type = BTRFS_EXTENT_CSUM_KEY; - key_start.offset = logical; + key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; + key.type = BTRFS_EXTENT_CSUM_KEY; + key.offset = logical; key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID; key_end.type = BTRFS_EXTENT_CSUM_KEY; key_end.offset = logic_end; - reada2 = btrfs_reada_add(csum_root, &key_start, &key_end); + reada2 = 
btrfs_reada_add(csum_root, &key, &key_end); if (!IS_ERR(reada1)) btrfs_reada_wait(reada1); diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 8d358c547c59..6a8c86074aa4 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -5939,6 +5939,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) u32 i; u64 *clone_sources_tmp = NULL; int clone_sources_to_rollback = 0; + unsigned alloc_size; int sort_clone_roots = 0; int index; @@ -5978,6 +5979,12 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) goto out; } + if (arg->clone_sources_count > + ULLONG_MAX / sizeof(*arg->clone_sources)) { + ret = -EINVAL; + goto out; + } + if (!access_ok(VERIFY_READ, arg->clone_sources, sizeof(*arg->clone_sources) * arg->clone_sources_count)) { @@ -6022,40 +6029,53 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) sctx->clone_roots_cnt = arg->clone_sources_count; sctx->send_max_size = BTRFS_SEND_BUF_SIZE; - sctx->send_buf = vmalloc(sctx->send_max_size); + sctx->send_buf = kmalloc(sctx->send_max_size, GFP_KERNEL | __GFP_NOWARN); if (!sctx->send_buf) { - ret = -ENOMEM; - goto out; + sctx->send_buf = vmalloc(sctx->send_max_size); + if (!sctx->send_buf) { + ret = -ENOMEM; + goto out; + } } - sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); + sctx->read_buf = kmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL | __GFP_NOWARN); if (!sctx->read_buf) { - ret = -ENOMEM; - goto out; + sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE); + if (!sctx->read_buf) { + ret = -ENOMEM; + goto out; + } } sctx->pending_dir_moves = RB_ROOT; sctx->waiting_dir_moves = RB_ROOT; sctx->orphan_dirs = RB_ROOT; - sctx->clone_roots = vzalloc(sizeof(struct clone_root) * - (arg->clone_sources_count + 1)); + alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1); + + sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN); if (!sctx->clone_roots) { - ret = -ENOMEM; - goto out; + sctx->clone_roots = vzalloc(alloc_size); + if (!sctx->clone_roots) { + ret = -ENOMEM; + goto out; + } } + alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources); + if (arg->clone_sources_count) { - clone_sources_tmp = vmalloc(arg->clone_sources_count * - sizeof(*arg->clone_sources)); + clone_sources_tmp = kmalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN); if (!clone_sources_tmp) { - ret = -ENOMEM; - goto out; + clone_sources_tmp = vmalloc(alloc_size); + if (!clone_sources_tmp) { + ret = -ENOMEM; + goto out; + } } ret = copy_from_user(clone_sources_tmp, arg->clone_sources, - arg->clone_sources_count * - sizeof(*arg->clone_sources)); + alloc_size); if (ret) { ret = -EFAULT; goto out; @@ -6089,7 +6109,7 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) sctx->clone_roots[i].root = clone_root; clone_sources_to_rollback = i + 1; } - vfree(clone_sources_tmp); + kvfree(clone_sources_tmp); clone_sources_tmp = NULL; } @@ -6207,15 +6227,15 @@ out: btrfs_root_dec_send_in_progress(sctx->parent_root); kfree(arg); - vfree(clone_sources_tmp); + kvfree(clone_sources_tmp); if (sctx) { if (sctx->send_filp) fput(sctx->send_filp); - vfree(sctx->clone_roots); - vfree(sctx->send_buf); - vfree(sctx->read_buf); + kvfree(sctx->clone_roots); + kvfree(sctx->send_buf); + kvfree(sctx->read_buf); name_cache_free(sctx); diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 00b8f37cc306..bf71071ab6f6 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -97,15 +97,6 @@ const char *btrfs_decode_error(int errno) return errstr; } -static void save_error_info(struct btrfs_fs_info *fs_info) -{ - /* - * today we only save 
the error info into ram. Long term we'll - * also send it down to the disk - */ - set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); -} - /* btrfs handle error by forcing the filesystem readonly */ static void btrfs_handle_error(struct btrfs_fs_info *fs_info) { @@ -131,11 +122,11 @@ static void btrfs_handle_error(struct btrfs_fs_info *fs_info) } /* - * __btrfs_std_error decodes expected errors from the caller and + * __btrfs_handle_fs_error decodes expected errors from the caller and * invokes the approciate error response. */ __cold -void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, +void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function, unsigned int line, int errno, const char *fmt, ...) { struct super_block *sb = fs_info->sb; @@ -170,8 +161,13 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function, } #endif + /* + * Today we only save the error info to memory. Long term we'll + * also send it down to the disk + */ + set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state); + /* Don't go through full error handling during mount */ - save_error_info(fs_info); if (sb->s_flags & MS_BORN) btrfs_handle_error(fs_info); } @@ -252,7 +248,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans, /* Wake up anybody who may be waiting on this transaction */ wake_up(&root->fs_info->transaction_wait); wake_up(&root->fs_info->transaction_blocked_wait); - __btrfs_std_error(root->fs_info, function, line, errno, NULL); + __btrfs_handle_fs_error(root->fs_info, function, line, errno, NULL); } /* * __btrfs_panic decodes unexpected, fatal errors from the caller, @@ -1160,7 +1156,7 @@ int btrfs_sync_fs(struct super_block *sb, int wait) return 0; } - btrfs_wait_ordered_roots(fs_info, -1); + btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1); trans = btrfs_attach_transaction_barrier(root); if (IS_ERR(trans)) { @@ -1488,10 +1484,10 @@ static int setup_security_options(struct btrfs_fs_info *fs_info, memcpy(&fs_info->security_opts, sec_opts, sizeof(*sec_opts)); } else { /* - * Since SELinux(the only one supports security_mnt_opts) does - * NOT support changing context during remount/mount same sb, - * This must be the same or part of the same security options, - * just free it. + * Since SELinux (the only one supporting security_mnt_opts) + * does NOT support changing context during remount/mount of + * the same sb, this must be the same or part of the same + * security options, just free it. */ security_free_mnt_opts(sec_opts); } @@ -1669,8 +1665,8 @@ static inline void btrfs_remount_cleanup(struct btrfs_fs_info *fs_info, unsigned long old_opts) { /* - * We need cleanup all defragable inodes if the autodefragment is - * close or the fs is R/O. + * We need to cleanup all defragable inodes if the autodefragment is + * close or the filesystem is read only. 
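Looking back at the clone and send hunks earlier in this patch, which replace unconditional vmalloc() of large buffers with a kmalloc()-first attempt and a vmalloc() fallback freed through kvfree(): reduced to a sketch (the helper name is made up, and this predates a generic kvmalloc() helper), the pattern is:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Sketch only: try a physically contiguous allocation first; __GFP_NOWARN
 * keeps the expected failure for large, fragmented requests quiet, and
 * vmalloc() is used as the fallback.  Free the result with kvfree().
 */
static void *big_buf_alloc(size_t size)
{
	void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

	if (!buf)
		buf = vmalloc(size);
	return buf;
}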
*/ if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) && (!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) || @@ -2051,9 +2047,10 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv; int ret; u64 thresh = 0; + int mixed = 0; /* - * holding chunk_muext to avoid allocating new chunks, holding + * holding chunk_mutex to avoid allocating new chunks, holding * device_list_mutex to avoid the device being removed */ rcu_read_lock(); @@ -2076,8 +2073,17 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) } } } - if (found->flags & BTRFS_BLOCK_GROUP_METADATA) - total_free_meta += found->disk_total - found->disk_used; + + /* + * Metadata in mixed block goup profiles are accounted in data + */ + if (!mixed && found->flags & BTRFS_BLOCK_GROUP_METADATA) { + if (found->flags & BTRFS_BLOCK_GROUP_DATA) + mixed = 1; + else + total_free_meta += found->disk_total - + found->disk_used; + } total_used += found->disk_used; } @@ -2090,7 +2096,11 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) /* Account global block reserve as used, it's in logical size already */ spin_lock(&block_rsv->lock); - buf->f_bfree -= block_rsv->size >> bits; + /* Mixed block groups accounting is not byte-accurate, avoid overflow */ + if (buf->f_bfree >= block_rsv->size >> bits) + buf->f_bfree -= block_rsv->size >> bits; + else + buf->f_bfree = 0; spin_unlock(&block_rsv->lock); buf->f_bavail = div_u64(total_free_data, factor); @@ -2115,7 +2125,7 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf) */ thresh = 4 * 1024 * 1024; - if (total_free_meta - thresh < block_rsv->size) + if (!mixed && total_free_meta - thresh < block_rsv->size) buf->f_bavail = 0; buf->f_type = BTRFS_SUPER_MAGIC; diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 539e7b5e3f86..4879656bda3c 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -120,6 +120,9 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj, if (!fs_info) return -EPERM; + if (fs_info->sb->s_flags & MS_RDONLY) + return -EROFS; + ret = kstrtoul(skip_spaces(buf), 0, &val); if (ret) return ret; @@ -364,7 +367,13 @@ static ssize_t btrfs_label_show(struct kobject *kobj, { struct btrfs_fs_info *fs_info = to_fs_info(kobj); char *label = fs_info->super_copy->label; - return snprintf(buf, PAGE_SIZE, label[0] ? "%s\n" : "%s", label); + ssize_t ret; + + spin_lock(&fs_info->super_lock); + ret = snprintf(buf, PAGE_SIZE, label[0] ? 
"%s\n" : "%s", label); + spin_unlock(&fs_info->super_lock); + + return ret; } static ssize_t btrfs_label_store(struct kobject *kobj, @@ -374,6 +383,9 @@ static ssize_t btrfs_label_store(struct kobject *kobj, struct btrfs_fs_info *fs_info = to_fs_info(kobj); size_t p_len; + if (!fs_info) + return -EPERM; + if (fs_info->sb->s_flags & MS_RDONLY) return -EROFS; diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 43885e51b882..5b0b758a3f79 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -311,10 +311,11 @@ loop: * when the transaction commits */ static int record_root_in_trans(struct btrfs_trans_handle *trans, - struct btrfs_root *root) + struct btrfs_root *root, + int force) { - if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) && - root->last_trans < trans->transid) { + if ((test_bit(BTRFS_ROOT_REF_COWS, &root->state) && + root->last_trans < trans->transid) || force) { WARN_ON(root == root->fs_info->extent_root); WARN_ON(root->commit_root != root->node); @@ -331,7 +332,7 @@ static int record_root_in_trans(struct btrfs_trans_handle *trans, smp_wmb(); spin_lock(&root->fs_info->fs_roots_radix_lock); - if (root->last_trans == trans->transid) { + if (root->last_trans == trans->transid && !force) { spin_unlock(&root->fs_info->fs_roots_radix_lock); return 0; } @@ -402,7 +403,7 @@ int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, return 0; mutex_lock(&root->fs_info->reloc_mutex); - record_root_in_trans(trans, root); + record_root_in_trans(trans, root, 0); mutex_unlock(&root->fs_info->reloc_mutex); return 0; @@ -1310,6 +1311,97 @@ int btrfs_defrag_root(struct btrfs_root *root) return ret; } +/* Bisesctability fixup, remove in 4.8 */ +#ifndef btrfs_std_error +#define btrfs_std_error btrfs_handle_fs_error +#endif + +/* + * Do all special snapshot related qgroup dirty hack. + * + * Will do all needed qgroup inherit and dirty hack like switch commit + * roots inside one transaction and write all btree into disk, to make + * qgroup works. + */ +static int qgroup_account_snapshot(struct btrfs_trans_handle *trans, + struct btrfs_root *src, + struct btrfs_root *parent, + struct btrfs_qgroup_inherit *inherit, + u64 dst_objectid) +{ + struct btrfs_fs_info *fs_info = src->fs_info; + int ret; + + /* + * Save some performance in the case that qgroups are not + * enabled. If this check races with the ioctl, rescan will + * kick in anyway. 
+ */ + mutex_lock(&fs_info->qgroup_ioctl_lock); + if (!fs_info->quota_enabled) { + mutex_unlock(&fs_info->qgroup_ioctl_lock); + return 0; + } + mutex_unlock(&fs_info->qgroup_ioctl_lock); + + /* + * We are going to commit transaction, see btrfs_commit_transaction() + * comment for reason locking tree_log_mutex + */ + mutex_lock(&fs_info->tree_log_mutex); + + ret = commit_fs_roots(trans, src); + if (ret) + goto out; + ret = btrfs_qgroup_prepare_account_extents(trans, fs_info); + if (ret < 0) + goto out; + ret = btrfs_qgroup_account_extents(trans, fs_info); + if (ret < 0) + goto out; + + /* Now qgroup are all updated, we can inherit it to new qgroups */ + ret = btrfs_qgroup_inherit(trans, fs_info, + src->root_key.objectid, dst_objectid, + inherit); + if (ret < 0) + goto out; + + /* + * Now we do a simplified commit transaction, which will: + * 1) commit all subvolume and extent tree + * To ensure all subvolume and extent tree have a valid + * commit_root to accounting later insert_dir_item() + * 2) write all btree blocks onto disk + * This is to make sure later btree modification will be cowed + * Or commit_root can be populated and cause wrong qgroup numbers + * In this simplified commit, we don't really care about other trees + * like chunk and root tree, as they won't affect qgroup. + * And we don't write super to avoid half committed status. + */ + ret = commit_cowonly_roots(trans, src); + if (ret) + goto out; + switch_commit_roots(trans->transaction, fs_info); + ret = btrfs_write_and_wait_transaction(trans, src); + if (ret) + btrfs_std_error(fs_info, ret, + "Error while writing out transaction for qgroup"); + +out: + mutex_unlock(&fs_info->tree_log_mutex); + + /* + * Force parent root to be updated, as we recorded it before so its + * last_trans == cur_transid. + * Or it won't be committed again onto disk after later + * insert_dir_item() + */ + if (!ret) + record_root_in_trans(trans, parent, 1); + return ret; +} + /* * new snapshots need to be created at a very specific time in the * transaction commit. This does the actual creation. @@ -1383,7 +1475,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, dentry = pending->dentry; parent_inode = pending->dir; parent_root = BTRFS_I(parent_inode)->root; - record_root_in_trans(trans, parent_root); + record_root_in_trans(trans, parent_root, 0); cur_time = current_fs_time(parent_inode->i_sb); @@ -1420,7 +1512,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - record_root_in_trans(trans, root); + record_root_in_trans(trans, root, 0); btrfs_set_root_last_snapshot(&root->root_item, trans->transid); memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); btrfs_check_and_init_root_item(new_root_item); @@ -1516,6 +1608,17 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } + /* + * Do special qgroup accounting for snapshot, as we do some qgroup + * snapshot hack to do fast snapshot. + * To co-operate with that hack, we do hack again. 
+ * Or snapshot will be greatly slowed down by a subtree qgroup rescan + */ + ret = qgroup_account_snapshot(trans, root, parent_root, + pending->inherit, objectid); + if (ret < 0) + goto fail; + ret = btrfs_insert_dir_item(trans, parent_root, dentry->d_name.name, dentry->d_name.len, parent_inode, &key, @@ -1559,23 +1662,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, goto fail; } - /* - * account qgroup counters before qgroup_inherit() - */ - ret = btrfs_qgroup_prepare_account_extents(trans, fs_info); - if (ret) - goto fail; - ret = btrfs_qgroup_account_extents(trans, fs_info); - if (ret) - goto fail; - ret = btrfs_qgroup_inherit(trans, fs_info, - root->root_key.objectid, - objectid, pending->inherit); - if (ret) { - btrfs_abort_transaction(trans, root, ret); - goto fail; - } - fail: pending->error = ret; dir_item_existed: @@ -1821,7 +1907,7 @@ static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info) static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) { if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT)) - btrfs_wait_ordered_roots(fs_info, -1); + btrfs_wait_ordered_roots(fs_info, -1, 0, (u64)-1); } static inline void @@ -2145,7 +2231,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, ret = btrfs_write_and_wait_transaction(trans, root); if (ret) { - btrfs_std_error(root->fs_info, ret, + btrfs_handle_fs_error(root->fs_info, ret, "Error while writing out transaction"); mutex_unlock(&root->fs_info->tree_log_mutex); goto scrub_continue; diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 517d0ccb351e..8aaca5c6af94 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4141,6 +4141,7 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, INIT_LIST_HEAD(&extents); + down_write(&BTRFS_I(inode)->dio_sem); write_lock(&tree->lock); test_gen = root->fs_info->last_trans_committed; @@ -4169,13 +4170,20 @@ static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, } list_sort(NULL, &extents, extent_cmp); + btrfs_get_logged_extents(inode, logged_list, start, end); /* - * Collect any new ordered extents within the range. This is to - * prevent logging file extent items without waiting for the disk - * location they point to being written. We do this only to deal - * with races against concurrent lockless direct IO writes. + * Some ordered extents started by fsync might have completed + * before we could collect them into the list logged_list, which + * means they're gone, not in our logged_list nor in the inode's + * ordered tree. We want the application/user space to know an + * error happened while attempting to persist file data so that + * it can take proper action. If such error happened, we leave + * without writing to the log tree and the fsync must report the + * file data write error and not commit the current transaction. */ - btrfs_get_logged_extents(inode, logged_list, start, end); + ret = btrfs_inode_check_errors(inode); + if (ret) + ctx->io_err = ret; process: while (!list_empty(&extents)) { em = list_entry(extents.next, struct extent_map, list); @@ -4202,6 +4210,7 @@ process: } WARN_ON(!list_empty(&extents)); write_unlock(&tree->lock); + up_write(&BTRFS_I(inode)->dio_sem); btrfs_release_path(path); return ret; @@ -4623,23 +4632,6 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, mutex_lock(&BTRFS_I(inode)->log_mutex); /* - * Collect ordered extents only if we are logging data. 
This is to - * ensure a subsequent request to log this inode in LOG_INODE_ALL mode - * will process the ordered extents if they still exists at the time, - * because when we collect them we test and set for the flag - * BTRFS_ORDERED_LOGGED to prevent multiple log requests to process the - * same ordered extents. The consequence for the LOG_INODE_ALL log mode - * not processing the ordered extents is that we end up logging the - * corresponding file extent items, based on the extent maps in the - * inode's extent_map_tree's modified_list, without logging the - * respective checksums (since the may still be only attached to the - * ordered extents and have not been inserted in the csum tree by - * btrfs_finish_ordered_io() yet). - */ - if (inode_only == LOG_INODE_ALL) - btrfs_get_logged_extents(inode, &logged_list, start, end); - - /* * a brute force approach to making sure we get the most uptodate * copies of everything. */ @@ -4846,21 +4838,6 @@ log_extents: goto out_unlock; } if (fast_search) { - /* - * Some ordered extents started by fsync might have completed - * before we collected the ordered extents in logged_list, which - * means they're gone, not in our logged_list nor in the inode's - * ordered tree. We want the application/user space to know an - * error happened while attempting to persist file data so that - * it can take proper action. If such error happened, we leave - * without writing to the log tree and the fsync must report the - * file data write error and not commit the current transaction. - */ - err = btrfs_inode_check_errors(inode); - if (err) { - ctx->io_err = err; - goto out_unlock; - } ret = btrfs_log_changed_extents(trans, root, inode, dst_path, &logged_list, ctx, start, end); if (ret) { @@ -4988,7 +4965,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, goto out; if (!S_ISDIR(inode->i_mode)) { - if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb) + if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) goto out; inode = d_inode(parent); } @@ -5009,7 +4986,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, break; } - if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb) + if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) break; if (IS_ROOT(parent)) @@ -5158,7 +5135,7 @@ process_leaf: } ctx->log_new_dentries = false; - if (type == BTRFS_FT_DIR) + if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) log_mode = LOG_INODE_ALL; btrfs_release_path(path); ret = btrfs_log_inode(trans, root, di_inode, @@ -5278,11 +5255,16 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, if (IS_ERR(dir_inode)) continue; + if (ctx) + ctx->log_new_dentries = false; ret = btrfs_log_inode(trans, root, dir_inode, LOG_INODE_ALL, 0, LLONG_MAX, ctx); if (!ret && btrfs_must_commit_transaction(trans, dir_inode)) ret = 1; + if (!ret && ctx && ctx->log_new_dentries) + ret = log_new_dir_dentries(trans, root, + dir_inode, ctx); iput(dir_inode); if (ret) goto out; @@ -5422,7 +5404,7 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, } while (1) { - if (!parent || d_really_is_negative(parent) || sb != d_inode(parent)->i_sb) + if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) break; inode = d_inode(parent); @@ -5519,7 +5501,7 @@ int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) ret = walk_log_tree(trans, log_root_tree, &wc); if (ret) { - btrfs_std_error(fs_info, ret, "Failed to pin buffers 
while " + btrfs_handle_fs_error(fs_info, ret, "Failed to pin buffers while " "recovering log root tree."); goto error; } @@ -5533,7 +5515,7 @@ again: ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); if (ret < 0) { - btrfs_std_error(fs_info, ret, + btrfs_handle_fs_error(fs_info, ret, "Couldn't find tree log root."); goto error; } @@ -5551,7 +5533,7 @@ again: log = btrfs_read_fs_root(log_root_tree, &found_key); if (IS_ERR(log)) { ret = PTR_ERR(log); - btrfs_std_error(fs_info, ret, + btrfs_handle_fs_error(fs_info, ret, "Couldn't read tree log root."); goto error; } @@ -5566,7 +5548,7 @@ again: free_extent_buffer(log->node); free_extent_buffer(log->commit_root); kfree(log); - btrfs_std_error(fs_info, ret, "Couldn't read target root " + btrfs_handle_fs_error(fs_info, ret, "Couldn't read target root " "for tree log recovery."); goto error; } @@ -5652,11 +5634,9 @@ void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, * into the file. When the file is logged we check it and * don't log the parents if the file is fully on disk. */ - if (S_ISREG(inode->i_mode)) { - mutex_lock(&BTRFS_I(inode)->log_mutex); - BTRFS_I(inode)->last_unlink_trans = trans->transid; - mutex_unlock(&BTRFS_I(inode)->log_mutex); - } + mutex_lock(&BTRFS_I(inode)->log_mutex); + BTRFS_I(inode)->last_unlink_trans = trans->transid; + mutex_unlock(&BTRFS_I(inode)->log_mutex); /* * if this directory was already logged any new diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index bd0f45fb38c4..2b88127bba5b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -20,13 +20,13 @@ #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/blkdev.h> -#include <linux/random.h> #include <linux/iocontext.h> #include <linux/capability.h> #include <linux/ratelimit.h> #include <linux/kthread.h> #include <linux/raid/pq.h> #include <linux/semaphore.h> +#include <linux/uuid.h> #include <asm/div64.h> #include "ctree.h" #include "extent_map.h" @@ -118,6 +118,21 @@ const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = { [BTRFS_RAID_RAID6] = BTRFS_BLOCK_GROUP_RAID6, }; +/* + * Table to convert BTRFS_RAID_* to the error code if minimum number of devices + * condition is not met. Zero means there's no corresponding + * BTRFS_ERROR_DEV_*_NOT_MET value. + */ +const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES] = { + [BTRFS_RAID_RAID10] = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, + [BTRFS_RAID_RAID1] = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, + [BTRFS_RAID_DUP] = 0, + [BTRFS_RAID_RAID0] = 0, + [BTRFS_RAID_SINGLE] = 0, + [BTRFS_RAID_RAID5] = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, + [BTRFS_RAID_RAID6] = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, +}; + static int init_first_rw_device(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct btrfs_device *device); @@ -699,7 +714,8 @@ static noinline int device_list_add(const char *path, * if there is new btrfs on an already registered device, * then remove the stale device entry. 
*/ - btrfs_free_stale_device(device); + if (ret > 0) + btrfs_free_stale_device(device); *fs_devices_ret = fs_devices; @@ -988,6 +1004,56 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, return ret; } +void btrfs_release_disk_super(struct page *page) +{ + kunmap(page); + put_page(page); +} + +int btrfs_read_disk_super(struct block_device *bdev, u64 bytenr, + struct page **page, struct btrfs_super_block **disk_super) +{ + void *p; + pgoff_t index; + + /* make sure our super fits in the device */ + if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) + return 1; + + /* make sure our super fits in the page */ + if (sizeof(**disk_super) > PAGE_SIZE) + return 1; + + /* make sure our super doesn't straddle pages on disk */ + index = bytenr >> PAGE_SHIFT; + if ((bytenr + sizeof(**disk_super) - 1) >> PAGE_SHIFT != index) + return 1; + + /* pull in the page with our super */ + *page = read_cache_page_gfp(bdev->bd_inode->i_mapping, + index, GFP_KERNEL); + + if (IS_ERR_OR_NULL(*page)) + return 1; + + p = kmap(*page); + + /* align our pointer to the offset of the super block */ + *disk_super = p + (bytenr & ~PAGE_MASK); + + if (btrfs_super_bytenr(*disk_super) != bytenr || + btrfs_super_magic(*disk_super) != BTRFS_MAGIC) { + btrfs_release_disk_super(*page); + return 1; + } + + if ((*disk_super)->label[0] && + (*disk_super)->label[BTRFS_LABEL_SIZE - 1]) + (*disk_super)->label[BTRFS_LABEL_SIZE - 1] = '\0'; + + return 0; +} + /* * Look for a btrfs signature on a device. This may be called out of the mount path * and we are not allowed to call set_blocksize during the scan. The superblock @@ -999,13 +1065,11 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, struct btrfs_super_block *disk_super; struct block_device *bdev; struct page *page; - void *p; int ret = -EINVAL; u64 devid; u64 transid; u64 total_devices; u64 bytenr; - pgoff_t index; /* * we would like to check all the supers, but that would make @@ -1018,41 +1082,14 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, mutex_lock(&uuid_mutex); bdev = blkdev_get_by_path(path, flags, holder); - if (IS_ERR(bdev)) { ret = PTR_ERR(bdev); goto error; } - /* make sure our super fits in the device */ - if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode)) - goto error_bdev_put; - - /* make sure our super fits in the page */ - if (sizeof(*disk_super) > PAGE_SIZE) - goto error_bdev_put; - - /* make sure our super doesn't straddle pages on disk */ - index = bytenr >> PAGE_SHIFT; - if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index) - goto error_bdev_put; - - /* pull in the page with our super */ - page = read_cache_page_gfp(bdev->bd_inode->i_mapping, - index, GFP_NOFS); - - if (IS_ERR_OR_NULL(page)) + if (btrfs_read_disk_super(bdev, bytenr, &page, &disk_super)) goto error_bdev_put; - p = kmap(page); - - /* align our pointer to the offset of the super block */ - disk_super = p + (bytenr & ~PAGE_MASK); - - if (btrfs_super_bytenr(disk_super) != bytenr || - btrfs_super_magic(disk_super) != BTRFS_MAGIC) - goto error_unmap; - devid = btrfs_stack_device_id(&disk_super->dev_item); transid = btrfs_super_generation(disk_super); total_devices = btrfs_super_num_devices(disk_super); @@ -1060,8 +1097,6 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, ret = device_list_add(path, disk_super, devid, fs_devices_ret); if (ret > 0) { if (disk_super->label[0]) { - if (disk_super->label[BTRFS_LABEL_SIZE - 1]) - disk_super->label[BTRFS_LABEL_SIZE - 1] = '\0'; printk(KERN_INFO 
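Aside: the three early returns at the top of the new btrfs_read_disk_super() above are plain offset arithmetic: the superblock must fit inside the device, fit inside one page, and not straddle a page boundary. A minimal user-space sketch of the straddle test, assuming 4 KiB pages and using 4096 bytes as a stand-in for sizeof(struct btrfs_super_block); names here are illustrative, not kernel code:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12           /* assumption: 4 KiB pages */
#define SUPER_SIZE 4096         /* stand-in for sizeof(struct btrfs_super_block) */

static int straddles_page(uint64_t bytenr)
{
        uint64_t index = bytenr >> PAGE_SHIFT;

        /* same test as in btrfs_read_disk_super(): last byte must land in the same page */
        return ((bytenr + SUPER_SIZE - 1) >> PAGE_SHIFT) != index;
}

int main(void)
{
        printf("64 KiB offset:     %s\n", straddles_page(64 * 1024) ? "straddles" : "ok");
        printf("64 KiB + 1 offset: %s\n", straddles_page(64 * 1024 + 1) ? "straddles" : "ok");
        return 0;
}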
"BTRFS: device label %s ", disk_super->label); } else { printk(KERN_INFO "BTRFS: device fsid %pU ", disk_super->fsid); @@ -1073,9 +1108,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, if (!ret && fs_devices_ret) (*fs_devices_ret)->total_devices = total_devices; -error_unmap: - kunmap(page); - put_page(page); + btrfs_release_disk_super(page); error_bdev_put: blkdev_put(bdev, flags); @@ -1454,7 +1487,7 @@ again: extent = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_extent); } else { - btrfs_std_error(root->fs_info, ret, "Slot search failed"); + btrfs_handle_fs_error(root->fs_info, ret, "Slot search failed"); goto out; } @@ -1462,7 +1495,7 @@ again: ret = btrfs_del_item(trans, root, path); if (ret) { - btrfs_std_error(root->fs_info, ret, + btrfs_handle_fs_error(root->fs_info, ret, "Failed to remove dev extent item"); } else { set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags); @@ -1688,32 +1721,92 @@ out: return ret; } -int btrfs_rm_device(struct btrfs_root *root, char *device_path) +/* + * Verify that @num_devices satisfies the RAID profile constraints in the whole + * filesystem. It's up to the caller to adjust that number regarding eg. device + * replace. + */ +static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info, + u64 num_devices) +{ + u64 all_avail; + unsigned seq; + int i; + + do { + seq = read_seqbegin(&fs_info->profiles_lock); + + all_avail = fs_info->avail_data_alloc_bits | + fs_info->avail_system_alloc_bits | + fs_info->avail_metadata_alloc_bits; + } while (read_seqretry(&fs_info->profiles_lock, seq)); + + for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) { + if (!(all_avail & btrfs_raid_group[i])) + continue; + + if (num_devices < btrfs_raid_array[i].devs_min) { + int ret = btrfs_raid_mindev_error[i]; + + if (ret) + return ret; + } + } + + return 0; +} + +struct btrfs_device *btrfs_find_next_active_device(struct btrfs_fs_devices *fs_devs, + struct btrfs_device *device) { - struct btrfs_device *device; struct btrfs_device *next_device; - struct block_device *bdev; - struct buffer_head *bh = NULL; - struct btrfs_super_block *disk_super; + + list_for_each_entry(next_device, &fs_devs->devices, dev_list) { + if (next_device != device && + !next_device->missing && next_device->bdev) + return next_device; + } + + return NULL; +} + +/* + * Helper function to check if the given device is part of s_bdev / latest_bdev + * and replace it with the provided or the next active device, in the context + * where this function called, there should be always be another device (or + * this_dev) which is active. 
+ */ +void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info, + struct btrfs_device *device, struct btrfs_device *this_dev) +{ + struct btrfs_device *next_device; + + if (this_dev) + next_device = this_dev; + else + next_device = btrfs_find_next_active_device(fs_info->fs_devices, + device); + ASSERT(next_device); + + if (fs_info->sb->s_bdev && + (fs_info->sb->s_bdev == device->bdev)) + fs_info->sb->s_bdev = next_device->bdev; + + if (fs_info->fs_devices->latest_bdev == device->bdev) + fs_info->fs_devices->latest_bdev = next_device->bdev; +} + +int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid) +{ + struct btrfs_device *device; struct btrfs_fs_devices *cur_devices; - u64 all_avail; - u64 devid; u64 num_devices; - u8 *dev_uuid; - unsigned seq; int ret = 0; bool clear_super = false; + char *dev_name = NULL; mutex_lock(&uuid_mutex); - do { - seq = read_seqbegin(&root->fs_info->profiles_lock); - - all_avail = root->fs_info->avail_data_alloc_bits | - root->fs_info->avail_system_alloc_bits | - root->fs_info->avail_metadata_alloc_bits; - } while (read_seqretry(&root->fs_info->profiles_lock, seq)); - num_devices = root->fs_info->fs_devices->num_devices; btrfs_dev_replace_lock(&root->fs_info->dev_replace, 0); if (btrfs_dev_replace_is_ongoing(&root->fs_info->dev_replace)) { @@ -1722,78 +1815,23 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) } btrfs_dev_replace_unlock(&root->fs_info->dev_replace, 0); - if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) && num_devices <= 4) { - ret = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET; - goto out; - } - - if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) && num_devices <= 2) { - ret = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET; + ret = btrfs_check_raid_min_devices(root->fs_info, num_devices - 1); + if (ret) goto out; - } - if ((all_avail & BTRFS_BLOCK_GROUP_RAID5) && - root->fs_info->fs_devices->rw_devices <= 2) { - ret = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET; - goto out; - } - if ((all_avail & BTRFS_BLOCK_GROUP_RAID6) && - root->fs_info->fs_devices->rw_devices <= 3) { - ret = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET; + ret = btrfs_find_device_by_devspec(root, devid, device_path, + &device); + if (ret) goto out; - } - - if (strcmp(device_path, "missing") == 0) { - struct list_head *devices; - struct btrfs_device *tmp; - - device = NULL; - devices = &root->fs_info->fs_devices->devices; - /* - * It is safe to read the devices since the volume_mutex - * is held. 
- */ - list_for_each_entry(tmp, devices, dev_list) { - if (tmp->in_fs_metadata && - !tmp->is_tgtdev_for_dev_replace && - !tmp->bdev) { - device = tmp; - break; - } - } - bdev = NULL; - bh = NULL; - disk_super = NULL; - if (!device) { - ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND; - goto out; - } - } else { - ret = btrfs_get_bdev_and_sb(device_path, - FMODE_WRITE | FMODE_EXCL, - root->fs_info->bdev_holder, 0, - &bdev, &bh); - if (ret) - goto out; - disk_super = (struct btrfs_super_block *)bh->b_data; - devid = btrfs_stack_device_id(&disk_super->dev_item); - dev_uuid = disk_super->dev_item.uuid; - device = btrfs_find_device(root->fs_info, devid, dev_uuid, - disk_super->fsid); - if (!device) { - ret = -ENOENT; - goto error_brelse; - } - } if (device->is_tgtdev_for_dev_replace) { ret = BTRFS_ERROR_DEV_TGT_REPLACE; - goto error_brelse; + goto out; } if (device->writeable && root->fs_info->fs_devices->rw_devices == 1) { ret = BTRFS_ERROR_DEV_ONLY_WRITABLE; - goto error_brelse; + goto out; } if (device->writeable) { @@ -1801,6 +1839,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) list_del_init(&device->dev_alloc_list); device->fs_devices->rw_devices--; unlock_chunks(root); + dev_name = kstrdup(device->name->str, GFP_KERNEL); + if (!dev_name) { + ret = -ENOMEM; + goto error_undo; + } clear_super = true; } @@ -1842,12 +1885,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) if (device->missing) device->fs_devices->missing_devices--; - next_device = list_entry(root->fs_info->fs_devices->devices.next, - struct btrfs_device, dev_list); - if (device->bdev == root->fs_info->sb->s_bdev) - root->fs_info->sb->s_bdev = next_device->bdev; - if (device->bdev == root->fs_info->fs_devices->latest_bdev) - root->fs_info->fs_devices->latest_bdev = next_device->bdev; + btrfs_assign_next_active_device(root->fs_info, device, NULL); if (device->bdev) { device->fs_devices->open_devices--; @@ -1883,63 +1921,23 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) * at this point, the device is zero sized. 
We want to * remove it from the devices list and zero out the old super */ - if (clear_super && disk_super) { - u64 bytenr; - int i; - - /* make sure this device isn't detected as part of - * the FS anymore - */ - memset(&disk_super->magic, 0, sizeof(disk_super->magic)); - set_buffer_dirty(bh); - sync_dirty_buffer(bh); - - /* clear the mirror copies of super block on the disk - * being removed, 0th copy is been taken care above and - * the below would take of the rest - */ - for (i = 1; i < BTRFS_SUPER_MIRROR_MAX; i++) { - bytenr = btrfs_sb_offset(i); - if (bytenr + BTRFS_SUPER_INFO_SIZE >= - i_size_read(bdev->bd_inode)) - break; - - brelse(bh); - bh = __bread(bdev, bytenr / 4096, - BTRFS_SUPER_INFO_SIZE); - if (!bh) - continue; - - disk_super = (struct btrfs_super_block *)bh->b_data; - - if (btrfs_super_bytenr(disk_super) != bytenr || - btrfs_super_magic(disk_super) != BTRFS_MAGIC) { - continue; - } - memset(&disk_super->magic, 0, - sizeof(disk_super->magic)); - set_buffer_dirty(bh); - sync_dirty_buffer(bh); + if (clear_super) { + struct block_device *bdev; + + bdev = blkdev_get_by_path(dev_name, FMODE_READ | FMODE_EXCL, + root->fs_info->bdev_holder); + if (!IS_ERR(bdev)) { + btrfs_scratch_superblocks(bdev, dev_name); + blkdev_put(bdev, FMODE_READ | FMODE_EXCL); } } - ret = 0; - - if (bdev) { - /* Notify udev that device has changed */ - btrfs_kobject_uevent(bdev, KOBJ_CHANGE); - - /* Update ctime/mtime for device path for libblkid */ - update_dev_time(device_path); - } - -error_brelse: - brelse(bh); - if (bdev) - blkdev_put(bdev, FMODE_READ | FMODE_EXCL); out: + kfree(dev_name); + mutex_unlock(&uuid_mutex); return ret; + error_undo: if (device->writeable) { lock_chunks(root); @@ -1948,7 +1946,7 @@ error_undo: device->fs_devices->rw_devices++; unlock_chunks(root); } - goto error_brelse; + goto out; } void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info, @@ -1972,11 +1970,8 @@ void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info, if (srcdev->missing) fs_devices->missing_devices--; - if (srcdev->writeable) { + if (srcdev->writeable) fs_devices->rw_devices--; - /* zero out the old super if it is writable */ - btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str); - } if (srcdev->bdev) fs_devices->open_devices--; @@ -1987,6 +1982,10 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info, { struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; + if (srcdev->writeable) { + /* zero out the old super if it is writable */ + btrfs_scratch_superblocks(srcdev->bdev, srcdev->name->str); + } call_rcu(&srcdev->rcu, free_device); /* @@ -2016,32 +2015,33 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info, void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, struct btrfs_device *tgtdev) { - struct btrfs_device *next_device; - mutex_lock(&uuid_mutex); WARN_ON(!tgtdev); mutex_lock(&fs_info->fs_devices->device_list_mutex); btrfs_sysfs_rm_device_link(fs_info->fs_devices, tgtdev); - if (tgtdev->bdev) { - btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); + if (tgtdev->bdev) fs_info->fs_devices->open_devices--; - } + fs_info->fs_devices->num_devices--; - next_device = list_entry(fs_info->fs_devices->devices.next, - struct btrfs_device, dev_list); - if (tgtdev->bdev == fs_info->sb->s_bdev) - fs_info->sb->s_bdev = next_device->bdev; - if (tgtdev->bdev == fs_info->fs_devices->latest_bdev) - fs_info->fs_devices->latest_bdev = next_device->bdev; - list_del_rcu(&tgtdev->dev_list); + 
btrfs_assign_next_active_device(fs_info, tgtdev, NULL); - call_rcu(&tgtdev->rcu, free_device); + list_del_rcu(&tgtdev->dev_list); mutex_unlock(&fs_info->fs_devices->device_list_mutex); mutex_unlock(&uuid_mutex); + + /* + * The update_dev_time() with in btrfs_scratch_superblocks() + * may lead to a call to btrfs_show_devname() which will try + * to hold device_list_mutex. And here this device + * is already out of device list, so we don't have to hold + * the device_list_mutex lock. + */ + btrfs_scratch_superblocks(tgtdev->bdev, tgtdev->name->str); + call_rcu(&tgtdev->rcu, free_device); } static int btrfs_find_device_by_path(struct btrfs_root *root, char *device_path, @@ -2102,6 +2102,31 @@ int btrfs_find_device_missing_or_by_path(struct btrfs_root *root, } /* + * Lookup a device given by device id, or the path if the id is 0. + */ +int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid, + char *devpath, + struct btrfs_device **device) +{ + int ret; + + if (devid) { + ret = 0; + *device = btrfs_find_device(root->fs_info, devid, NULL, + NULL); + if (!*device) + ret = -ENOENT; + } else { + if (!devpath || !devpath[0]) + return -EINVAL; + + ret = btrfs_find_device_missing_or_by_path(root, devpath, + device); + } + return ret; +} + +/* * does all the dirty work required for changing file system's UUID. */ static int btrfs_prepare_sprout(struct btrfs_root *root) @@ -2418,7 +2443,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) ret = btrfs_relocate_sys_chunks(root); if (ret < 0) - btrfs_std_error(root->fs_info, ret, + btrfs_handle_fs_error(root->fs_info, ret, "Failed to relocate sys chunks after " "device initialization. This can be fixed " "using the \"btrfs balance\" command."); @@ -2663,7 +2688,7 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, if (ret < 0) goto out; else if (ret > 0) { /* Logic error or corruption */ - btrfs_std_error(root->fs_info, -ENOENT, + btrfs_handle_fs_error(root->fs_info, -ENOENT, "Failed lookup while freeing chunk."); ret = -ENOENT; goto out; @@ -2671,7 +2696,7 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, ret = btrfs_del_item(trans, root, path); if (ret < 0) - btrfs_std_error(root->fs_info, ret, + btrfs_handle_fs_error(root->fs_info, ret, "Failed to delete chunk item."); out: btrfs_free_path(path); @@ -2857,7 +2882,7 @@ static int btrfs_relocate_chunk(struct btrfs_root *root, u64 chunk_offset) chunk_offset); if (IS_ERR(trans)) { ret = PTR_ERR(trans); - btrfs_std_error(root->fs_info, ret, NULL); + btrfs_handle_fs_error(root->fs_info, ret, NULL); return ret; } @@ -3402,6 +3427,7 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info) u32 count_meta = 0; u32 count_sys = 0; int chunk_reserved = 0; + u64 bytes_used = 0; /* step one make some room on all the devices */ devices = &fs_info->fs_devices->devices; @@ -3540,7 +3566,13 @@ again: goto loop; } - if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && !chunk_reserved) { + ASSERT(fs_info->data_sinfo); + spin_lock(&fs_info->data_sinfo->lock); + bytes_used = fs_info->data_sinfo->bytes_used; + spin_unlock(&fs_info->data_sinfo->lock); + + if ((chunk_type & BTRFS_BLOCK_GROUP_DATA) && + !chunk_reserved && !bytes_used) { trans = btrfs_start_transaction(chunk_root, 0); if (IS_ERR(trans)) { mutex_unlock(&fs_info->delete_unused_bgs_mutex); @@ -3632,7 +3664,7 @@ static void __cancel_balance(struct btrfs_fs_info *fs_info) unset_balance_control(fs_info); ret = del_balance_item(fs_info->tree_root); if (ret) - btrfs_std_error(fs_info, ret, NULL); + 
btrfs_handle_fs_error(fs_info, ret, NULL); atomic_set(&fs_info->mutually_exclusive_operation_running, 0); } @@ -3693,10 +3725,8 @@ int btrfs_balance(struct btrfs_balance_control *bctl, num_devices--; } btrfs_dev_replace_unlock(&fs_info->dev_replace, 0); - allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; - if (num_devices == 1) - allowed |= BTRFS_BLOCK_GROUP_DUP; - else if (num_devices > 1) + allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE | BTRFS_BLOCK_GROUP_DUP; + if (num_devices > 1) allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); if (num_devices > 2) allowed |= BTRFS_BLOCK_GROUP_RAID5; @@ -5278,7 +5308,15 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, stripe_nr = div64_u64(stripe_nr, stripe_len); stripe_offset = stripe_nr * stripe_len; - BUG_ON(offset < stripe_offset); + if (offset < stripe_offset) { + btrfs_crit(fs_info, "stripe math has gone wrong, " + "stripe_offset=%llu, offset=%llu, start=%llu, " + "logical=%llu, stripe_len=%llu", + stripe_offset, offset, em->start, logical, + stripe_len); + free_extent_map(em); + return -EINVAL; + } /* stripe_offset is the offset of this block in its stripe*/ stripe_offset = offset - stripe_offset; @@ -5519,7 +5557,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, &stripe_index); mirror_num = stripe_index + 1; } - BUG_ON(stripe_index >= map->num_stripes); + if (stripe_index >= map->num_stripes) { + btrfs_crit(fs_info, "stripe index math went horribly wrong, " + "got stripe_index=%u, num_stripes=%u", + stripe_index, map->num_stripes); + ret = -EINVAL; + goto out; + } num_alloc_stripes = num_stripes; if (dev_replace_is_ongoing) { @@ -6242,7 +6286,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, "invalid chunk length %llu", length); return -EIO; } - if (!is_power_of_2(stripe_len)) { + if (!is_power_of_2(stripe_len) || stripe_len != BTRFS_STRIPE_LEN) { btrfs_err(root->fs_info, "invalid chunk stripe length: %llu", stripe_len); return -EIO; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 1939ebde63df..0ac90f8d85bd 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -340,14 +340,14 @@ struct btrfs_raid_attr { }; extern const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES]; - +extern const int btrfs_raid_mindev_error[BTRFS_NR_RAID_TYPES]; extern const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES]; struct map_lookup { u64 type; int io_align; int io_width; - int stripe_len; + u64 stripe_len; int sector_size; int num_stripes; int sub_stripes; @@ -357,52 +357,6 @@ struct map_lookup { #define map_lookup_size(n) (sizeof(struct map_lookup) + \ (sizeof(struct btrfs_bio_stripe) * (n))) -/* - * Restriper's general type filter - */ -#define BTRFS_BALANCE_DATA (1ULL << 0) -#define BTRFS_BALANCE_SYSTEM (1ULL << 1) -#define BTRFS_BALANCE_METADATA (1ULL << 2) - -#define BTRFS_BALANCE_TYPE_MASK (BTRFS_BALANCE_DATA | \ - BTRFS_BALANCE_SYSTEM | \ - BTRFS_BALANCE_METADATA) - -#define BTRFS_BALANCE_FORCE (1ULL << 3) -#define BTRFS_BALANCE_RESUME (1ULL << 4) - -/* - * Balance filters - */ -#define BTRFS_BALANCE_ARGS_PROFILES (1ULL << 0) -#define BTRFS_BALANCE_ARGS_USAGE (1ULL << 1) -#define BTRFS_BALANCE_ARGS_DEVID (1ULL << 2) -#define BTRFS_BALANCE_ARGS_DRANGE (1ULL << 3) -#define BTRFS_BALANCE_ARGS_VRANGE (1ULL << 4) -#define BTRFS_BALANCE_ARGS_LIMIT (1ULL << 5) -#define BTRFS_BALANCE_ARGS_LIMIT_RANGE (1ULL << 6) -#define BTRFS_BALANCE_ARGS_STRIPES_RANGE (1ULL << 7) -#define BTRFS_BALANCE_ARGS_USAGE_RANGE (1ULL << 10) - -#define BTRFS_BALANCE_ARGS_MASK \ - 
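The new checks in __btrfs_map_block() that replace the BUG_ON()s guard straightforward arithmetic: the chunk-relative offset is split into a stripe number and an offset within that stripe, so the offset can never legitimately be smaller than stripe_nr * stripe_len. A user-space rendering of that split, assuming the usual 64 KiB stripe length (a sketch, not the kernel mapping code):

#include <stdint.h>
#include <stdio.h>

#define STRIPE_LEN (64 * 1024ULL)       /* assumption: BTRFS_STRIPE_LEN */

int main(void)
{
        uint64_t offset = 300 * 1024;                   /* offset inside the chunk */
        uint64_t stripe_nr = offset / STRIPE_LEN;       /* which stripe the offset lands in */
        uint64_t stripe_offset = stripe_nr * STRIPE_LEN;

        /* the patched kernel returns -EINVAL here instead of BUG_ON() */
        if (offset < stripe_offset) {
                fprintf(stderr, "stripe math has gone wrong\n");
                return 1;
        }

        printf("stripe %llu, offset in stripe %llu\n",
               (unsigned long long)stripe_nr,
               (unsigned long long)(offset - stripe_offset));
        return 0;
}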
(BTRFS_BALANCE_ARGS_PROFILES | \ - BTRFS_BALANCE_ARGS_USAGE | \ - BTRFS_BALANCE_ARGS_DEVID | \ - BTRFS_BALANCE_ARGS_DRANGE | \ - BTRFS_BALANCE_ARGS_VRANGE | \ - BTRFS_BALANCE_ARGS_LIMIT | \ - BTRFS_BALANCE_ARGS_LIMIT_RANGE | \ - BTRFS_BALANCE_ARGS_STRIPES_RANGE | \ - BTRFS_BALANCE_ARGS_USAGE_RANGE) - -/* - * Profile changing flags. When SOFT is set we won't relocate chunk if - * it already has the target profile (even though it may be - * half-filled). - */ -#define BTRFS_BALANCE_ARGS_CONVERT (1ULL << 8) -#define BTRFS_BALANCE_ARGS_SOFT (1ULL << 9) - struct btrfs_balance_args; struct btrfs_balance_progress; struct btrfs_balance_control { @@ -445,13 +399,18 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, struct btrfs_fs_devices **fs_devices_ret); int btrfs_close_devices(struct btrfs_fs_devices *fs_devices); void btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices, int step); +void btrfs_assign_next_active_device(struct btrfs_fs_info *fs_info, + struct btrfs_device *device, struct btrfs_device *this_dev); int btrfs_find_device_missing_or_by_path(struct btrfs_root *root, char *device_path, struct btrfs_device **device); +int btrfs_find_device_by_devspec(struct btrfs_root *root, u64 devid, + char *devpath, + struct btrfs_device **device); struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info, const u64 *devid, const u8 *uuid); -int btrfs_rm_device(struct btrfs_root *root, char *device_path); +int btrfs_rm_device(struct btrfs_root *root, char *device_path, u64 devid); void btrfs_cleanup_fs_uuids(void); int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len); int btrfs_grow_device(struct btrfs_trans_handle *trans, diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index 145d2b89e62d..3bfb252206c7 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c @@ -237,6 +237,9 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans, struct btrfs_root *root = BTRFS_I(inode)->root; int ret; + if (btrfs_root_readonly(root)) + return -EROFS; + if (trans) return do_setxattr(trans, inode, name, value, size, flags); @@ -369,11 +372,9 @@ err: } static int btrfs_xattr_handler_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - struct inode *inode = d_inode(dentry); - name = xattr_full_name(handler, name); return __btrfs_getxattr(inode, name, buffer, size); } @@ -434,25 +435,6 @@ const struct xattr_handler *btrfs_xattr_handlers[] = { NULL, }; -int btrfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) -{ - struct btrfs_root *root = BTRFS_I(d_inode(dentry))->root; - - if (btrfs_root_readonly(root)) - return -EROFS; - return generic_setxattr(dentry, name, value, size, flags); -} - -int btrfs_removexattr(struct dentry *dentry, const char *name) -{ - struct btrfs_root *root = BTRFS_I(d_inode(dentry))->root; - - if (btrfs_root_readonly(root)) - return -EROFS; - return generic_removexattr(dentry, name); -} - static int btrfs_initxattrs(struct inode *inode, const struct xattr *xattr_array, void *fs_info) { diff --git a/fs/btrfs/xattr.h b/fs/btrfs/xattr.h index 96807b3d22f5..15fc4743dc70 100644 --- a/fs/btrfs/xattr.h +++ b/fs/btrfs/xattr.h @@ -28,9 +28,6 @@ extern ssize_t __btrfs_getxattr(struct inode *inode, const char *name, extern int __btrfs_setxattr(struct btrfs_trans_handle *trans, struct inode *inode, const char *name, const void *value, size_t 
size, int flags); -extern int btrfs_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); -extern int btrfs_removexattr(struct dentry *dentry, const char *name); extern int btrfs_xattr_security_init(struct btrfs_trans_handle *trans, struct inode *inode, struct inode *dir, diff --git a/fs/buffer.c b/fs/buffer.c index af0d9a82a8ed..754813a6962b 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -255,17 +255,17 @@ out: */ static void free_more_memory(void) { - struct zone *zone; + struct zoneref *z; int nid; wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM); yield(); for_each_online_node(nid) { - (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS), - gfp_zone(GFP_NOFS), NULL, - &zone); - if (zone) + + z = first_zones_zonelist(node_zonelist(nid, GFP_NOFS), + gfp_zone(GFP_NOFS), NULL); + if (z->zone) try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0, GFP_NOFS, NULL); } diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index f19708487e2f..4f67227f69a5 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c @@ -37,6 +37,8 @@ static inline void ceph_set_cached_acl(struct inode *inode, spin_lock(&ci->i_ceph_lock); if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0)) set_cached_acl(inode, type, acl); + else + forget_cached_acl(inode, type); spin_unlock(&ci->i_ceph_lock); } @@ -88,7 +90,6 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) char *value = NULL; struct iattr newattrs; umode_t new_mode = inode->i_mode, old_mode = inode->i_mode; - struct dentry *dentry; switch (type) { case ACL_TYPE_ACCESS: @@ -126,29 +127,26 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type) goto out_free; } - dentry = d_find_alias(inode); if (new_mode != old_mode) { newattrs.ia_mode = new_mode; newattrs.ia_valid = ATTR_MODE; - ret = ceph_setattr(dentry, &newattrs); + ret = __ceph_setattr(inode, &newattrs); if (ret) - goto out_dput; + goto out_free; } - ret = __ceph_setxattr(dentry, name, value, size, 0); + ret = __ceph_setxattr(inode, name, value, size, 0); if (ret) { if (new_mode != old_mode) { newattrs.ia_mode = old_mode; newattrs.ia_valid = ATTR_MODE; - ceph_setattr(dentry, &newattrs); + __ceph_setattr(inode, &newattrs); } - goto out_dput; + goto out_free; } ceph_set_cached_acl(inode, type, acl); -out_dput: - dput(dentry); out_free: kfree(value); out: diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 4801571f51cb..43098cd9602b 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1292,8 +1292,7 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, * intercept O_DIRECT reads and writes early, this function should * never get called. 
*/ -static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter, - loff_t pos) +static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter) { WARN_ON(1); return -EINVAL; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 4fb2bbc2a272..3ab1192d2029 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -5,6 +5,7 @@ #include <linux/namei.h> #include <linux/slab.h> #include <linux/sched.h> +#include <linux/xattr.h> #include "super.h" #include "mds_client.h" @@ -1342,10 +1343,10 @@ const struct inode_operations ceph_dir_iops = { .permission = ceph_permission, .getattr = ceph_getattr, .setattr = ceph_setattr, - .setxattr = ceph_setxattr, - .getxattr = ceph_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = ceph_listxattr, - .removexattr = ceph_removexattr, + .removexattr = generic_removexattr, .get_acl = ceph_get_acl, .set_acl = ceph_set_acl, .mknod = ceph_mknod, diff --git a/fs/ceph/file.c b/fs/ceph/file.c index a79f9269831e..4f1dc7120916 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1382,12 +1382,11 @@ retry_snap: ceph_cap_string(got)); ceph_put_cap_refs(ci, got); - if (written >= 0 && - ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) || - ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { - err = vfs_fsync_range(file, pos, pos + written - 1, 1); - if (err < 0) - written = err; + if (written >= 0) { + if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL)) + iocb->ki_flags |= IOCB_DSYNC; + + written = generic_write_sync(iocb, written); } goto out_unlocked; diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index edfade037738..e669cfa9d793 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/writeback.h> #include <linux/vmalloc.h> +#include <linux/xattr.h> #include <linux/posix_acl.h> #include <linux/random.h> @@ -92,10 +93,10 @@ const struct inode_operations ceph_file_iops = { .permission = ceph_permission, .setattr = ceph_setattr, .getattr = ceph_getattr, - .setxattr = ceph_setxattr, - .getxattr = ceph_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = ceph_listxattr, - .removexattr = ceph_removexattr, + .removexattr = generic_removexattr, .get_acl = ceph_get_acl, .set_acl = ceph_set_acl, }; @@ -1770,22 +1771,18 @@ static const struct inode_operations ceph_symlink_iops = { .get_link = simple_get_link, .setattr = ceph_setattr, .getattr = ceph_getattr, - .setxattr = ceph_setxattr, - .getxattr = ceph_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = ceph_listxattr, - .removexattr = ceph_removexattr, + .removexattr = generic_removexattr, }; -/* - * setattr - */ -int ceph_setattr(struct dentry *dentry, struct iattr *attr) +int __ceph_setattr(struct inode *inode, struct iattr *attr) { - struct inode *inode = d_inode(dentry); struct ceph_inode_info *ci = ceph_inode(inode); const unsigned int ia_valid = attr->ia_valid; struct ceph_mds_request *req; - struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; + struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_cap_flush *prealloc_cf; int issued; int release = 0, dirtied = 0; @@ -2010,6 +2007,14 @@ out_put: } /* + * setattr + */ +int ceph_setattr(struct dentry *dentry, struct iattr *attr) +{ + return __ceph_setattr(d_inode(dentry), attr); +} + +/* * Verify that we have a lease on the given mask. If not, * do a getattr against an mds. 
*/ diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 541ead4d8965..85b8517f17a0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s) atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); if (atomic_dec_and_test(&s->s_ref)) { if (s->s_auth.authorizer) - ceph_auth_destroy_authorizer( - s->s_mdsc->fsc->client->monc.auth, - s->s_auth.authorizer); + ceph_auth_destroy_authorizer(s->s_auth.authorizer); kfree(s); } } @@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, struct ceph_auth_handshake *auth = &s->s_auth; if (force_new && auth->authorizer) { - ceph_auth_destroy_authorizer(ac, auth->authorizer); + ceph_auth_destroy_authorizer(auth->authorizer); auth->authorizer = NULL; } if (!auth->authorizer) { diff --git a/fs/ceph/super.h b/fs/ceph/super.h index e705c4d612d7..7b99eb756477 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -785,19 +785,15 @@ static inline int ceph_do_getattr(struct inode *inode, int mask, bool force) return __ceph_do_getattr(inode, NULL, mask, force); } extern int ceph_permission(struct inode *inode, int mask); +extern int __ceph_setattr(struct inode *inode, struct iattr *attr); extern int ceph_setattr(struct dentry *dentry, struct iattr *attr); extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); /* xattr.c */ -extern int ceph_setxattr(struct dentry *, const char *, const void *, - size_t, int); -int __ceph_setxattr(struct dentry *, const char *, const void *, size_t, int); +int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int); ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t); -int __ceph_removexattr(struct dentry *, const char *); -extern ssize_t ceph_getxattr(struct dentry *, const char *, void *, size_t); extern ssize_t ceph_listxattr(struct dentry *, char *, size_t); -extern int ceph_removexattr(struct dentry *, const char *); extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci); extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci); extern void __init ceph_xattr_init(void); diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 9410abdef3ce..0d66722c6a52 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -16,6 +16,8 @@ static int __remove_xattr(struct ceph_inode_info *ci, struct ceph_inode_xattr *xattr); +const struct xattr_handler ceph_other_xattr_handler; + /* * List of handlers for synthetic system.* attributes. Other * attributes are handled directly. 
@@ -25,6 +27,7 @@ const struct xattr_handler *ceph_xattr_handlers[] = { &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, #endif + &ceph_other_xattr_handler, NULL, }; @@ -33,7 +36,6 @@ static bool ceph_is_valid_xattr(const char *name) return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) || !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) || - !strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) || !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); } @@ -496,19 +498,6 @@ static int __remove_xattr(struct ceph_inode_info *ci, return 0; } -static int __remove_xattr_by_name(struct ceph_inode_info *ci, - const char *name) -{ - struct rb_node **p; - struct ceph_inode_xattr *xattr; - int err; - - p = &ci->i_xattrs.index.rb_node; - xattr = __get_xattr(ci, name); - err = __remove_xattr(ci, xattr); - return err; -} - static char *__copy_xattr_names(struct ceph_inode_info *ci, char *dest) { @@ -740,9 +729,6 @@ ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, int req_mask; int err; - if (!ceph_is_valid_xattr(name)) - return -ENODATA; - /* let's see if a virtual xattr was requested */ vxattr = ceph_match_vxattr(inode, name); if (vxattr) { @@ -804,15 +790,6 @@ out: return err; } -ssize_t ceph_getxattr(struct dentry *dentry, const char *name, void *value, - size_t size) -{ - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) - return generic_getxattr(dentry, name, value, size); - - return __ceph_getxattr(d_inode(dentry), name, value, size); -} - ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size) { struct inode *inode = d_inode(dentry); @@ -877,11 +854,10 @@ out: return err; } -static int ceph_sync_setxattr(struct dentry *dentry, const char *name, +static int ceph_sync_setxattr(struct inode *inode, const char *name, const char *value, size_t size, int flags) { - struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); - struct inode *inode = d_inode(dentry); + struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_mds_request *req; struct ceph_mds_client *mdsc = fsc->mdsc; @@ -939,13 +915,12 @@ out: return err; } -int __ceph_setxattr(struct dentry *dentry, const char *name, +int __ceph_setxattr(struct inode *inode, const char *name, const void *value, size_t size, int flags) { - struct inode *inode = d_inode(dentry); struct ceph_vxattr *vxattr; struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; + struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; struct ceph_cap_flush *prealloc_cf = NULL; int issued; int err; @@ -958,8 +933,8 @@ int __ceph_setxattr(struct dentry *dentry, const char *name, int required_blob_size; bool lock_snap_rwsem = false; - if (!ceph_is_valid_xattr(name)) - return -EOPNOTSUPP; + if (ceph_snap(inode) != CEPH_NOSNAP) + return -EROFS; vxattr = ceph_match_vxattr(inode, name); if (vxattr && vxattr->readonly) @@ -1056,7 +1031,7 @@ do_sync_unlocked: "during filling trace\n", inode); err = -EBUSY; } else { - err = ceph_sync_setxattr(dentry, name, value, size, flags); + err = ceph_sync_setxattr(inode, name, value, size, flags); } out: ceph_free_cap_flush(prealloc_cf); @@ -1066,146 +1041,29 @@ out: return err; } -int ceph_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags) -{ - if (ceph_snap(d_inode(dentry)) != 
CEPH_NOSNAP) - return -EROFS; - - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) - return generic_setxattr(dentry, name, value, size, flags); - - if (size == 0) - value = ""; /* empty EA, do not remove */ - - return __ceph_setxattr(dentry, name, value, size, flags); -} - -static int ceph_send_removexattr(struct dentry *dentry, const char *name) +static int ceph_get_xattr_handler(const struct xattr_handler *handler, + struct dentry *dentry, struct inode *inode, + const char *name, void *value, size_t size) { - struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb); - struct ceph_mds_client *mdsc = fsc->mdsc; - struct inode *inode = d_inode(dentry); - struct ceph_mds_request *req; - int err; - - req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RMXATTR, - USE_AUTH_MDS); - if (IS_ERR(req)) - return PTR_ERR(req); - req->r_path2 = kstrdup(name, GFP_NOFS); - if (!req->r_path2) - return -ENOMEM; - - req->r_inode = inode; - ihold(inode); - req->r_num_caps = 1; - req->r_inode_drop = CEPH_CAP_XATTR_SHARED; - err = ceph_mdsc_do_request(mdsc, NULL, req); - ceph_mdsc_put_request(req); - return err; + if (!ceph_is_valid_xattr(name)) + return -EOPNOTSUPP; + return __ceph_getxattr(inode, name, value, size); } -int __ceph_removexattr(struct dentry *dentry, const char *name) +static int ceph_set_xattr_handler(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) { - struct inode *inode = d_inode(dentry); - struct ceph_vxattr *vxattr; - struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc; - struct ceph_cap_flush *prealloc_cf = NULL; - int issued; - int err; - int required_blob_size; - int dirty; - bool lock_snap_rwsem = false; - if (!ceph_is_valid_xattr(name)) return -EOPNOTSUPP; - - vxattr = ceph_match_vxattr(inode, name); - if (vxattr && vxattr->readonly) - return -EOPNOTSUPP; - - /* pass any unhandled ceph.* xattrs through to the MDS */ - if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) - goto do_sync_unlocked; - - prealloc_cf = ceph_alloc_cap_flush(); - if (!prealloc_cf) - return -ENOMEM; - - err = -ENOMEM; - spin_lock(&ci->i_ceph_lock); -retry: - issued = __ceph_caps_issued(ci, NULL); - if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) - goto do_sync; - - if (!lock_snap_rwsem && !ci->i_head_snapc) { - lock_snap_rwsem = true; - if (!down_read_trylock(&mdsc->snap_rwsem)) { - spin_unlock(&ci->i_ceph_lock); - down_read(&mdsc->snap_rwsem); - spin_lock(&ci->i_ceph_lock); - goto retry; - } - } - - dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued)); - - __build_xattrs(inode); - - required_blob_size = __get_required_blob_size(ci, 0, 0); - - if (!ci->i_xattrs.prealloc_blob || - required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) { - struct ceph_buffer *blob; - - spin_unlock(&ci->i_ceph_lock); - dout(" preaallocating new blob size=%d\n", required_blob_size); - blob = ceph_buffer_new(required_blob_size, GFP_NOFS); - if (!blob) - goto do_sync_unlocked; - spin_lock(&ci->i_ceph_lock); - if (ci->i_xattrs.prealloc_blob) - ceph_buffer_put(ci->i_xattrs.prealloc_blob); - ci->i_xattrs.prealloc_blob = blob; - goto retry; - } - - err = __remove_xattr_by_name(ceph_inode(inode), name); - - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, - &prealloc_cf); - ci->i_xattrs.dirty = true; - inode->i_ctime = current_fs_time(inode->i_sb); - spin_unlock(&ci->i_ceph_lock); - if (lock_snap_rwsem) - 
up_read(&mdsc->snap_rwsem); - if (dirty) - __mark_inode_dirty(inode, dirty); - ceph_free_cap_flush(prealloc_cf); - return err; -do_sync: - spin_unlock(&ci->i_ceph_lock); -do_sync_unlocked: - if (lock_snap_rwsem) - up_read(&mdsc->snap_rwsem); - ceph_free_cap_flush(prealloc_cf); - err = ceph_send_removexattr(dentry, name); - return err; + return __ceph_setxattr(d_inode(dentry), name, value, size, flags); } -int ceph_removexattr(struct dentry *dentry, const char *name) -{ - if (ceph_snap(d_inode(dentry)) != CEPH_NOSNAP) - return -EROFS; - - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) - return generic_removexattr(dentry, name); - - return __ceph_removexattr(dentry, name); -} +const struct xattr_handler ceph_other_xattr_handler = { + .prefix = "", /* match any name => handlers called with full name */ + .get = ceph_get_xattr_handler, + .set = ceph_set_xattr_handler, +}; #ifdef CONFIG_SECURITY bool ceph_security_xattr_wanted(struct inode *in) diff --git a/fs/char_dev.c b/fs/char_dev.c index 24b142569ca9..687471dc04a0 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c @@ -91,6 +91,10 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor, break; } + if (i < CHRDEV_MAJOR_DYN_END) + pr_warn("CHRDEV \"%s\" major number %d goes below the dynamic allocation range", + name, i); + if (i == 0) { ret = -EBUSY; goto out; diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile index 1964d212ab08..eed7eb09f46f 100644 --- a/fs/cifs/Makefile +++ b/fs/cifs/Makefile @@ -5,9 +5,10 @@ obj-$(CONFIG_CIFS) += cifs.o cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \ - cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ + cifs_unicode.o nterr.o cifsencrypt.o \ readdir.o ioctl.o sess.o export.o smb1ops.o winucase.o +cifs-$(CONFIG_CIFS_XATTR) += xattr.o cifs-$(CONFIG_CIFS_ACL) += cifsacl.o cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index e956cba94338..ec9dbbcca3b9 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -151,8 +151,12 @@ char *cifs_compose_mount_options(const char *sb_mountdata, if (sb_mountdata == NULL) return ERR_PTR(-EINVAL); - if (strlen(fullpath) - ref->path_consumed) + if (strlen(fullpath) - ref->path_consumed) { prepath = fullpath + ref->path_consumed; + /* skip initial delimiter */ + if (*prepath == '/' || *prepath == '\\') + prepath++; + } *devname = cifs_build_devname(ref->node_name, prepath); if (IS_ERR(*devname)) { @@ -302,7 +306,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) if (full_path == NULL) goto cdda_exit; - cifs_sb = CIFS_SB(d_inode(mntpt)->i_sb); + cifs_sb = CIFS_SB(mntpt->d_sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { mnt = ERR_CAST(tlink); diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 6908080e9b6d..b611fc2e8984 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c @@ -24,10 +24,13 @@ #include <linux/string.h> #include <keys/user-type.h> #include <linux/key-type.h> +#include <linux/keyctl.h> #include <linux/inet.h> #include "cifsglob.h" #include "cifs_spnego.h" #include "cifs_debug.h" +#include "cifsproto.h" +static const struct cred *spnego_cred; /* create a new cifs key */ static int @@ -102,6 +105,7 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo) size_t desc_len; struct key *spnego_key; const char *hostname = server->hostname; + const struct cred *saved_cred; /* length of fields (with semicolons): ver=0xyz ip4=ipaddress host=hostname 
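ceph_other_xattr_handler above relies on the VFS convention its comment states: a handler registered with an empty prefix matches every attribute name and is invoked with the full name, while prefixed handlers normally see only the part after their prefix. A user-space sketch of that dispatch rule, with made-up handler names:

#include <stdio.h>
#include <string.h>

struct handler {
        const char *prefix;
        int (*get)(const char *name);
};

static int user_get(const char *name)  { printf("user handler sees \"%s\"\n", name); return 0; }
static int other_get(const char *name) { printf("catch-all sees \"%s\"\n", name); return 0; }

static const struct handler handlers[] = {
        { "user.", user_get },
        { "",      other_get },         /* like ceph_other_xattr_handler: matches anything */
};

static int dispatch_get(const char *name)
{
        for (size_t i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
                size_t len = strlen(handlers[i].prefix);

                if (strncmp(name, handlers[i].prefix, len))
                        continue;
                /* prefixed handlers get the suffix; the "" handler gets the full name */
                return handlers[i].get(name + len);
        }
        return -1;
}

int main(void)
{
        dispatch_get("user.comment");    /* handled by the "user." handler */
        dispatch_get("ceph.dir.layout"); /* falls through to the catch-all */
        return 0;
}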
sec=mechanism uid=0xFF user=username */ @@ -163,7 +167,9 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo) sprintf(dp, ";pid=0x%x", current->pid); cifs_dbg(FYI, "key description = %s\n", description); + saved_cred = override_creds(spnego_cred); spnego_key = request_key(&cifs_spnego_key_type, description, ""); + revert_creds(saved_cred); #ifdef CONFIG_CIFS_DEBUG2 if (cifsFYI && !IS_ERR(spnego_key)) { @@ -177,3 +183,64 @@ out: kfree(description); return spnego_key; } + +int +init_cifs_spnego(void) +{ + struct cred *cred; + struct key *keyring; + int ret; + + cifs_dbg(FYI, "Registering the %s key type\n", + cifs_spnego_key_type.name); + + /* + * Create an override credential set with special thread keyring for + * spnego upcalls. + */ + + cred = prepare_kernel_cred(NULL); + if (!cred) + return -ENOMEM; + + keyring = keyring_alloc(".cifs_spnego", + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, + (KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ, + KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); + if (IS_ERR(keyring)) { + ret = PTR_ERR(keyring); + goto failed_put_cred; + } + + ret = register_key_type(&cifs_spnego_key_type); + if (ret < 0) + goto failed_put_key; + + /* + * instruct request_key() to use this special keyring as a cache for + * the results it looks up + */ + set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); + cred->thread_keyring = keyring; + cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; + spnego_cred = cred; + + cifs_dbg(FYI, "cifs spnego keyring: %d\n", key_serial(keyring)); + return 0; + +failed_put_key: + key_put(keyring); +failed_put_cred: + put_cred(cred); + return ret; +} + +void +exit_cifs_spnego(void) +{ + key_revoke(spnego_cred->thread_keyring); + unregister_key_type(&cifs_spnego_key_type); + put_cred(spnego_cred); + cifs_dbg(FYI, "Unregistered %s key type\n", cifs_spnego_key_type.name); +} diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 3f93125916bf..71e8a56e9479 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -360,7 +360,7 @@ init_cifs_idmap(void) GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, - KEY_ALLOC_NOT_IN_QUOTA, NULL); + KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 4897dacf8944..6aeb8d4616a4 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -66,45 +66,15 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server) return 0; } -/* - * Calculate and return the CIFS signature based on the mac key and SMB PDU. - * The 16 byte signature must be allocated by the caller. Note we only use the - * 1st eight bytes and that the smb header signature field on input contains - * the sequence number before this function is called. Also, this function - * should be called with the server->srv_mutex held. 
- */ -static int cifs_calc_signature(struct smb_rqst *rqst, - struct TCP_Server_Info *server, char *signature) +int __cifs_calc_signature(struct smb_rqst *rqst, + struct TCP_Server_Info *server, char *signature, + struct shash_desc *shash) { int i; int rc; struct kvec *iov = rqst->rq_iov; int n_vec = rqst->rq_nvec; - if (iov == NULL || signature == NULL || server == NULL) - return -EINVAL; - - if (!server->secmech.sdescmd5) { - rc = cifs_crypto_shash_md5_allocate(server); - if (rc) { - cifs_dbg(VFS, "%s: Can't alloc md5 crypto\n", __func__); - return -1; - } - } - - rc = crypto_shash_init(&server->secmech.sdescmd5->shash); - if (rc) { - cifs_dbg(VFS, "%s: Could not init md5\n", __func__); - return rc; - } - - rc = crypto_shash_update(&server->secmech.sdescmd5->shash, - server->session_key.response, server->session_key.len); - if (rc) { - cifs_dbg(VFS, "%s: Could not update with response\n", __func__); - return rc; - } - for (i = 0; i < n_vec; i++) { if (iov[i].iov_len == 0) continue; @@ -117,12 +87,10 @@ static int cifs_calc_signature(struct smb_rqst *rqst, if (i == 0) { if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ break; /* nothing to sign or corrupt header */ - rc = - crypto_shash_update(&server->secmech.sdescmd5->shash, + rc = crypto_shash_update(shash, iov[i].iov_base + 4, iov[i].iov_len - 4); } else { - rc = - crypto_shash_update(&server->secmech.sdescmd5->shash, + rc = crypto_shash_update(shash, iov[i].iov_base, iov[i].iov_len); } if (rc) { @@ -134,21 +102,64 @@ static int cifs_calc_signature(struct smb_rqst *rqst, /* now hash over the rq_pages array */ for (i = 0; i < rqst->rq_npages; i++) { - struct kvec p_iov; + void *kaddr = kmap(rqst->rq_pages[i]); + size_t len = rqst->rq_pagesz; + + if (i == rqst->rq_npages - 1) + len = rqst->rq_tailsz; + + crypto_shash_update(shash, kaddr, len); - cifs_rqst_page_to_kvec(rqst, i, &p_iov); - crypto_shash_update(&server->secmech.sdescmd5->shash, - p_iov.iov_base, p_iov.iov_len); kunmap(rqst->rq_pages[i]); } - rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature); + rc = crypto_shash_final(shash, signature); if (rc) - cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__); + cifs_dbg(VFS, "%s: Could not generate hash\n", __func__); return rc; } +/* + * Calculate and return the CIFS signature based on the mac key and SMB PDU. + * The 16 byte signature must be allocated by the caller. Note we only use the + * 1st eight bytes and that the smb header signature field on input contains + * the sequence number before this function is called. Also, this function + * should be called with the server->srv_mutex held. 
+ */ +static int cifs_calc_signature(struct smb_rqst *rqst, + struct TCP_Server_Info *server, char *signature) +{ + int rc; + + if (!rqst->rq_iov || !signature || !server) + return -EINVAL; + + if (!server->secmech.sdescmd5) { + rc = cifs_crypto_shash_md5_allocate(server); + if (rc) { + cifs_dbg(VFS, "%s: Can't alloc md5 crypto\n", __func__); + return -1; + } + } + + rc = crypto_shash_init(&server->secmech.sdescmd5->shash); + if (rc) { + cifs_dbg(VFS, "%s: Could not init md5\n", __func__); + return rc; + } + + rc = crypto_shash_update(&server->secmech.sdescmd5->shash, + server->session_key.response, server->session_key.len); + if (rc) { + cifs_dbg(VFS, "%s: Could not update with response\n", __func__); + return rc; + } + + return __cifs_calc_signature(rqst, server, signature, + &server->secmech.sdescmd5->shash); +} + /* must be called with server->srv_mutex held */ int cifs_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server, __u32 *pexpected_response_sequence_number) diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 89201564c346..5d8b7edf8a8f 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -37,6 +37,7 @@ #include <linux/freezer.h> #include <linux/namei.h> #include <linux/random.h> +#include <linux/xattr.h> #include <net/ipv6.h> #include "cifsfs.h" #include "cifspdu.h" @@ -135,6 +136,7 @@ cifs_read_super(struct super_block *sb) sb->s_magic = CIFS_MAGIC_NUMBER; sb->s_op = &cifs_super_ops; + sb->s_xattr = cifs_xattr_handlers; sb->s_bdi = &cifs_sb->bdi; sb->s_blocksize = CIFS_MAX_MSGSIZE; sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */ @@ -888,44 +890,33 @@ const struct inode_operations cifs_dir_inode_ops = { .rmdir = cifs_rmdir, .rename2 = cifs_rename2, .permission = cifs_permission, -/* revalidate:cifs_revalidate, */ .setattr = cifs_setattr, .symlink = cifs_symlink, .mknod = cifs_mknod, -#ifdef CONFIG_CIFS_XATTR - .setxattr = cifs_setxattr, - .getxattr = cifs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = cifs_listxattr, - .removexattr = cifs_removexattr, -#endif + .removexattr = generic_removexattr, }; const struct inode_operations cifs_file_inode_ops = { -/* revalidate:cifs_revalidate, */ .setattr = cifs_setattr, - .getattr = cifs_getattr, /* do we need this anymore? 
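The refactor above moves the per-buffer hashing loop into __cifs_calc_signature() so that SMB1 (MD5) and SMB2 (HMAC-SHA256) signing differ only in the shash state the caller passes in. A runnable user-space toy with the same shape; the byte-sum "hash" is only a placeholder for the kernel's crypto_shash state, and only the structure is the point:

#include <stddef.h>
#include <stdio.h>

struct buf        { const void *base; size_t len; };
struct hash_state { unsigned sum; };

static void hash_update(struct hash_state *s, const void *p, size_t n)
{
        const unsigned char *b = p;

        while (n--)
                s->sum += *b++;
}

/* shared helper, analogous to __cifs_calc_signature(): callers differ only
 * in the pre-initialised state they hand in */
static unsigned calc_signature(const struct buf *iov, size_t n, struct hash_state *state)
{
        for (size_t i = 0; i < n; i++) {
                if (!iov[i].len)
                        continue;       /* skip empty segments, as the kernel loop does */
                hash_update(state, iov[i].base, iov[i].len);
        }
        return state->sum;
}

int main(void)
{
        struct buf iov[] = { { "SMB", 3 }, { "", 0 }, { "payload", 7 } };
        struct hash_state st = { 0 };

        printf("signature (toy): %u\n", calc_signature(iov, 3, &st));
        return 0;
}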
*/ + .getattr = cifs_getattr, .permission = cifs_permission, -#ifdef CONFIG_CIFS_XATTR - .setxattr = cifs_setxattr, - .getxattr = cifs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = cifs_listxattr, - .removexattr = cifs_removexattr, -#endif + .removexattr = generic_removexattr, }; const struct inode_operations cifs_symlink_inode_ops = { .readlink = generic_readlink, .get_link = cifs_get_link, .permission = cifs_permission, - /* BB add the following two eventually */ - /* revalidate: cifs_revalidate, - setattr: cifs_notify_change, *//* BB do we need notify change */ -#ifdef CONFIG_CIFS_XATTR - .setxattr = cifs_setxattr, - .getxattr = cifs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = cifs_listxattr, - .removexattr = cifs_removexattr, -#endif + .removexattr = generic_removexattr, }; static int cifs_clone_file_range(struct file *src_file, loff_t off, @@ -1083,7 +1074,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = { }; const struct file_operations cifs_dir_ops = { - .iterate = cifs_readdir, + .iterate_shared = cifs_readdir, .release = cifs_closedir, .read = generic_read_dir, .unlocked_ioctl = cifs_ioctl, @@ -1307,7 +1298,7 @@ init_cifs(void) goto out_destroy_mids; #ifdef CONFIG_CIFS_UPCALL - rc = register_key_type(&cifs_spnego_key_type); + rc = init_cifs_spnego(); if (rc) goto out_destroy_request_bufs; #endif /* CONFIG_CIFS_UPCALL */ @@ -1330,7 +1321,7 @@ out_init_cifs_idmap: out_register_key_type: #endif #ifdef CONFIG_CIFS_UPCALL - unregister_key_type(&cifs_spnego_key_type); + exit_cifs_spnego(); out_destroy_request_bufs: #endif cifs_destroy_request_bufs(); diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 83aac8ba50b0..9dcf974acc47 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h @@ -120,15 +120,19 @@ extern const char *cifs_get_link(struct dentry *, struct inode *, struct delayed_call *); extern int cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname); -extern int cifs_removexattr(struct dentry *, const char *); -extern int cifs_setxattr(struct dentry *, const char *, const void *, - size_t, int); -extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t); + +#ifdef CONFIG_CIFS_XATTR +extern const struct xattr_handler *cifs_xattr_handlers[]; extern ssize_t cifs_listxattr(struct dentry *, char *, size_t); +#else +# define cifs_xattr_handlers NULL +# define cifs_listxattr NULL +#endif + extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); #ifdef CONFIG_CIFS_NFSD_EXPORT extern const struct export_operations cifs_export_ops; #endif /* CONFIG_CIFS_NFSD_EXPORT */ -#define CIFS_VERSION "2.08" +#define CIFS_VERSION "2.09" #endif /* _CIFSFS_H */ diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index f2cc0b3d1af7..bba106cdc43c 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -615,8 +615,6 @@ struct TCP_Server_Info { bool sec_mskerberos; /* supports legacy MS Kerberos */ bool large_buf; /* is current buffer large? 
*/ struct delayed_work echo; /* echo ping workqueue job */ - struct kvec *iov; /* reusable kvec array for receives */ - unsigned int nr_iov; /* number of kvecs in array */ char *smallbuf; /* pointer to current "small" buffer */ char *bigbuf; /* pointer to current "big" buffer */ unsigned int total_read; /* total amount of data read in this pass */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index eed7ff50faf0..1243bd326591 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -37,8 +37,6 @@ extern void cifs_buf_release(void *); extern struct smb_hdr *cifs_small_buf_get(void); extern void cifs_small_buf_release(void *); extern void free_rsp_buf(int, void *); -extern void cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx, - struct kvec *iov); extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *, unsigned int /* length */); extern unsigned int _get_xid(void); @@ -60,6 +58,8 @@ do { \ } while (0) extern int init_cifs_idmap(void); extern void exit_cifs_idmap(void); +extern int init_cifs_spnego(void); +extern void exit_cifs_spnego(void); extern char *build_path_from_dentry(struct dentry *); extern char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb, @@ -181,10 +181,9 @@ extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, extern void dequeue_mid(struct mid_q_entry *mid, bool malformed); extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, - unsigned int to_read); -extern int cifs_readv_from_socket(struct TCP_Server_Info *server, - struct kvec *iov_orig, unsigned int nr_segs, - unsigned int to_read); + unsigned int to_read); +extern int cifs_read_page_from_socket(struct TCP_Server_Info *server, + struct page *page, unsigned int to_read); extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, struct cifs_sb_info *cifs_sb); extern int cifs_match_super(struct super_block *, void *); @@ -512,4 +511,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const unsigned char *path, char *pbuf, unsigned int *pbytes_written); +int __cifs_calc_signature(struct smb_rqst *rqst, + struct TCP_Server_Info *server, char *signature, + struct shash_desc *shash); #endif /* _CIFSPROTO_H */ diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index a894bf809ff7..d47197ea4ab6 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -1447,10 +1447,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) len = min_t(unsigned int, buflen, server->vals->read_rsp_size) - HEADER_SIZE(server) + 1; - rdata->iov.iov_base = buf + HEADER_SIZE(server) - 1; - rdata->iov.iov_len = len; - - length = cifs_readv_from_socket(server, &rdata->iov, 1, len); + length = cifs_read_from_socket(server, + buf + HEADER_SIZE(server) - 1, len); if (length < 0) return length; server->total_read += length; @@ -1502,9 +1500,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) len = data_offset - server->total_read; if (len > 0) { /* read any junk before data into the rest of smallbuf */ - rdata->iov.iov_base = buf + server->total_read; - rdata->iov.iov_len = len; - length = cifs_readv_from_socket(server, &rdata->iov, 1, len); + length = cifs_read_from_socket(server, + buf + server->total_read, len); if (length < 0) return length; server->total_read += length; @@ -3366,7 +3363,7 @@ static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen, if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION) return -EOPNOTSUPP; - if (acl_type 
& ACL_TYPE_ACCESS) { + if (acl_type == ACL_TYPE_ACCESS) { count = le16_to_cpu(cifs_acl->access_entry_count); pACE = &cifs_acl->ace_array[0]; size = sizeof(struct cifs_posix_acl); @@ -3377,7 +3374,7 @@ static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen, size_of_data_area, size); return -EINVAL; } - } else if (acl_type & ACL_TYPE_DEFAULT) { + } else if (acl_type == ACL_TYPE_DEFAULT) { count = le16_to_cpu(cifs_acl->access_entry_count); size = sizeof(struct cifs_posix_acl); size += sizeof(struct cifs_posix_ace) * count; diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 6f62ac821a84..66736f57b5ab 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -501,99 +501,34 @@ server_unresponsive(struct TCP_Server_Info *server) return false; } -/* - * kvec_array_init - clone a kvec array, and advance into it - * @new: pointer to memory for cloned array - * @iov: pointer to original array - * @nr_segs: number of members in original array - * @bytes: number of bytes to advance into the cloned array - * - * This function will copy the array provided in iov to a section of memory - * and advance the specified number of bytes into the new array. It returns - * the number of segments in the new array. "new" must be at least as big as - * the original iov array. - */ -static unsigned int -kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs, - size_t bytes) -{ - size_t base = 0; - - while (bytes || !iov->iov_len) { - int copy = min(bytes, iov->iov_len); - - bytes -= copy; - base += copy; - if (iov->iov_len == base) { - iov++; - nr_segs--; - base = 0; - } - } - memcpy(new, iov, sizeof(*iov) * nr_segs); - new->iov_base += base; - new->iov_len -= base; - return nr_segs; -} - -static struct kvec * -get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs) -{ - struct kvec *new_iov; - - if (server->iov && nr_segs <= server->nr_iov) - return server->iov; - - /* not big enough -- allocate a new one and release the old */ - new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS); - if (new_iov) { - kfree(server->iov); - server->iov = new_iov; - server->nr_iov = nr_segs; - } - return new_iov; -} - -int -cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, - unsigned int nr_segs, unsigned int to_read) +static int +cifs_readv_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg) { int length = 0; int total_read; - unsigned int segs; - struct msghdr smb_msg; - struct kvec *iov; - iov = get_server_iovec(server, nr_segs); - if (!iov) - return -ENOMEM; - - smb_msg.msg_control = NULL; - smb_msg.msg_controllen = 0; + smb_msg->msg_control = NULL; + smb_msg->msg_controllen = 0; - for (total_read = 0; to_read; total_read += length, to_read -= length) { + for (total_read = 0; msg_data_left(smb_msg); total_read += length) { try_to_freeze(); - if (server_unresponsive(server)) { - total_read = -ECONNABORTED; - break; - } + if (server_unresponsive(server)) + return -ECONNABORTED; - segs = kvec_array_init(iov, iov_orig, nr_segs, total_read); + length = sock_recvmsg(server->ssocket, smb_msg, 0); - length = kernel_recvmsg(server->ssocket, &smb_msg, - iov, segs, to_read, 0); + if (server->tcpStatus == CifsExiting) + return -ESHUTDOWN; - if (server->tcpStatus == CifsExiting) { - total_read = -ESHUTDOWN; - break; - } else if (server->tcpStatus == CifsNeedReconnect) { + if (server->tcpStatus == CifsNeedReconnect) { cifs_reconnect(server); - total_read = -ECONNABORTED; - break; - } else if (length == -ERESTARTSYS || - length == -EAGAIN || - 
length == -EINTR) { + return -ECONNABORTED; + } + + if (length == -ERESTARTSYS || + length == -EAGAIN || + length == -EINTR) { /* * Minimum sleep to prevent looping, allowing socket * to clear and app threads to set tcpStatus @@ -602,12 +537,12 @@ cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig, usleep_range(1000, 2000); length = 0; continue; - } else if (length <= 0) { - cifs_dbg(FYI, "Received no data or error: expecting %d\n" - "got %d", to_read, length); + } + + if (length <= 0) { + cifs_dbg(FYI, "Received no data or error: %d\n", length); cifs_reconnect(server); - total_read = -ECONNABORTED; - break; + return -ECONNABORTED; } } return total_read; @@ -617,12 +552,21 @@ int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf, unsigned int to_read) { - struct kvec iov; + struct msghdr smb_msg; + struct kvec iov = {.iov_base = buf, .iov_len = to_read}; + iov_iter_kvec(&smb_msg.msg_iter, READ | ITER_KVEC, &iov, 1, to_read); - iov.iov_base = buf; - iov.iov_len = to_read; + return cifs_readv_from_socket(server, &smb_msg); +} - return cifs_readv_from_socket(server, &iov, 1, to_read); +int +cifs_read_page_from_socket(struct TCP_Server_Info *server, struct page *page, + unsigned int to_read) +{ + struct msghdr smb_msg; + struct bio_vec bv = {.bv_page = page, .bv_len = to_read}; + iov_iter_bvec(&smb_msg.msg_iter, READ | ITER_BVEC, &bv, 1, to_read); + return cifs_readv_from_socket(server, &smb_msg); } static bool @@ -783,7 +727,6 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) } kfree(server->hostname); - kfree(server->iov); kfree(server); length = atomic_dec_return(&tcpSesAllocCount); @@ -1196,8 +1139,12 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol) convert_delimiter(vol->UNC, '\\'); - /* If pos is NULL, or is a bogus trailing delimiter then no prepath */ - if (!*pos++ || !*pos) + /* skip any delimiter */ + if (*pos == '/' || *pos == '\\') + pos++; + + /* If pos is NULL then no prepath */ + if (!*pos) return 0; vol->prepath = kstrdup(pos, GFP_KERNEL); @@ -2918,7 +2865,7 @@ static inline void cifs_reclassify_socket4(struct socket *sock) { struct sock *sk = sock->sk; - BUG_ON(sock_owned_by_user(sk)); + BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET-CIFS", &cifs_slock_key[0], "sk_lock-AF_INET-CIFS", &cifs_key[0]); } @@ -2927,7 +2874,7 @@ static inline void cifs_reclassify_socket6(struct socket *sock) { struct sock *sk = sock->sk; - BUG_ON(sock_owned_by_user(sk)); + BUG_ON(!sock_allow_reclassification(sk)); sock_lock_init_class_and_name(sk, "slock-AF_INET6-CIFS", &cifs_slock_key[1], "sk_lock-AF_INET6-CIFS", &cifs_key[1]); } diff --git a/fs/cifs/file.c b/fs/cifs/file.c index c03d0744648b..9793ae0bcaa2 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -271,7 +271,7 @@ struct cifsFileInfo * cifs_new_fileinfo(struct cifs_fid *fid, struct file *file, struct tcon_link *tlink, __u32 oplock) { - struct dentry *dentry = file->f_path.dentry; + struct dentry *dentry = file_dentry(file); struct inode *inode = d_inode(dentry); struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifsFileInfo *cfile; @@ -461,7 +461,7 @@ int cifs_open(struct inode *inode, struct file *file) tcon = tlink_tcon(tlink); server = tcon->ses->server; - full_path = build_path_from_dentry(file->f_path.dentry); + full_path = build_path_from_dentry(file_dentry(file)); if (full_path == NULL) { rc = -ENOMEM; goto out; @@ -2687,11 +2687,8 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from) out: 
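The receive-path conversion above drops the hand-rolled kvec cloning and kernel_recvmsg() bookkeeping in favour of an iov_iter carried inside a struct msghdr, which tracks progress across partial receives on its own. The sketch below only illustrates that pattern with the same 4.6/4.7-era APIs the patch uses (iov_iter_kvec(), msg_data_left(), sock_recvmsg()); demo_recv_all() and demo_read_buf() are made-up names, not the real cifs helpers, and the freeze/retry/reconnect handling of cifs_readv_from_socket() is deliberately left out. A page-backed receive works the same way, just built with a bio_vec and iov_iter_bvec(), as the new cifs_read_page_from_socket() shows.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative only: loop until the iterator inside the msghdr is drained. */
static int demo_recv_all(struct socket *sock, struct msghdr *msg)
{
	int total = 0;

	while (msg_data_left(msg)) {
		/* the iov_iter in msg->msg_iter advances past partial reads */
		int n = sock_recvmsg(sock, msg, 0);

		if (n < 0)
			return n;
		if (n == 0)
			return -ECONNABORTED;	/* peer closed mid-message */
		total += n;
	}
	return total;
}

/* Illustrative only: receive 'len' bytes into a flat buffer via one kvec. */
static int demo_read_buf(struct socket *sock, char *buf, unsigned int len)
{
	struct kvec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = { };

	iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &iov, 1, len);
	return demo_recv_all(sock, &msg);
}

The payoff visible in the rest of this diff is that callers no longer clone and advance kvec arrays by hand, and page reads no longer need kmap()/kunmap() at all.
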
inode_unlock(inode); - if (rc > 0) { - ssize_t err = generic_write_sync(file, iocb->ki_pos - rc, rc); - if (err < 0) - rc = err; - } + if (rc > 0) + rc = generic_write_sync(iocb, rc); up_read(&cinode->lock_sem); return rc; } @@ -2855,39 +2852,31 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server, int result = 0; unsigned int i; unsigned int nr_pages = rdata->nr_pages; - struct kvec iov; rdata->got_bytes = 0; rdata->tailsz = PAGE_SIZE; for (i = 0; i < nr_pages; i++) { struct page *page = rdata->pages[i]; + size_t n; - if (len >= PAGE_SIZE) { - /* enough data to fill the page */ - iov.iov_base = kmap(page); - iov.iov_len = PAGE_SIZE; - cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n", - i, iov.iov_base, iov.iov_len); - len -= PAGE_SIZE; - } else if (len > 0) { - /* enough for partial page, fill and zero the rest */ - iov.iov_base = kmap(page); - iov.iov_len = len; - cifs_dbg(FYI, "%u: iov_base=%p iov_len=%zu\n", - i, iov.iov_base, iov.iov_len); - memset(iov.iov_base + len, '\0', PAGE_SIZE - len); - rdata->tailsz = len; - len = 0; - } else { + if (len <= 0) { /* no need to hold page hostage */ rdata->pages[i] = NULL; rdata->nr_pages--; put_page(page); continue; } - - result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len); - kunmap(page); + n = len; + if (len >= PAGE_SIZE) { + /* enough data to fill the page */ + n = PAGE_SIZE; + len -= n; + } else { + zero_user(page, len, PAGE_SIZE - len); + rdata->tailsz = len; + len = 0; + } + result = cifs_read_page_from_socket(server, page, n); if (result < 0) break; @@ -3303,7 +3292,6 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, u64 eof; pgoff_t eof_index; unsigned int nr_pages = rdata->nr_pages; - struct kvec iov; /* determine the eof that the server (probably) has */ eof = CIFS_I(rdata->mapping->host)->server_eof; @@ -3314,23 +3302,14 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, rdata->tailsz = PAGE_SIZE; for (i = 0; i < nr_pages; i++) { struct page *page = rdata->pages[i]; + size_t n = PAGE_SIZE; if (len >= PAGE_SIZE) { - /* enough data to fill the page */ - iov.iov_base = kmap(page); - iov.iov_len = PAGE_SIZE; - cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", - i, page->index, iov.iov_base, iov.iov_len); len -= PAGE_SIZE; } else if (len > 0) { /* enough for partial page, fill and zero the rest */ - iov.iov_base = kmap(page); - iov.iov_len = len; - cifs_dbg(FYI, "%u: idx=%lu iov_base=%p iov_len=%zu\n", - i, page->index, iov.iov_base, iov.iov_len); - memset(iov.iov_base + len, - '\0', PAGE_SIZE - len); - rdata->tailsz = len; + zero_user(page, len, PAGE_SIZE - len); + n = rdata->tailsz = len; len = 0; } else if (page->index > eof_index) { /* @@ -3360,8 +3339,7 @@ cifs_readpages_read_into_pages(struct TCP_Server_Info *server, continue; } - result = cifs_readv_from_socket(server, &iov, 1, iov.iov_len); - kunmap(page); + result = cifs_read_page_from_socket(server, page, n); if (result < 0) break; @@ -3854,7 +3832,7 @@ void cifs_oplock_break(struct work_struct *work) * Direct IO is not yet supported in the cached mode. 
*/ static ssize_t -cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) +cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter) { /* * FIXME diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 5f9ad5c42180..514dadb0575d 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -2418,8 +2418,7 @@ cifs_setattr_exit: int cifs_setattr(struct dentry *direntry, struct iattr *attrs) { - struct inode *inode = d_inode(direntry); - struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); if (pTcon->unix_ext) diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index b30a4a6d98a0..65cf85dcda09 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -78,20 +78,34 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, { struct dentry *dentry, *alias; struct inode *inode; - struct super_block *sb = d_inode(parent)->i_sb; + struct super_block *sb = parent->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); dentry = d_hash_and_lookup(parent, name); + if (!dentry) { + /* + * If we know that the inode will need to be revalidated + * immediately, then don't create a new dentry for it. + * We'll end up doing an on the wire call either way and + * this spares us an invalidation. + */ + if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) + return; +retry: + dentry = d_alloc_parallel(parent, name, &wq); + } if (IS_ERR(dentry)) return; - - if (dentry) { + if (!d_in_lookup(dentry)) { inode = d_inode(dentry); if (inode) { - if (d_mountpoint(dentry)) - goto out; + if (d_mountpoint(dentry)) { + dput(dentry); + return; + } /* * If we're generating inode numbers, then we don't * want to clobber the existing one with the one that @@ -106,33 +120,22 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, (inode->i_mode & S_IFMT) == (fattr->cf_mode & S_IFMT)) { cifs_fattr_to_inode(inode, fattr); - goto out; + dput(dentry); + return; } } d_invalidate(dentry); dput(dentry); + goto retry; + } else { + inode = cifs_iget(sb, fattr); + if (!inode) + inode = ERR_PTR(-ENOMEM); + alias = d_splice_alias(inode, dentry); + d_lookup_done(dentry); + if (alias && !IS_ERR(alias)) + dput(alias); } - - /* - * If we know that the inode will need to be revalidated immediately, - * then don't create a new dentry for it. We'll end up doing an on - * the wire call either way and this spares us an invalidation. 
- */ - if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) - return; - - dentry = d_alloc(parent, name); - if (!dentry) - return; - - inode = cifs_iget(sb, fattr); - if (!inode) - goto out; - - alias = d_splice_alias(inode, dentry); - if (alias && !IS_ERR(alias)) - dput(alias); -out: dput(dentry); } @@ -300,7 +303,7 @@ initiate_cifs_search(const unsigned int xid, struct file *file) cifsFile->invalidHandle = true; cifsFile->srch_inf.endOfSearch = false; - full_path = build_path_from_dentry(file->f_path.dentry); + full_path = build_path_from_dentry(file_dentry(file)); if (full_path == NULL) { rc = -ENOMEM; goto error_exit; @@ -759,7 +762,7 @@ static int cifs_filldir(char *find_entry, struct file *file, */ fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; - cifs_prime_dcache(file->f_path.dentry, &name, &fattr); + cifs_prime_dcache(file_dentry(file), &name, &fattr); ino = cifs_uniqueid_to_ino_t(fattr.cf_uniqueid); return !dir_emit(ctx, name.name, name.len, ino, fattr.cf_dtype); diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 59727e32ed0f..af0ec2d5ad0e 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -400,19 +400,27 @@ int build_ntlmssp_auth_blob(unsigned char *pbuffer, sec_blob->LmChallengeResponse.MaximumLength = 0; sec_blob->NtChallengeResponse.BufferOffset = cpu_to_le32(tmp - pbuffer); - rc = setup_ntlmv2_rsp(ses, nls_cp); - if (rc) { - cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc); - goto setup_ntlmv2_ret; + if (ses->user_name != NULL) { + rc = setup_ntlmv2_rsp(ses, nls_cp); + if (rc) { + cifs_dbg(VFS, "Error %d during NTLMSSP authentication\n", rc); + goto setup_ntlmv2_ret; + } + memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE, + ses->auth_key.len - CIFS_SESS_KEY_SIZE); + tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE; + + sec_blob->NtChallengeResponse.Length = + cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); + sec_blob->NtChallengeResponse.MaximumLength = + cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); + } else { + /* + * don't send an NT Response for anonymous access + */ + sec_blob->NtChallengeResponse.Length = 0; + sec_blob->NtChallengeResponse.MaximumLength = 0; } - memcpy(tmp, ses->auth_key.response + CIFS_SESS_KEY_SIZE, - ses->auth_key.len - CIFS_SESS_KEY_SIZE); - tmp += ses->auth_key.len - CIFS_SESS_KEY_SIZE; - - sec_blob->NtChallengeResponse.Length = - cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); - sec_blob->NtChallengeResponse.MaximumLength = - cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); if (ses->domainName == NULL) { sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer); @@ -670,20 +678,24 @@ sess_auth_lanman(struct sess_data *sess_data) pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; - /* no capabilities flags in old lanman negotiation */ - pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); - - /* Calculate hash with password and copy into bcc_ptr. - * Encryption Key (stored as in cryptkey) gets used if the - * security mode bit in Negottiate Protocol response states - * to use challenge/response method (i.e. Password bit is 1). - */ - rc = calc_lanman_hash(ses->password, ses->server->cryptkey, - ses->server->sec_mode & SECMODE_PW_ENCRYPT ? - true : false, lnm_session_key); - - memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); - bcc_ptr += CIFS_AUTH_RESP_SIZE; + if (ses->user_name != NULL) { + /* no capabilities flags in old lanman negotiation */ + pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); + + /* Calculate hash with password and copy into bcc_ptr. 
+ * Encryption Key (stored as in cryptkey) gets used if the + * security mode bit in Negottiate Protocol response states + * to use challenge/response method (i.e. Password bit is 1). + */ + rc = calc_lanman_hash(ses->password, ses->server->cryptkey, + ses->server->sec_mode & SECMODE_PW_ENCRYPT ? + true : false, lnm_session_key); + + memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; + } else { + pSMB->old_req.PasswordLength = 0; + } /* * can not sign if LANMAN negotiated so no need @@ -769,26 +781,31 @@ sess_auth_ntlm(struct sess_data *sess_data) capabilities = cifs_ssetup_hdr(ses, pSMB); pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities); - pSMB->req_no_secext.CaseInsensitivePasswordLength = - cpu_to_le16(CIFS_AUTH_RESP_SIZE); - pSMB->req_no_secext.CaseSensitivePasswordLength = - cpu_to_le16(CIFS_AUTH_RESP_SIZE); - - /* calculate ntlm response and session key */ - rc = setup_ntlm_response(ses, sess_data->nls_cp); - if (rc) { - cifs_dbg(VFS, "Error %d during NTLM authentication\n", - rc); - goto out; - } + if (ses->user_name != NULL) { + pSMB->req_no_secext.CaseInsensitivePasswordLength = + cpu_to_le16(CIFS_AUTH_RESP_SIZE); + pSMB->req_no_secext.CaseSensitivePasswordLength = + cpu_to_le16(CIFS_AUTH_RESP_SIZE); + + /* calculate ntlm response and session key */ + rc = setup_ntlm_response(ses, sess_data->nls_cp); + if (rc) { + cifs_dbg(VFS, "Error %d during NTLM authentication\n", + rc); + goto out; + } - /* copy ntlm response */ - memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, - CIFS_AUTH_RESP_SIZE); - bcc_ptr += CIFS_AUTH_RESP_SIZE; - memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, - CIFS_AUTH_RESP_SIZE); - bcc_ptr += CIFS_AUTH_RESP_SIZE; + /* copy ntlm response */ + memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, + CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; + memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, + CIFS_AUTH_RESP_SIZE); + bcc_ptr += CIFS_AUTH_RESP_SIZE; + } else { + pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; + pSMB->req_no_secext.CaseSensitivePasswordLength = 0; + } if (ses->capabilities & CAP_UNICODE) { /* unicode strings must be word aligned */ @@ -878,22 +895,26 @@ sess_auth_ntlmv2(struct sess_data *sess_data) /* LM2 password would be here if we supported it */ pSMB->req_no_secext.CaseInsensitivePasswordLength = 0; - /* calculate nlmv2 response and session key */ - rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp); - if (rc) { - cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc); - goto out; - } + if (ses->user_name != NULL) { + /* calculate nlmv2 response and session key */ + rc = setup_ntlmv2_rsp(ses, sess_data->nls_cp); + if (rc) { + cifs_dbg(VFS, "Error %d during NTLMv2 authentication\n", rc); + goto out; + } - memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, - ses->auth_key.len - CIFS_SESS_KEY_SIZE); - bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE; + memcpy(bcc_ptr, ses->auth_key.response + CIFS_SESS_KEY_SIZE, + ses->auth_key.len - CIFS_SESS_KEY_SIZE); + bcc_ptr += ses->auth_key.len - CIFS_SESS_KEY_SIZE; - /* set case sensitive password length after tilen may get - * assigned, tilen is 0 otherwise. - */ - pSMB->req_no_secext.CaseSensitivePasswordLength = - cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); + /* set case sensitive password length after tilen may get + * assigned, tilen is 0 otherwise. 
+ */ + pSMB->req_no_secext.CaseSensitivePasswordLength = + cpu_to_le16(ses->auth_key.len - CIFS_SESS_KEY_SIZE); + } else { + pSMB->req_no_secext.CaseSensitivePasswordLength = 0; + } if (ses->capabilities & CAP_UNICODE) { if (sess_data->iov[0].iov_len % 2) { diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h index bc0bb9c34f72..0ffa18094335 100644 --- a/fs/cifs/smb2glob.h +++ b/fs/cifs/smb2glob.h @@ -44,6 +44,7 @@ #define SMB2_OP_DELETE 7 #define SMB2_OP_HARDLINK 8 #define SMB2_OP_SET_EOF 9 +#define SMB2_OP_RMDIR 10 /* Used when constructing chained read requests. */ #define CHAINED_REQUEST 1 diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 899bbc86f73e..4f0231e685a9 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -80,6 +80,10 @@ smb2_open_op_close(const unsigned int xid, struct cifs_tcon *tcon, * SMB2_open() call. */ break; + case SMB2_OP_RMDIR: + tmprc = SMB2_rmdir(xid, tcon, fid.persistent_fid, + fid.volatile_fid); + break; case SMB2_OP_RENAME: tmprc = SMB2_rename(xid, tcon, fid.persistent_fid, fid.volatile_fid, (__le16 *)data); @@ -191,8 +195,8 @@ smb2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, const char *name, struct cifs_sb_info *cifs_sb) { return smb2_open_op_close(xid, tcon, cifs_sb, name, DELETE, FILE_OPEN, - CREATE_NOT_FILE | CREATE_DELETE_ON_CLOSE, - NULL, SMB2_OP_DELETE); + CREATE_NOT_FILE, + NULL, SMB2_OP_RMDIR); } int diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 42e1f440eb1e..8f38e33d365b 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -2575,6 +2575,22 @@ SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon, } int +SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, + u64 persistent_fid, u64 volatile_fid) +{ + __u8 delete_pending = 1; + void *data; + unsigned int size; + + data = &delete_pending; + size = 1; /* sizeof __u8 */ + + return send_set_info(xid, tcon, persistent_fid, volatile_fid, + current->tgid, FILE_DISPOSITION_INFORMATION, 1, &data, + &size); +} + +int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file) { diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 4f07dc93608d..eb2cde2f64ba 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h @@ -141,6 +141,8 @@ extern int SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon, extern int SMB2_rename(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file); +extern int SMB2_rmdir(const unsigned int xid, struct cifs_tcon *tcon, + u64 persistent_fid, u64 volatile_fid); extern int SMB2_set_hardlink(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, u64 volatile_fid, __le16 *target_file); diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 8732a43b1008..bc9a7b634643 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -135,11 +135,10 @@ smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server) int smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) { - int i, rc; + int rc; unsigned char smb2_signature[SMB2_HMACSHA256_SIZE]; unsigned char *sigptr = smb2_signature; struct kvec *iov = rqst->rq_iov; - int n_vec = rqst->rq_nvec; struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base; struct cifs_ses *ses; @@ -171,53 +170,11 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) return rc; } - for (i = 0; i < n_vec; i++) { - if (iov[i].iov_len == 0) - continue; - if (iov[i].iov_base == 
NULL) { - cifs_dbg(VFS, "null iovec entry\n"); - return -EIO; - } - /* - * The first entry includes a length field (which does not get - * signed that occupies the first 4 bytes before the header). - */ - if (i == 0) { - if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ - break; /* nothing to sign or corrupt header */ - rc = - crypto_shash_update( - &server->secmech.sdeschmacsha256->shash, - iov[i].iov_base + 4, iov[i].iov_len - 4); - } else { - rc = - crypto_shash_update( - &server->secmech.sdeschmacsha256->shash, - iov[i].iov_base, iov[i].iov_len); - } - if (rc) { - cifs_dbg(VFS, "%s: Could not update with payload\n", - __func__); - return rc; - } - } - - /* now hash over the rq_pages array */ - for (i = 0; i < rqst->rq_npages; i++) { - struct kvec p_iov; - - cifs_rqst_page_to_kvec(rqst, i, &p_iov); - crypto_shash_update(&server->secmech.sdeschmacsha256->shash, - p_iov.iov_base, p_iov.iov_len); - kunmap(rqst->rq_pages[i]); - } - - rc = crypto_shash_final(&server->secmech.sdeschmacsha256->shash, - sigptr); - if (rc) - cifs_dbg(VFS, "%s: Could not generate sha256 hash\n", __func__); + rc = __cifs_calc_signature(rqst, server, sigptr, + &server->secmech.sdeschmacsha256->shash); - memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE); + if (!rc) + memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE); return rc; } @@ -395,12 +352,10 @@ generate_smb311signingkey(struct cifs_ses *ses) int smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) { - int i; int rc = 0; unsigned char smb3_signature[SMB2_CMACAES_SIZE]; unsigned char *sigptr = smb3_signature; struct kvec *iov = rqst->rq_iov; - int n_vec = rqst->rq_nvec; struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base; struct cifs_ses *ses; @@ -431,54 +386,12 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__); return rc; } + + rc = __cifs_calc_signature(rqst, server, sigptr, + &server->secmech.sdesccmacaes->shash); - for (i = 0; i < n_vec; i++) { - if (iov[i].iov_len == 0) - continue; - if (iov[i].iov_base == NULL) { - cifs_dbg(VFS, "null iovec entry"); - return -EIO; - } - /* - * The first entry includes a length field (which does not get - * signed that occupies the first 4 bytes before the header). 
- */ - if (i == 0) { - if (iov[0].iov_len <= 8) /* cmd field at offset 9 */ - break; /* nothing to sign or corrupt header */ - rc = - crypto_shash_update( - &server->secmech.sdesccmacaes->shash, - iov[i].iov_base + 4, iov[i].iov_len - 4); - } else { - rc = - crypto_shash_update( - &server->secmech.sdesccmacaes->shash, - iov[i].iov_base, iov[i].iov_len); - } - if (rc) { - cifs_dbg(VFS, "%s: Couldn't update cmac aes with payload\n", - __func__); - return rc; - } - } - - /* now hash over the rq_pages array */ - for (i = 0; i < rqst->rq_npages; i++) { - struct kvec p_iov; - - cifs_rqst_page_to_kvec(rqst, i, &p_iov); - crypto_shash_update(&server->secmech.sdesccmacaes->shash, - p_iov.iov_base, p_iov.iov_len); - kunmap(rqst->rq_pages[i]); - } - - rc = crypto_shash_final(&server->secmech.sdesccmacaes->shash, - sigptr); - if (rc) - cifs_dbg(VFS, "%s: Could not generate cmac aes\n", __func__); - - memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE); + if (!rc) + memcpy(smb2_pdu->Signature, sigptr, SMB2_SIGNATURE_SIZE); return rc; } diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 87abe8ed074c..206a597b2293 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -124,41 +124,32 @@ cifs_delete_mid(struct mid_q_entry *mid) /* * smb_send_kvec - send an array of kvecs to the server * @server: Server to send the data to - * @iov: Pointer to array of kvecs - * @n_vec: length of kvec array + * @smb_msg: Message to send * @sent: amount of data sent on socket is stored here * * Our basic "send data to server" function. Should be called with srv_mutex * held. The caller is responsible for handling the results. */ static int -smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec, - size_t *sent) +smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg, + size_t *sent) { int rc = 0; - int i = 0; - struct msghdr smb_msg; - unsigned int remaining; - size_t first_vec = 0; + int retries = 0; struct socket *ssocket = server->ssocket; *sent = 0; - smb_msg.msg_name = (struct sockaddr *) &server->dstaddr; - smb_msg.msg_namelen = sizeof(struct sockaddr); - smb_msg.msg_control = NULL; - smb_msg.msg_controllen = 0; + smb_msg->msg_name = (struct sockaddr *) &server->dstaddr; + smb_msg->msg_namelen = sizeof(struct sockaddr); + smb_msg->msg_control = NULL; + smb_msg->msg_controllen = 0; if (server->noblocksnd) - smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; + smb_msg->msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL; else - smb_msg.msg_flags = MSG_NOSIGNAL; - - remaining = 0; - for (i = 0; i < n_vec; i++) - remaining += iov[i].iov_len; + smb_msg->msg_flags = MSG_NOSIGNAL; - i = 0; - while (remaining) { + while (msg_data_left(smb_msg)) { /* * If blocking send, we try 3 times, since each can block * for 5 seconds. For nonblocking we have to try more @@ -177,35 +168,21 @@ smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec, * after the retries we will kill the socket and * reconnect which may clear the network problem. 
*/ - rc = kernel_sendmsg(ssocket, &smb_msg, &iov[first_vec], - n_vec - first_vec, remaining); + rc = sock_sendmsg(ssocket, smb_msg); if (rc == -EAGAIN) { - i++; - if (i >= 14 || (!server->noblocksnd && (i > 2))) { + retries++; + if (retries >= 14 || + (!server->noblocksnd && (retries > 2))) { cifs_dbg(VFS, "sends on sock %p stuck for 15 seconds\n", ssocket); - rc = -EAGAIN; - break; + return -EAGAIN; } - msleep(1 << i); + msleep(1 << retries); continue; } if (rc < 0) - break; - - /* send was at least partially successful */ - *sent += rc; - - if (rc == remaining) { - remaining = 0; - break; - } - - if (rc > remaining) { - cifs_dbg(VFS, "sent %d requested %d\n", rc, remaining); - break; - } + return rc; if (rc == 0) { /* should never happen, letting socket clear before @@ -215,59 +192,11 @@ smb_send_kvec(struct TCP_Server_Info *server, struct kvec *iov, size_t n_vec, continue; } - remaining -= rc; - - /* the line below resets i */ - for (i = first_vec; i < n_vec; i++) { - if (iov[i].iov_len) { - if (rc > iov[i].iov_len) { - rc -= iov[i].iov_len; - iov[i].iov_len = 0; - } else { - iov[i].iov_base += rc; - iov[i].iov_len -= rc; - first_vec = i; - break; - } - } - } - - i = 0; /* in case we get ENOSPC on the next send */ - rc = 0; + /* send was at least partially successful */ + *sent += rc; + retries = 0; /* in case we get ENOSPC on the next send */ } - return rc; -} - -/** - * rqst_page_to_kvec - Turn a slot in the smb_rqst page array into a kvec - * @rqst: pointer to smb_rqst - * @idx: index into the array of the page - * @iov: pointer to struct kvec that will hold the result - * - * Helper function to convert a slot in the rqst->rq_pages array into a kvec. - * The page will be kmapped and the address placed into iov_base. The length - * will then be adjusted according to the ptailoff. - */ -void -cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx, - struct kvec *iov) -{ - /* - * FIXME: We could avoid this kmap altogether if we used - * kernel_sendpage instead of kernel_sendmsg. That will only - * work if signing is disabled though as sendpage inlines the - * page directly into the fraglist. If userspace modifies the - * page after we calculate the signature, then the server will - * reject it and may break the connection. kernel_sendmsg does - * an extra copy of the data and avoids that issue. 
- */ - iov->iov_base = kmap(rqst->rq_pages[idx]); - - /* if last page, don't send beyond this offset into page */ - if (idx == (rqst->rq_npages - 1)) - iov->iov_len = rqst->rq_tailsz; - else - iov->iov_len = rqst->rq_pagesz; + return 0; } static unsigned long @@ -299,8 +228,9 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base); unsigned long send_length; unsigned int i; - size_t total_len = 0, sent; + size_t total_len = 0, sent, size; struct socket *ssocket = server->ssocket; + struct msghdr smb_msg; int val = 1; if (ssocket == NULL) @@ -321,7 +251,13 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK, (char *)&val, sizeof(val)); - rc = smb_send_kvec(server, iov, n_vec, &sent); + size = 0; + for (i = 0; i < n_vec; i++) + size += iov[i].iov_len; + + iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size); + + rc = smb_send_kvec(server, &smb_msg, &sent); if (rc < 0) goto uncork; @@ -329,11 +265,16 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) /* now walk the page array and send each page in it */ for (i = 0; i < rqst->rq_npages; i++) { - struct kvec p_iov; - - cifs_rqst_page_to_kvec(rqst, i, &p_iov); - rc = smb_send_kvec(server, &p_iov, 1, &sent); - kunmap(rqst->rq_pages[i]); + size_t len = i == rqst->rq_npages - 1 + ? rqst->rq_tailsz + : rqst->rq_pagesz; + struct bio_vec bvec = { + .bv_page = rqst->rq_pages[i], + .bv_len = len + }; + iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC, + &bvec, 1, len); + rc = smb_send_kvec(server, &smb_msg, &sent); if (rc < 0) break; diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index f5dc2f0df4ad..c8b77aa24a1d 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c @@ -32,92 +32,24 @@ #include "cifs_unicode.h" #define MAX_EA_VALUE_SIZE 65535 -#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib" #define CIFS_XATTR_CIFS_ACL "system.cifs_acl" /* BB need to add server (Samba e.g) support for security and trusted prefix */ -int cifs_removexattr(struct dentry *direntry, const char *ea_name) -{ - int rc = -EOPNOTSUPP; -#ifdef CONFIG_CIFS_XATTR - unsigned int xid; - struct cifs_sb_info *cifs_sb; - struct tcon_link *tlink; - struct cifs_tcon *pTcon; - struct super_block *sb; - char *full_path = NULL; - - if (direntry == NULL) - return -EIO; - if (d_really_is_negative(direntry)) - return -EIO; - sb = d_inode(direntry)->i_sb; - if (sb == NULL) - return -EIO; - - cifs_sb = CIFS_SB(sb); - tlink = cifs_sb_tlink(cifs_sb); - if (IS_ERR(tlink)) - return PTR_ERR(tlink); - pTcon = tlink_tcon(tlink); - - xid = get_xid(); - - full_path = build_path_from_dentry(direntry); - if (full_path == NULL) { - rc = -ENOMEM; - goto remove_ea_exit; - } - if (ea_name == NULL) { - cifs_dbg(FYI, "Null xattr names not supported\n"); - } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) - && (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN))) { - cifs_dbg(FYI, - "illegal xattr request %s (only user namespace supported)\n", - ea_name); - /* BB what if no namespace prefix? */ - /* Should we just pass them to server, except for - system and perhaps security prefixes? */ - } else { - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) - goto remove_ea_exit; +enum { XATTR_USER, XATTR_CIFS_ACL, XATTR_ACL_ACCESS, XATTR_ACL_DEFAULT }; - ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. 
prefix */ - if (pTcon->ses->server->ops->set_EA) - rc = pTcon->ses->server->ops->set_EA(xid, pTcon, - full_path, ea_name, NULL, (__u16)0, - cifs_sb->local_nls, cifs_remap(cifs_sb)); - } -remove_ea_exit: - kfree(full_path); - free_xid(xid); - cifs_put_tlink(tlink); -#endif - return rc; -} - -int cifs_setxattr(struct dentry *direntry, const char *ea_name, - const void *ea_value, size_t value_size, int flags) +static int cifs_xattr_set(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) { int rc = -EOPNOTSUPP; -#ifdef CONFIG_CIFS_XATTR unsigned int xid; - struct cifs_sb_info *cifs_sb; + struct super_block *sb = dentry->d_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; - struct super_block *sb; char *full_path; - if (direntry == NULL) - return -EIO; - if (d_really_is_negative(direntry)) - return -EIO; - sb = d_inode(direntry)->i_sb; - if (sb == NULL) - return -EIO; - - cifs_sb = CIFS_SB(sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -125,10 +57,10 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, xid = get_xid(); - full_path = build_path_from_dentry(direntry); + full_path = build_path_from_dentry(dentry); if (full_path == NULL) { rc = -ENOMEM; - goto set_ea_exit; + goto out; } /* return dos attributes as pseudo xattr */ /* return alt name if available as pseudo attr */ @@ -136,123 +68,93 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, /* if proc/fs/cifs/streamstoxattr is set then search server for EAs or streams to returns as xattrs */ - if (value_size > MAX_EA_VALUE_SIZE) { + if (size > MAX_EA_VALUE_SIZE) { cifs_dbg(FYI, "size of EA value too large\n"); rc = -EOPNOTSUPP; - goto set_ea_exit; + goto out; } - if (ea_name == NULL) { - cifs_dbg(FYI, "Null xattr names not supported\n"); - } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) - == 0) { + switch (handler->flags) { + case XATTR_USER: if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) - goto set_ea_exit; - if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) - cifs_dbg(FYI, "attempt to set cifs inode metadata\n"); + goto out; - ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */ if (pTcon->ses->server->ops->set_EA) rc = pTcon->ses->server->ops->set_EA(xid, pTcon, - full_path, ea_name, ea_value, (__u16)value_size, + full_path, name, value, (__u16)size, cifs_sb->local_nls, cifs_remap(cifs_sb)); - } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) - == 0) { - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) - goto set_ea_exit; + break; - ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. 
prefix */ - if (pTcon->ses->server->ops->set_EA) - rc = pTcon->ses->server->ops->set_EA(xid, pTcon, - full_path, ea_name, ea_value, (__u16)value_size, - cifs_sb->local_nls, cifs_remap(cifs_sb)); - } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL, - strlen(CIFS_XATTR_CIFS_ACL)) == 0) { + case XATTR_CIFS_ACL: { #ifdef CONFIG_CIFS_ACL struct cifs_ntsd *pacl; - pacl = kmalloc(value_size, GFP_KERNEL); + + if (!value) + goto out; + pacl = kmalloc(size, GFP_KERNEL); if (!pacl) { rc = -ENOMEM; } else { - memcpy(pacl, ea_value, value_size); - if (pTcon->ses->server->ops->set_acl) + memcpy(pacl, value, size); + if (value && + pTcon->ses->server->ops->set_acl) rc = pTcon->ses->server->ops->set_acl(pacl, - value_size, d_inode(direntry), + size, d_inode(dentry), full_path, CIFS_ACL_DACL); else rc = -EOPNOTSUPP; if (rc == 0) /* force revalidate of the inode */ - CIFS_I(d_inode(direntry))->time = 0; + CIFS_I(d_inode(dentry))->time = 0; kfree(pacl); } -#else - cifs_dbg(FYI, "Set CIFS ACL not supported yet\n"); #endif /* CONFIG_CIFS_ACL */ - } else { - int temp; - temp = strncmp(ea_name, XATTR_NAME_POSIX_ACL_ACCESS, - strlen(XATTR_NAME_POSIX_ACL_ACCESS)); - if (temp == 0) { + break; + } + + case XATTR_ACL_ACCESS: #ifdef CONFIG_CIFS_POSIX - if (sb->s_flags & MS_POSIXACL) - rc = CIFSSMBSetPosixACL(xid, pTcon, full_path, - ea_value, (const int)value_size, - ACL_TYPE_ACCESS, cifs_sb->local_nls, - cifs_remap(cifs_sb)); - cifs_dbg(FYI, "set POSIX ACL rc %d\n", rc); -#else - cifs_dbg(FYI, "set POSIX ACL not supported\n"); -#endif - } else if (strncmp(ea_name, XATTR_NAME_POSIX_ACL_DEFAULT, - strlen(XATTR_NAME_POSIX_ACL_DEFAULT)) == 0) { + if (!value) + goto out; + if (sb->s_flags & MS_POSIXACL) + rc = CIFSSMBSetPosixACL(xid, pTcon, full_path, + value, (const int)size, + ACL_TYPE_ACCESS, cifs_sb->local_nls, + cifs_remap(cifs_sb)); +#endif /* CONFIG_CIFS_POSIX */ + break; + + case XATTR_ACL_DEFAULT: #ifdef CONFIG_CIFS_POSIX - if (sb->s_flags & MS_POSIXACL) - rc = CIFSSMBSetPosixACL(xid, pTcon, full_path, - ea_value, (const int)value_size, - ACL_TYPE_DEFAULT, cifs_sb->local_nls, - cifs_remap(cifs_sb)); - cifs_dbg(FYI, "set POSIX default ACL rc %d\n", rc); -#else - cifs_dbg(FYI, "set default POSIX ACL not supported\n"); -#endif - } else { - cifs_dbg(FYI, "illegal xattr request %s (only user namespace supported)\n", - ea_name); - /* BB what if no namespace prefix? */ - /* Should we just pass them to server, except for - system and perhaps security prefixes? 
*/ - } + if (!value) + goto out; + if (sb->s_flags & MS_POSIXACL) + rc = CIFSSMBSetPosixACL(xid, pTcon, full_path, + value, (const int)size, + ACL_TYPE_DEFAULT, cifs_sb->local_nls, + cifs_remap(cifs_sb)); +#endif /* CONFIG_CIFS_POSIX */ + break; } -set_ea_exit: +out: kfree(full_path); free_xid(xid); cifs_put_tlink(tlink); -#endif return rc; } -ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, - void *ea_value, size_t buf_size) +static int cifs_xattr_get(const struct xattr_handler *handler, + struct dentry *dentry, struct inode *inode, + const char *name, void *value, size_t size) { ssize_t rc = -EOPNOTSUPP; -#ifdef CONFIG_CIFS_XATTR unsigned int xid; - struct cifs_sb_info *cifs_sb; + struct super_block *sb = dentry->d_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; - struct super_block *sb; char *full_path; - if (direntry == NULL) - return -EIO; - if (d_really_is_negative(direntry)) - return -EIO; - sb = d_inode(direntry)->i_sb; - if (sb == NULL) - return -EIO; - - cifs_sb = CIFS_SB(sb); tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) return PTR_ERR(tlink); @@ -260,98 +162,72 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, xid = get_xid(); - full_path = build_path_from_dentry(direntry); + full_path = build_path_from_dentry(dentry); if (full_path == NULL) { rc = -ENOMEM; - goto get_ea_exit; + goto out; } /* return dos attributes as pseudo xattr */ /* return alt name if available as pseudo attr */ - if (ea_name == NULL) { - cifs_dbg(FYI, "Null xattr names not supported\n"); - } else if (strncmp(ea_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) - == 0) { + switch (handler->flags) { + case XATTR_USER: if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) - goto get_ea_exit; + goto out; - if (strncmp(ea_name, CIFS_XATTR_DOS_ATTRIB, 14) == 0) { - cifs_dbg(FYI, "attempt to query cifs inode metadata\n"); - /* revalidate/getattr then populate from inode */ - } /* BB add else when above is implemented */ - ea_name += XATTR_USER_PREFIX_LEN; /* skip past user. prefix */ if (pTcon->ses->server->ops->query_all_EAs) rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, - full_path, ea_name, ea_value, buf_size, + full_path, name, value, size, cifs_sb->local_nls, cifs_remap(cifs_sb)); - } else if (strncmp(ea_name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) { - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) - goto get_ea_exit; + break; - ea_name += XATTR_OS2_PREFIX_LEN; /* skip past os2. 
prefix */ - if (pTcon->ses->server->ops->query_all_EAs) - rc = pTcon->ses->server->ops->query_all_EAs(xid, pTcon, - full_path, ea_name, ea_value, buf_size, - cifs_sb->local_nls, cifs_remap(cifs_sb)); - } else if (strncmp(ea_name, XATTR_NAME_POSIX_ACL_ACCESS, - strlen(XATTR_NAME_POSIX_ACL_ACCESS)) == 0) { + case XATTR_CIFS_ACL: { +#ifdef CONFIG_CIFS_ACL + u32 acllen; + struct cifs_ntsd *pacl; + + if (pTcon->ses->server->ops->get_acl == NULL) + goto out; /* rc already EOPNOTSUPP */ + + pacl = pTcon->ses->server->ops->get_acl(cifs_sb, + inode, full_path, &acllen); + if (IS_ERR(pacl)) { + rc = PTR_ERR(pacl); + cifs_dbg(VFS, "%s: error %zd getting sec desc\n", + __func__, rc); + } else { + if (value) { + if (acllen > size) + acllen = -ERANGE; + else + memcpy(value, pacl, acllen); + } + rc = acllen; + kfree(pacl); + } +#endif /* CONFIG_CIFS_ACL */ + break; + } + + case XATTR_ACL_ACCESS: #ifdef CONFIG_CIFS_POSIX if (sb->s_flags & MS_POSIXACL) rc = CIFSSMBGetPosixACL(xid, pTcon, full_path, - ea_value, buf_size, ACL_TYPE_ACCESS, + value, size, ACL_TYPE_ACCESS, cifs_sb->local_nls, cifs_remap(cifs_sb)); -#else - cifs_dbg(FYI, "Query POSIX ACL not supported yet\n"); -#endif /* CONFIG_CIFS_POSIX */ - } else if (strncmp(ea_name, XATTR_NAME_POSIX_ACL_DEFAULT, - strlen(XATTR_NAME_POSIX_ACL_DEFAULT)) == 0) { +#endif /* CONFIG_CIFS_POSIX */ + break; + + case XATTR_ACL_DEFAULT: #ifdef CONFIG_CIFS_POSIX if (sb->s_flags & MS_POSIXACL) rc = CIFSSMBGetPosixACL(xid, pTcon, full_path, - ea_value, buf_size, ACL_TYPE_DEFAULT, + value, size, ACL_TYPE_DEFAULT, cifs_sb->local_nls, cifs_remap(cifs_sb)); -#else - cifs_dbg(FYI, "Query POSIX default ACL not supported yet\n"); -#endif /* CONFIG_CIFS_POSIX */ - } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL, - strlen(CIFS_XATTR_CIFS_ACL)) == 0) { -#ifdef CONFIG_CIFS_ACL - u32 acllen; - struct cifs_ntsd *pacl; - - if (pTcon->ses->server->ops->get_acl == NULL) - goto get_ea_exit; /* rc already EOPNOTSUPP */ - - pacl = pTcon->ses->server->ops->get_acl(cifs_sb, - d_inode(direntry), full_path, &acllen); - if (IS_ERR(pacl)) { - rc = PTR_ERR(pacl); - cifs_dbg(VFS, "%s: error %zd getting sec desc\n", - __func__, rc); - } else { - if (ea_value) { - if (acllen > buf_size) - acllen = -ERANGE; - else - memcpy(ea_value, pacl, acllen); - } - rc = acllen; - kfree(pacl); - } -#else - cifs_dbg(FYI, "Query CIFS ACL not supported yet\n"); -#endif /* CONFIG_CIFS_ACL */ - } else if (strncmp(ea_name, - XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) { - cifs_dbg(FYI, "Trusted xattr namespace not supported yet\n"); - } else if (strncmp(ea_name, - XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0) { - cifs_dbg(FYI, "Security xattr namespace not supported yet\n"); - } else - cifs_dbg(FYI, - "illegal xattr request %s (only user namespace supported)\n", - ea_name); +#endif /* CONFIG_CIFS_POSIX */ + break; + } /* We could add an additional check for streams ie if proc/fs/cifs/streamstoxattr is set then @@ -361,34 +237,22 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, if (rc == -EINVAL) rc = -EOPNOTSUPP; -get_ea_exit: +out: kfree(full_path); free_xid(xid); cifs_put_tlink(tlink); -#endif return rc; } ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size) { ssize_t rc = -EOPNOTSUPP; -#ifdef CONFIG_CIFS_XATTR unsigned int xid; - struct cifs_sb_info *cifs_sb; + struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); struct tcon_link *tlink; struct cifs_tcon *pTcon; - struct super_block *sb; char *full_path; - if (direntry == NULL) - return -EIO; - if 
(d_really_is_negative(direntry)) - return -EIO; - sb = d_inode(direntry)->i_sb; - if (sb == NULL) - return -EIO; - - cifs_sb = CIFS_SB(sb); if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) return -EOPNOTSUPP; @@ -419,6 +283,50 @@ list_ea_exit: kfree(full_path); free_xid(xid); cifs_put_tlink(tlink); -#endif return rc; } + +static const struct xattr_handler cifs_user_xattr_handler = { + .prefix = XATTR_USER_PREFIX, + .flags = XATTR_USER, + .get = cifs_xattr_get, + .set = cifs_xattr_set, +}; + +/* os2.* attributes are treated like user.* attributes */ +static const struct xattr_handler cifs_os2_xattr_handler = { + .prefix = XATTR_OS2_PREFIX, + .flags = XATTR_USER, + .get = cifs_xattr_get, + .set = cifs_xattr_set, +}; + +static const struct xattr_handler cifs_cifs_acl_xattr_handler = { + .name = CIFS_XATTR_CIFS_ACL, + .flags = XATTR_CIFS_ACL, + .get = cifs_xattr_get, + .set = cifs_xattr_set, +}; + +static const struct xattr_handler cifs_posix_acl_access_xattr_handler = { + .name = XATTR_NAME_POSIX_ACL_ACCESS, + .flags = XATTR_ACL_ACCESS, + .get = cifs_xattr_get, + .set = cifs_xattr_set, +}; + +static const struct xattr_handler cifs_posix_acl_default_xattr_handler = { + .name = XATTR_NAME_POSIX_ACL_DEFAULT, + .flags = XATTR_ACL_DEFAULT, + .get = cifs_xattr_get, + .set = cifs_xattr_set, +}; + +const struct xattr_handler *cifs_xattr_handlers[] = { + &cifs_user_xattr_handler, + &cifs_os2_xattr_handler, + &cifs_cifs_acl_xattr_handler, + &cifs_posix_acl_access_xattr_handler, + &cifs_posix_acl_default_xattr_handler, + NULL +}; diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 42e731b8c80a..6fb8672c0892 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -424,16 +424,22 @@ static int coda_readdir(struct file *coda_file, struct dir_context *ctx) BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); host_file = cfi->cfi_container; - if (host_file->f_op->iterate) { + if (host_file->f_op->iterate || host_file->f_op->iterate_shared) { struct inode *host_inode = file_inode(host_file); - - inode_lock(host_inode); ret = -ENOENT; if (!IS_DEADDIR(host_inode)) { - ret = host_file->f_op->iterate(host_file, ctx); - file_accessed(host_file); + if (host_file->f_op->iterate_shared) { + inode_lock_shared(host_inode); + ret = host_file->f_op->iterate_shared(host_file, ctx); + file_accessed(host_file); + inode_unlock_shared(host_inode); + } else { + inode_lock(host_inode); + ret = host_file->f_op->iterate(host_file, ctx); + file_accessed(host_file); + inode_unlock(host_inode); + } } - inode_unlock(host_inode); return ret; } /* Venus: we must read Venus dirents from a file */ diff --git a/fs/compat.c b/fs/compat.c index f940cb20562f..be6e48b0a46c 100644 --- a/fs/compat.c +++ b/fs/compat.c @@ -884,7 +884,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd, struct compat_old_linux_dirent __user *, dirent, unsigned int, count) { int error; - struct fd f = fdget(fd); + struct fd f = fdget_pos(fd); struct compat_readdir_callback buf = { .ctx.actor = compat_fillonedir, .dirent = dirent @@ -897,7 +897,7 @@ COMPAT_SYSCALL_DEFINE3(old_readdir, unsigned int, fd, if (buf.result) error = buf.result; - fdput(f); + fdput_pos(f); return error; } @@ -977,7 +977,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, if (!access_ok(VERIFY_WRITE, dirent, count)) return -EFAULT; - f = fdget(fd); + f = fdget_pos(fd); if (!f.file) return -EBADF; @@ -991,7 +991,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, else error = count - buf.count; } - fdput(f); + fdput_pos(f); return error; } @@ -1066,7 +1066,7 @@ 
COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd, if (!access_ok(VERIFY_WRITE, dirent, count)) return -EFAULT; - f = fdget(fd); + f = fdget_pos(fd); if (!f.file) return -EBADF; @@ -1081,7 +1081,7 @@ COMPAT_SYSCALL_DEFINE3(getdents64, unsigned int, fd, else error = count - buf.count; } - fdput(f); + fdput_pos(f); return error; } #endif /* __ARCH_WANT_COMPAT_SYS_GETDENTS64 */ diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index ea59c891fc53..56fb26127fef 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -494,7 +494,7 @@ out: * If there is an error, the caller will reset the flags via * configfs_detach_rollback(). */ -static int configfs_detach_prep(struct dentry *dentry, struct mutex **wait_mutex) +static int configfs_detach_prep(struct dentry *dentry, struct dentry **wait) { struct configfs_dirent *parent_sd = dentry->d_fsdata; struct configfs_dirent *sd; @@ -515,8 +515,8 @@ static int configfs_detach_prep(struct dentry *dentry, struct mutex **wait_mutex if (sd->s_type & CONFIGFS_USET_DEFAULT) { /* Abort if racing with mkdir() */ if (sd->s_type & CONFIGFS_USET_IN_MKDIR) { - if (wait_mutex) - *wait_mutex = &d_inode(sd->s_dentry)->i_mutex; + if (wait) + *wait= dget(sd->s_dentry); return -EAGAIN; } @@ -524,7 +524,7 @@ static int configfs_detach_prep(struct dentry *dentry, struct mutex **wait_mutex * Yup, recursive. If there's a problem, blame * deep nesting of default_groups */ - ret = configfs_detach_prep(sd->s_dentry, wait_mutex); + ret = configfs_detach_prep(sd->s_dentry, wait); if (!ret) continue; } else @@ -1458,7 +1458,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) * the new link is temporarily attached */ do { - struct mutex *wait_mutex; + struct dentry *wait; mutex_lock(&configfs_symlink_mutex); spin_lock(&configfs_dirent_lock); @@ -1469,7 +1469,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) */ ret = sd->s_dependent_count ? -EBUSY : 0; if (!ret) { - ret = configfs_detach_prep(dentry, &wait_mutex); + ret = configfs_detach_prep(dentry, &wait); if (ret) configfs_detach_rollback(dentry); } @@ -1483,8 +1483,9 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) } /* Wait until the racing operation terminates */ - mutex_lock(wait_mutex); - mutex_unlock(wait_mutex); + inode_lock(d_inode(wait)); + inode_unlock(d_inode(wait)); + dput(wait); } } while (ret == -EAGAIN); @@ -1632,11 +1633,9 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx) if (!dir_emit_dots(file, ctx)) return 0; - if (ctx->pos == 2) { - spin_lock(&configfs_dirent_lock); + spin_lock(&configfs_dirent_lock); + if (ctx->pos == 2) list_move(q, &parent_sd->s_children); - spin_unlock(&configfs_dirent_lock); - } for (p = q->next; p != &parent_sd->s_children; p = p->next) { struct configfs_dirent *next; const char *name; @@ -1647,9 +1646,6 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx) if (!next->s_element) continue; - name = configfs_get_name(next); - len = strlen(name); - /* * We'll have a dentry and an inode for * PINNED items and for open attribute @@ -1663,7 +1659,6 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx) * they close it. Beyond that, we don't * care. 
*/ - spin_lock(&configfs_dirent_lock); dentry = next->s_dentry; if (dentry) inode = d_inode(dentry); @@ -1673,15 +1668,18 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx) if (!inode) ino = iunique(sb, 2); + name = configfs_get_name(next); + len = strlen(name); + if (!dir_emit(ctx, name, len, ino, dt_type(next))) return 0; spin_lock(&configfs_dirent_lock); list_move(q, p); - spin_unlock(&configfs_dirent_lock); p = q; ctx->pos++; } + spin_unlock(&configfs_dirent_lock); return 0; } @@ -1689,7 +1687,6 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence) { struct dentry * dentry = file->f_path.dentry; - inode_lock(d_inode(dentry)); switch (whence) { case 1: offset += file->f_pos; @@ -1697,7 +1694,6 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence) if (offset >= 0) break; default: - inode_unlock(d_inode(dentry)); return -EINVAL; } if (offset != file->f_pos) { @@ -1723,7 +1719,6 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence) spin_unlock(&configfs_dirent_lock); } } - inode_unlock(d_inode(dentry)); return offset; } @@ -1732,7 +1727,7 @@ const struct file_operations configfs_dir_operations = { .release = configfs_dir_close, .llseek = configfs_dir_lseek, .read = generic_read_dir, - .iterate = configfs_readdir, + .iterate_shared = configfs_readdir, }; /** diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 03d124ae27d7..0387968e6f47 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -156,7 +156,7 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd, if (depth > 0) { if (depth <= ARRAY_SIZE(default_group_class)) { - lockdep_set_class(&inode->i_mutex, + lockdep_set_class(&inode->i_rwsem, &default_group_class[depth - 1]); } else { /* diff --git a/fs/coredump.c b/fs/coredump.c index 47c32c3bfa1d..38a7ab87e10a 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -413,7 +413,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state) core_state->dumper.task = tsk; core_state->dumper.next = NULL; - down_write(&mm->mmap_sem); + if (down_write_killable(&mm->mmap_sem)) + return -EINTR; + if (!mm->core_state) core_waiters = zap_threads(tsk, mm, core_state, exit_code); up_write(&mm->mmap_sem); @@ -803,12 +805,9 @@ int dump_skip(struct coredump_params *cprm, size_t nr) static char zeroes[PAGE_SIZE]; struct file *file = cprm->file; if (file->f_op->llseek && file->f_op->llseek != no_llseek) { - if (cprm->written + nr > cprm->limit) - return 0; if (dump_interrupted() || file->f_op->llseek(file, nr, SEEK_CUR) < 0) return 0; - cprm->written += nr; return 1; } else { while (nr > PAGE_SIZE) { @@ -823,7 +822,7 @@ EXPORT_SYMBOL(dump_skip); int dump_align(struct coredump_params *cprm, int align) { - unsigned mod = cprm->written & (align - 1); + unsigned mod = cprm->file->f_pos & (align - 1); if (align & (align - 1)) return 0; return mod ? 
dump_skip(cprm, align - mod) : 1; diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 3a32ddf98095..7919967488cb 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c @@ -561,7 +561,7 @@ static const struct address_space_operations cramfs_aops = { static const struct file_operations cramfs_directory_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = cramfs_readdir, + .iterate_shared = cramfs_readdir, }; static const struct inode_operations cramfs_dir_inode_operations = { diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 06f5aa478bf2..1ac263eddc4e 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c @@ -78,6 +78,67 @@ out: return res; } +static int validate_user_key(struct fscrypt_info *crypt_info, + struct fscrypt_context *ctx, u8 *raw_key, + u8 *prefix, int prefix_size) +{ + u8 *full_key_descriptor; + struct key *keyring_key; + struct fscrypt_key *master_key; + const struct user_key_payload *ukp; + int full_key_len = prefix_size + (FS_KEY_DESCRIPTOR_SIZE * 2) + 1; + int res; + + full_key_descriptor = kmalloc(full_key_len, GFP_NOFS); + if (!full_key_descriptor) + return -ENOMEM; + + memcpy(full_key_descriptor, prefix, prefix_size); + sprintf(full_key_descriptor + prefix_size, + "%*phN", FS_KEY_DESCRIPTOR_SIZE, + ctx->master_key_descriptor); + full_key_descriptor[full_key_len - 1] = '\0'; + keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL); + kfree(full_key_descriptor); + if (IS_ERR(keyring_key)) + return PTR_ERR(keyring_key); + + if (keyring_key->type != &key_type_logon) { + printk_once(KERN_WARNING + "%s: key type must be logon\n", __func__); + res = -ENOKEY; + goto out; + } + down_read(&keyring_key->sem); + ukp = user_key_payload(keyring_key); + if (ukp->datalen != sizeof(struct fscrypt_key)) { + res = -EINVAL; + up_read(&keyring_key->sem); + goto out; + } + master_key = (struct fscrypt_key *)ukp->data; + BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE); + + if (master_key->size != FS_AES_256_XTS_KEY_SIZE) { + printk_once(KERN_WARNING + "%s: key size incorrect: %d\n", + __func__, master_key->size); + res = -ENOKEY; + up_read(&keyring_key->sem); + goto out; + } + res = derive_key_aes(ctx->nonce, master_key->raw, raw_key); + up_read(&keyring_key->sem); + if (res) + goto out; + + crypt_info->ci_keyring_key = keyring_key; + return 0; +out: + key_put(keyring_key); + return res; +} + static void put_crypt_info(struct fscrypt_info *ci) { if (!ci) @@ -91,12 +152,7 @@ static void put_crypt_info(struct fscrypt_info *ci) int get_crypt_info(struct inode *inode) { struct fscrypt_info *crypt_info; - u8 full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE + - (FS_KEY_DESCRIPTOR_SIZE * 2) + 1]; - struct key *keyring_key = NULL; - struct fscrypt_key *master_key; struct fscrypt_context ctx; - const struct user_key_payload *ukp; struct crypto_skcipher *ctfm; const char *cipher_str; u8 raw_key[FS_MAX_KEY_SIZE]; @@ -167,48 +223,24 @@ retry: memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); goto got_key; } - memcpy(full_key_descriptor, FS_KEY_DESC_PREFIX, - FS_KEY_DESC_PREFIX_SIZE); - sprintf(full_key_descriptor + FS_KEY_DESC_PREFIX_SIZE, - "%*phN", FS_KEY_DESCRIPTOR_SIZE, - ctx.master_key_descriptor); - full_key_descriptor[FS_KEY_DESC_PREFIX_SIZE + - (2 * FS_KEY_DESCRIPTOR_SIZE)] = '\0'; - keyring_key = request_key(&key_type_logon, full_key_descriptor, NULL); - if (IS_ERR(keyring_key)) { - res = PTR_ERR(keyring_key); - keyring_key = NULL; - goto out; - } - crypt_info->ci_keyring_key = keyring_key; - if (keyring_key->type 
!= &key_type_logon) { - printk_once(KERN_WARNING - "%s: key type must be logon\n", __func__); - res = -ENOKEY; - goto out; - } - down_read(&keyring_key->sem); - ukp = user_key_payload(keyring_key); - if (ukp->datalen != sizeof(struct fscrypt_key)) { - res = -EINVAL; - up_read(&keyring_key->sem); - goto out; - } - master_key = (struct fscrypt_key *)ukp->data; - BUILD_BUG_ON(FS_AES_128_ECB_KEY_SIZE != FS_KEY_DERIVATION_NONCE_SIZE); - if (master_key->size != FS_AES_256_XTS_KEY_SIZE) { - printk_once(KERN_WARNING - "%s: key size incorrect: %d\n", - __func__, master_key->size); - res = -ENOKEY; - up_read(&keyring_key->sem); + res = validate_user_key(crypt_info, &ctx, raw_key, + FS_KEY_DESC_PREFIX, FS_KEY_DESC_PREFIX_SIZE); + if (res && inode->i_sb->s_cop->key_prefix) { + u8 *prefix = NULL; + int prefix_size, res2; + + prefix_size = inode->i_sb->s_cop->key_prefix(inode, &prefix); + res2 = validate_user_key(crypt_info, &ctx, raw_key, + prefix, prefix_size); + if (res2) { + if (res2 == -ENOKEY) + res = -ENOKEY; + goto out; + } + } else if (res) { goto out; } - res = derive_key_aes(ctx.nonce, master_key->raw, raw_key); - up_read(&keyring_key->sem); - if (res) - goto out; got_key: ctfm = crypto_alloc_skcipher(cipher_str, 0, 0); if (!ctfm || IS_ERR(ctfm)) { @@ -32,6 +32,15 @@ #include <linux/pfn_t.h> #include <linux/sizes.h> +#define RADIX_DAX_MASK 0xf +#define RADIX_DAX_SHIFT 4 +#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY) +#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY) +#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK) +#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT)) +#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \ + RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE))) + static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax) { struct request_queue *q = bdev->bd_queue; @@ -244,7 +253,6 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, * @iocb: The control block for this I/O * @inode: The file which the I/O is directed at * @iter: The addresses to do I/O from or to - * @pos: The file offset where the I/O starts * @get_block: The filesystem method used to translate file offsets to blocks * @end_io: A filesystem callback for I/O completion * @flags: See below @@ -257,11 +265,12 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter, * is in progress. */ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode, - struct iov_iter *iter, loff_t pos, get_block_t get_block, + struct iov_iter *iter, get_block_t get_block, dio_iodone_t end_io, int flags) { struct buffer_head bh; ssize_t retval = -EINVAL; + loff_t pos = iocb->ki_pos; loff_t end = pos + iov_iter_count(iter); memset(&bh, 0, sizeof(bh)); diff --git a/fs/dcache.c b/fs/dcache.c index d5ecc6e477da..c622872c12c5 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -111,6 +111,17 @@ static inline struct hlist_bl_head *d_hash(const struct dentry *parent, return dentry_hashtable + hash_32(hash, d_hash_shift); } +#define IN_LOOKUP_SHIFT 10 +static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT]; + +static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent, + unsigned int hash) +{ + hash += (unsigned long) parent / L1_CACHE_BYTES; + return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT); +} + + /* Statistics gathering. 
*/ struct dentry_stat_t dentry_stat = { .age_limit = 45, @@ -761,6 +772,8 @@ repeat: /* Slow case: now with the dentry lock held */ rcu_read_unlock(); + WARN_ON(d_in_lookup(dentry)); + /* Unreachable? Get rid of it */ if (unlikely(d_unhashed(dentry))) goto kill_it; @@ -1558,7 +1571,11 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) * be overwriting an internal NUL character */ dentry->d_iname[DNAME_INLINE_LEN-1] = 0; - if (name->len > DNAME_INLINE_LEN-1) { + if (unlikely(!name)) { + static const struct qstr anon = QSTR_INIT("/", 1); + name = &anon; + dname = dentry->d_iname; + } else if (name->len > DNAME_INLINE_LEN-1) { size_t size = offsetof(struct external_name, name[1]); struct external_name *p = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT); @@ -1746,6 +1763,7 @@ type_determined: static void __d_instantiate(struct dentry *dentry, struct inode *inode) { unsigned add_flags = d_flags_for_inode(inode); + WARN_ON(d_in_lookup(dentry)); spin_lock(&dentry->d_lock); hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); @@ -1775,11 +1793,11 @@ void d_instantiate(struct dentry *entry, struct inode * inode) { BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); if (inode) { + security_d_instantiate(entry, inode); spin_lock(&inode->i_lock); __d_instantiate(entry, inode); spin_unlock(&inode->i_lock); } - security_d_instantiate(entry, inode); } EXPORT_SYMBOL(d_instantiate); @@ -1796,6 +1814,7 @@ int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode) { BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); + security_d_instantiate(entry, inode); spin_lock(&inode->i_lock); if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) { spin_unlock(&inode->i_lock); @@ -1804,7 +1823,6 @@ int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode) } __d_instantiate(entry, inode); spin_unlock(&inode->i_lock); - security_d_instantiate(entry, inode); return 0; } @@ -1815,9 +1833,7 @@ struct dentry *d_make_root(struct inode *root_inode) struct dentry *res = NULL; if (root_inode) { - static const struct qstr name = QSTR_INIT("/", 1); - - res = __d_alloc(root_inode->i_sb, &name); + res = __d_alloc(root_inode->i_sb, NULL); if (res) d_instantiate(res, root_inode); else @@ -1858,7 +1874,6 @@ EXPORT_SYMBOL(d_find_any_alias); static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) { - static const struct qstr anonstring = QSTR_INIT("/", 1); struct dentry *tmp; struct dentry *res; unsigned add_flags; @@ -1872,12 +1887,13 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) if (res) goto out_iput; - tmp = __d_alloc(inode->i_sb, &anonstring); + tmp = __d_alloc(inode->i_sb, NULL); if (!tmp) { res = ERR_PTR(-ENOMEM); goto out_iput; } + security_d_instantiate(tmp, inode); spin_lock(&inode->i_lock); res = __d_find_any_alias(inode); if (res) { @@ -1900,13 +1916,10 @@ static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected) hlist_bl_unlock(&tmp->d_sb->s_anon); spin_unlock(&tmp->d_lock); spin_unlock(&inode->i_lock); - security_d_instantiate(tmp, inode); return tmp; out_iput: - if (res && !IS_ERR(res)) - security_d_instantiate(res, inode); iput(inode); return res; } @@ -1975,28 +1988,36 @@ EXPORT_SYMBOL(d_obtain_root); struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode, struct qstr *name) { - struct dentry *found; - struct dentry *new; + struct dentry *found, *res; /* * First check if a dentry matching the name already exists, * if not go ahead and create it now. 
*/ found = d_hash_and_lookup(dentry->d_parent, name); - if (!found) { - new = d_alloc(dentry->d_parent, name); - if (!new) { - found = ERR_PTR(-ENOMEM); - } else { - found = d_splice_alias(inode, new); - if (found) { - dput(new); - return found; - } - return new; + if (found) { + iput(inode); + return found; + } + if (d_in_lookup(dentry)) { + found = d_alloc_parallel(dentry->d_parent, name, + dentry->d_wait); + if (IS_ERR(found) || !d_in_lookup(found)) { + iput(inode); + return found; } + } else { + found = d_alloc(dentry->d_parent, name); + if (!found) { + iput(inode); + return ERR_PTR(-ENOMEM); + } + } + res = d_splice_alias(inode, found); + if (res) { + dput(found); + return res; } - iput(inode); return found; } EXPORT_SYMBOL(d_add_ci); @@ -2363,17 +2384,194 @@ void d_rehash(struct dentry * entry) } EXPORT_SYMBOL(d_rehash); +static inline unsigned start_dir_add(struct inode *dir) +{ + + for (;;) { + unsigned n = dir->i_dir_seq; + if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) + return n; + cpu_relax(); + } +} + +static inline void end_dir_add(struct inode *dir, unsigned n) +{ + smp_store_release(&dir->i_dir_seq, n + 2); +} + +static void d_wait_lookup(struct dentry *dentry) +{ + if (d_in_lookup(dentry)) { + DECLARE_WAITQUEUE(wait, current); + add_wait_queue(dentry->d_wait, &wait); + do { + set_current_state(TASK_UNINTERRUPTIBLE); + spin_unlock(&dentry->d_lock); + schedule(); + spin_lock(&dentry->d_lock); + } while (d_in_lookup(dentry)); + } +} + +struct dentry *d_alloc_parallel(struct dentry *parent, + const struct qstr *name, + wait_queue_head_t *wq) +{ + unsigned int len = name->len; + unsigned int hash = name->hash; + const unsigned char *str = name->name; + struct hlist_bl_head *b = in_lookup_hash(parent, hash); + struct hlist_bl_node *node; + struct dentry *new = d_alloc(parent, name); + struct dentry *dentry; + unsigned seq, r_seq, d_seq; + + if (unlikely(!new)) + return ERR_PTR(-ENOMEM); + +retry: + rcu_read_lock(); + seq = smp_load_acquire(&parent->d_inode->i_dir_seq) & ~1; + r_seq = read_seqbegin(&rename_lock); + dentry = __d_lookup_rcu(parent, name, &d_seq); + if (unlikely(dentry)) { + if (!lockref_get_not_dead(&dentry->d_lockref)) { + rcu_read_unlock(); + goto retry; + } + if (read_seqcount_retry(&dentry->d_seq, d_seq)) { + rcu_read_unlock(); + dput(dentry); + goto retry; + } + rcu_read_unlock(); + dput(new); + return dentry; + } + if (unlikely(read_seqretry(&rename_lock, r_seq))) { + rcu_read_unlock(); + goto retry; + } + hlist_bl_lock(b); + if (unlikely(parent->d_inode->i_dir_seq != seq)) { + hlist_bl_unlock(b); + rcu_read_unlock(); + goto retry; + } + rcu_read_unlock(); + /* + * No changes for the parent since the beginning of d_lookup(). + * Since all removals from the chain happen with hlist_bl_lock(), + * any potential in-lookup matches are going to stay here until + * we unlock the chain. All fields are stable in everything + * we encounter. 
+ */ + hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) { + if (dentry->d_name.hash != hash) + continue; + if (dentry->d_parent != parent) + continue; + if (d_unhashed(dentry)) + continue; + if (parent->d_flags & DCACHE_OP_COMPARE) { + int tlen = dentry->d_name.len; + const char *tname = dentry->d_name.name; + if (parent->d_op->d_compare(parent, dentry, tlen, tname, name)) + continue; + } else { + if (dentry->d_name.len != len) + continue; + if (dentry_cmp(dentry, str, len)) + continue; + } + dget(dentry); + hlist_bl_unlock(b); + /* somebody is doing lookup for it right now; wait for it */ + spin_lock(&dentry->d_lock); + d_wait_lookup(dentry); + /* + * it's not in-lookup anymore; in principle we should repeat + * everything from dcache lookup, but it's likely to be what + * d_lookup() would've found anyway. If it is, just return it; + * otherwise we really have to repeat the whole thing. + */ + if (unlikely(dentry->d_name.hash != hash)) + goto mismatch; + if (unlikely(dentry->d_parent != parent)) + goto mismatch; + if (unlikely(d_unhashed(dentry))) + goto mismatch; + if (parent->d_flags & DCACHE_OP_COMPARE) { + int tlen = dentry->d_name.len; + const char *tname = dentry->d_name.name; + if (parent->d_op->d_compare(parent, dentry, tlen, tname, name)) + goto mismatch; + } else { + if (unlikely(dentry->d_name.len != len)) + goto mismatch; + if (unlikely(dentry_cmp(dentry, str, len))) + goto mismatch; + } + /* OK, it *is* a hashed match; return it */ + spin_unlock(&dentry->d_lock); + dput(new); + return dentry; + } + /* we can't take ->d_lock here; it's OK, though. */ + new->d_flags |= DCACHE_PAR_LOOKUP; + new->d_wait = wq; + hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b); + hlist_bl_unlock(b); + return new; +mismatch: + spin_unlock(&dentry->d_lock); + dput(dentry); + goto retry; +} +EXPORT_SYMBOL(d_alloc_parallel); + +void __d_lookup_done(struct dentry *dentry) +{ + struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent, + dentry->d_name.hash); + hlist_bl_lock(b); + dentry->d_flags &= ~DCACHE_PAR_LOOKUP; + __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); + wake_up_all(dentry->d_wait); + dentry->d_wait = NULL; + hlist_bl_unlock(b); + INIT_HLIST_NODE(&dentry->d_u.d_alias); + INIT_LIST_HEAD(&dentry->d_lru); +} +EXPORT_SYMBOL(__d_lookup_done); /* inode->i_lock held if inode is non-NULL */ static inline void __d_add(struct dentry *dentry, struct inode *inode) { + struct inode *dir = NULL; + unsigned n; + spin_lock(&dentry->d_lock); + if (unlikely(d_in_lookup(dentry))) { + dir = dentry->d_parent->d_inode; + n = start_dir_add(dir); + __d_lookup_done(dentry); + } if (inode) { - __d_instantiate(dentry, inode); + unsigned add_flags = d_flags_for_inode(inode); + hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); + raw_write_seqcount_begin(&dentry->d_seq); + __d_set_inode_and_type(dentry, inode, add_flags); + raw_write_seqcount_end(&dentry->d_seq); + __fsnotify_d_instantiate(dentry); + } + _d_rehash(dentry); + if (dir) + end_dir_add(dir, n); + spin_unlock(&dentry->d_lock); + if (inode) spin_unlock(&inode->i_lock); - } - security_d_instantiate(dentry, inode); - d_rehash(dentry); } /** @@ -2387,8 +2585,10 @@ static inline void __d_add(struct dentry *dentry, struct inode *inode) void d_add(struct dentry *entry, struct inode *inode) { - if (inode) + if (inode) { + security_d_instantiate(entry, inode); spin_lock(&inode->i_lock); + } __d_add(entry, inode); } EXPORT_SYMBOL(d_add); @@ -2598,6 +2798,8 @@ static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target) 
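The in-lookup hashtable and the d_alloc_parallel()/__d_lookup_done() pair added above are what let directory lookups proceed under a shared i_rwsem: only the first caller for a given name gets an in-lookup dentry back, later callers wait on the per-lookup waitqueue and reuse the result. Roughly how a caller is expected to drive it, as a sketch modelled on the VFS slow-path lookup rather than code taken from this hunk; d_lookup_done() here stands for the d_lock-taking wrapper around __d_lookup_done() declared alongside this code in include/linux/dcache.h, and example_lookup_slow is an invented name:

#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/wait.h>

/*
 * Sketch of the expected calling pattern for d_alloc_parallel(); based on
 * how the VFS slow-path lookup is meant to use it, not copied from this diff.
 */
static struct dentry *example_lookup_slow(const struct qstr *name,
					  struct dentry *dir,
					  unsigned int flags)
{
	struct dentry *dentry, *old;
	struct inode *inode = dir->d_inode;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);

	inode_lock_shared(inode);		/* shared, not exclusive, is the point */
	dentry = d_alloc_parallel(dir, name, &wq);
	if (IS_ERR(dentry))
		goto out;
	if (!d_in_lookup(dentry))
		goto out;			/* someone else already finished this lookup */

	old = inode->i_op->lookup(inode, dentry, flags);
	d_lookup_done(dentry);			/* unhash from in-lookup list, wake waiters */
	if (unlikely(old)) {
		dput(dentry);
		dentry = old;
	}
out:
	inode_unlock_shared(inode);
	return dentry;
}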
static void __d_move(struct dentry *dentry, struct dentry *target, bool exchange) { + struct inode *dir = NULL; + unsigned n; if (!dentry->d_inode) printk(KERN_WARNING "VFS: moving negative dcache entry\n"); @@ -2605,6 +2807,11 @@ static void __d_move(struct dentry *dentry, struct dentry *target, BUG_ON(d_ancestor(target, dentry)); dentry_lock_for_move(dentry, target); + if (unlikely(d_in_lookup(target))) { + dir = target->d_parent->d_inode; + n = start_dir_add(dir); + __d_lookup_done(target); + } write_seqcount_begin(&dentry->d_seq); write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); @@ -2654,6 +2861,8 @@ static void __d_move(struct dentry *dentry, struct dentry *target, write_seqcount_end(&target->d_seq); write_seqcount_end(&dentry->d_seq); + if (dir) + end_dir_add(dir, n); dentry_unlock_for_move(dentry, target); } @@ -2724,7 +2933,8 @@ struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2) static int __d_unalias(struct inode *inode, struct dentry *dentry, struct dentry *alias) { - struct mutex *m1 = NULL, *m2 = NULL; + struct mutex *m1 = NULL; + struct rw_semaphore *m2 = NULL; int ret = -ESTALE; /* If alias and dentry share a parent, then no extra locks required */ @@ -2735,15 +2945,15 @@ static int __d_unalias(struct inode *inode, if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) goto out_err; m1 = &dentry->d_sb->s_vfs_rename_mutex; - if (!inode_trylock(alias->d_parent->d_inode)) + if (!inode_trylock_shared(alias->d_parent->d_inode)) goto out_err; - m2 = &alias->d_parent->d_inode->i_mutex; + m2 = &alias->d_parent->d_inode->i_rwsem; out_unalias: __d_move(alias, dentry, false); ret = 0; out_err: if (m2) - mutex_unlock(m2); + up_read(m2); if (m1) mutex_unlock(m1); return ret; @@ -2782,6 +2992,7 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) if (!inode) goto out; + security_d_instantiate(dentry, inode); spin_lock(&inode->i_lock); if (S_ISDIR(inode->i_mode)) { struct dentry *new = __d_find_any_alias(inode); @@ -2809,7 +3020,6 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry) } else { __d_move(new, dentry, false); write_sequnlock(&rename_lock); - security_d_instantiate(new, inode); } iput(inode); return new; diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index d2ba12e23ed9..9c1c9a01b7e5 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -22,6 +22,12 @@ #include <linux/slab.h> #include <linux/atomic.h> #include <linux/device.h> +#include <linux/srcu.h> +#include <asm/poll.h> + +#include "internal.h" + +struct poll_table_struct; static ssize_t default_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos) @@ -35,27 +41,293 @@ static ssize_t default_write_file(struct file *file, const char __user *buf, return count; } -const struct file_operations debugfs_file_operations = { +const struct file_operations debugfs_noop_file_operations = { .read = default_read_file, .write = default_write_file, .open = simple_open, .llseek = noop_llseek, }; -static struct dentry *debugfs_create_mode(const char *name, umode_t mode, - struct dentry *parent, void *value, - const struct file_operations *fops, - const struct file_operations *fops_ro, - const struct file_operations *fops_wo) +/** + * debugfs_use_file_start - mark the beginning of file data access + * @dentry: the dentry object whose data is being accessed. + * @srcu_idx: a pointer to some memory to store a SRCU index in. 
+ * + * Up to a matching call to debugfs_use_file_finish(), any + * successive call into the file removing functions debugfs_remove() + * and debugfs_remove_recursive() will block. Since associated private + * file data may only get freed after a successful return of any of + * the removal functions, you may safely access it after a successful + * call to debugfs_use_file_start() without worrying about + * lifetime issues. + * + * If -%EIO is returned, the file has already been removed and thus, + * it is not safe to access any of its data. If, on the other hand, + * it is allowed to access the file data, zero is returned. + * + * Regardless of the return code, any call to + * debugfs_use_file_start() must be followed by a matching call + * to debugfs_use_file_finish(). + */ +int debugfs_use_file_start(const struct dentry *dentry, int *srcu_idx) + __acquires(&debugfs_srcu) +{ + *srcu_idx = srcu_read_lock(&debugfs_srcu); + barrier(); + if (d_unlinked(dentry)) + return -EIO; + return 0; +} +EXPORT_SYMBOL_GPL(debugfs_use_file_start); + +/** + * debugfs_use_file_finish - mark the end of file data access + * @srcu_idx: the SRCU index "created" by a former call to + * debugfs_use_file_start(). + * + * Allow any ongoing concurrent call into debugfs_remove() or + * debugfs_remove_recursive() blocked by a former call to + * debugfs_use_file_start() to proceed and return to its caller. + */ +void debugfs_use_file_finish(int srcu_idx) __releases(&debugfs_srcu) +{ + srcu_read_unlock(&debugfs_srcu, srcu_idx); +} +EXPORT_SYMBOL_GPL(debugfs_use_file_finish); + +#define F_DENTRY(filp) ((filp)->f_path.dentry) + +#define REAL_FOPS_DEREF(dentry) \ + ((const struct file_operations *)(dentry)->d_fsdata) + +static int open_proxy_open(struct inode *inode, struct file *filp) +{ + const struct dentry *dentry = F_DENTRY(filp); + const struct file_operations *real_fops = NULL; + int srcu_idx, r; + + r = debugfs_use_file_start(dentry, &srcu_idx); + if (r) { + r = -ENOENT; + goto out; + } + + real_fops = REAL_FOPS_DEREF(dentry); + real_fops = fops_get(real_fops); + if (!real_fops) { + /* Huh? Module did not clean up after itself at exit? */ + WARN(1, "debugfs file owner did not clean up at exit: %pd", + dentry); + r = -ENXIO; + goto out; + } + replace_fops(filp, real_fops); + + if (real_fops->open) + r = real_fops->open(inode, filp); + +out: + fops_put(real_fops); + debugfs_use_file_finish(srcu_idx); + return r; +} + +const struct file_operations debugfs_open_proxy_file_operations = { + .open = open_proxy_open, +}; + +#define PROTO(args...) args +#define ARGS(args...) 
args + +#define FULL_PROXY_FUNC(name, ret_type, filp, proto, args) \ +static ret_type full_proxy_ ## name(proto) \ +{ \ + const struct dentry *dentry = F_DENTRY(filp); \ + const struct file_operations *real_fops = \ + REAL_FOPS_DEREF(dentry); \ + int srcu_idx; \ + ret_type r; \ + \ + r = debugfs_use_file_start(dentry, &srcu_idx); \ + if (likely(!r)) \ + r = real_fops->name(args); \ + debugfs_use_file_finish(srcu_idx); \ + return r; \ +} + +FULL_PROXY_FUNC(llseek, loff_t, filp, + PROTO(struct file *filp, loff_t offset, int whence), + ARGS(filp, offset, whence)); + +FULL_PROXY_FUNC(read, ssize_t, filp, + PROTO(struct file *filp, char __user *buf, size_t size, + loff_t *ppos), + ARGS(filp, buf, size, ppos)); + +FULL_PROXY_FUNC(write, ssize_t, filp, + PROTO(struct file *filp, const char __user *buf, size_t size, + loff_t *ppos), + ARGS(filp, buf, size, ppos)); + +FULL_PROXY_FUNC(unlocked_ioctl, long, filp, + PROTO(struct file *filp, unsigned int cmd, unsigned long arg), + ARGS(filp, cmd, arg)); + +static unsigned int full_proxy_poll(struct file *filp, + struct poll_table_struct *wait) +{ + const struct dentry *dentry = F_DENTRY(filp); + const struct file_operations *real_fops = REAL_FOPS_DEREF(dentry); + int srcu_idx; + unsigned int r = 0; + + if (debugfs_use_file_start(dentry, &srcu_idx)) { + debugfs_use_file_finish(srcu_idx); + return POLLHUP; + } + + r = real_fops->poll(filp, wait); + debugfs_use_file_finish(srcu_idx); + return r; +} + +static int full_proxy_release(struct inode *inode, struct file *filp) +{ + const struct dentry *dentry = F_DENTRY(filp); + const struct file_operations *real_fops = REAL_FOPS_DEREF(dentry); + const struct file_operations *proxy_fops = filp->f_op; + int r = 0; + + /* + * We must not protect this against removal races here: the + * original releaser should be called unconditionally in order + * not to leak any resources. Releasers must not assume that + * ->i_private is still being meaningful here. + */ + if (real_fops->release) + r = real_fops->release(inode, filp); + + replace_fops(filp, d_inode(dentry)->i_fop); + kfree((void *)proxy_fops); + fops_put(real_fops); + return 0; +} + +static void __full_proxy_fops_init(struct file_operations *proxy_fops, + const struct file_operations *real_fops) +{ + proxy_fops->release = full_proxy_release; + if (real_fops->llseek) + proxy_fops->llseek = full_proxy_llseek; + if (real_fops->read) + proxy_fops->read = full_proxy_read; + if (real_fops->write) + proxy_fops->write = full_proxy_write; + if (real_fops->poll) + proxy_fops->poll = full_proxy_poll; + if (real_fops->unlocked_ioctl) + proxy_fops->unlocked_ioctl = full_proxy_unlocked_ioctl; +} + +static int full_proxy_open(struct inode *inode, struct file *filp) +{ + const struct dentry *dentry = F_DENTRY(filp); + const struct file_operations *real_fops = NULL; + struct file_operations *proxy_fops = NULL; + int srcu_idx, r; + + r = debugfs_use_file_start(dentry, &srcu_idx); + if (r) { + r = -ENOENT; + goto out; + } + + real_fops = REAL_FOPS_DEREF(dentry); + real_fops = fops_get(real_fops); + if (!real_fops) { + /* Huh? Module did not cleanup after itself at exit? 
*/ + WARN(1, "debugfs file owner did not clean up at exit: %pd", + dentry); + r = -ENXIO; + goto out; + } + + proxy_fops = kzalloc(sizeof(*proxy_fops), GFP_KERNEL); + if (!proxy_fops) { + r = -ENOMEM; + goto free_proxy; + } + __full_proxy_fops_init(proxy_fops, real_fops); + replace_fops(filp, proxy_fops); + + if (real_fops->open) { + r = real_fops->open(inode, filp); + + if (filp->f_op != proxy_fops) { + /* No protection against file removal anymore. */ + WARN(1, "debugfs file owner replaced proxy fops: %pd", + dentry); + goto free_proxy; + } + } + + goto out; +free_proxy: + kfree(proxy_fops); + fops_put(real_fops); +out: + debugfs_use_file_finish(srcu_idx); + return r; +} + +const struct file_operations debugfs_full_proxy_file_operations = { + .open = full_proxy_open, +}; + +ssize_t debugfs_attr_read(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + ssize_t ret; + int srcu_idx; + + ret = debugfs_use_file_start(F_DENTRY(file), &srcu_idx); + if (likely(!ret)) + ret = simple_attr_read(file, buf, len, ppos); + debugfs_use_file_finish(srcu_idx); + return ret; +} +EXPORT_SYMBOL_GPL(debugfs_attr_read); + +ssize_t debugfs_attr_write(struct file *file, const char __user *buf, + size_t len, loff_t *ppos) +{ + ssize_t ret; + int srcu_idx; + + ret = debugfs_use_file_start(F_DENTRY(file), &srcu_idx); + if (likely(!ret)) + ret = simple_attr_write(file, buf, len, ppos); + debugfs_use_file_finish(srcu_idx); + return ret; +} +EXPORT_SYMBOL_GPL(debugfs_attr_write); + +static struct dentry *debugfs_create_mode_unsafe(const char *name, umode_t mode, + struct dentry *parent, void *value, + const struct file_operations *fops, + const struct file_operations *fops_ro, + const struct file_operations *fops_wo) { /* if there are no write bits set, make read only */ if (!(mode & S_IWUGO)) - return debugfs_create_file(name, mode, parent, value, fops_ro); + return debugfs_create_file_unsafe(name, mode, parent, value, + fops_ro); /* if there are no read bits set, make write only */ if (!(mode & S_IRUGO)) - return debugfs_create_file(name, mode, parent, value, fops_wo); + return debugfs_create_file_unsafe(name, mode, parent, value, + fops_wo); - return debugfs_create_file(name, mode, parent, value, fops); + return debugfs_create_file_unsafe(name, mode, parent, value, fops); } static int debugfs_u8_set(void *data, u64 val) @@ -68,9 +340,9 @@ static int debugfs_u8_get(void *data, u64 *val) *val = *(u8 *)data; return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_u8_ro, debugfs_u8_get, NULL, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u8, debugfs_u8_get, debugfs_u8_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_ro, debugfs_u8_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n"); /** * debugfs_create_u8 - create a debugfs file that is used to read and write an unsigned 8-bit value @@ -99,7 +371,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u8_wo, NULL, debugfs_u8_set, "%llu\n"); struct dentry *debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent, u8 *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_u8, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u8, &fops_u8_ro, &fops_u8_wo); } EXPORT_SYMBOL_GPL(debugfs_create_u8); @@ -114,9 +386,9 @@ static int debugfs_u16_get(void *data, u64 *val) *val = *(u16 *)data; return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_u16, debugfs_u16_get, 
debugfs_u16_set, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_u16_ro, debugfs_u16_get, NULL, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u16, debugfs_u16_get, debugfs_u16_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_ro, debugfs_u16_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n"); /** * debugfs_create_u16 - create a debugfs file that is used to read and write an unsigned 16-bit value @@ -145,7 +417,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u16_wo, NULL, debugfs_u16_set, "%llu\n"); struct dentry *debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent, u16 *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_u16, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u16, &fops_u16_ro, &fops_u16_wo); } EXPORT_SYMBOL_GPL(debugfs_create_u16); @@ -160,9 +432,9 @@ static int debugfs_u32_get(void *data, u64 *val) *val = *(u32 *)data; return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_u32_ro, debugfs_u32_get, NULL, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u32, debugfs_u32_get, debugfs_u32_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_ro, debugfs_u32_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n"); /** * debugfs_create_u32 - create a debugfs file that is used to read and write an unsigned 32-bit value @@ -191,7 +463,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u32_wo, NULL, debugfs_u32_set, "%llu\n"); struct dentry *debugfs_create_u32(const char *name, umode_t mode, struct dentry *parent, u32 *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_u32, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u32, &fops_u32_ro, &fops_u32_wo); } EXPORT_SYMBOL_GPL(debugfs_create_u32); @@ -207,9 +479,9 @@ static int debugfs_u64_get(void *data, u64 *val) *val = *(u64 *)data; return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_u64_ro, debugfs_u64_get, NULL, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u64, debugfs_u64_get, debugfs_u64_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_ro, debugfs_u64_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); /** * debugfs_create_u64 - create a debugfs file that is used to read and write an unsigned 64-bit value @@ -238,7 +510,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n"); struct dentry *debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent, u64 *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_u64, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_u64, &fops_u64_ro, &fops_u64_wo); } EXPORT_SYMBOL_GPL(debugfs_create_u64); @@ -254,9 +526,10 @@ static int debugfs_ulong_get(void *data, u64 *val) *val = *(unsigned long *)data; return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_ulong, debugfs_ulong_get, debugfs_ulong_set, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_ulong_ro, debugfs_ulong_get, NULL, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_ulong_wo, NULL, debugfs_ulong_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong, debugfs_ulong_get, debugfs_ulong_set, + "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_ro, debugfs_ulong_get, NULL, "%llu\n"); 
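These DEFINE_SIMPLE_ATTRIBUTE to DEFINE_DEBUGFS_ATTRIBUTE conversions rely on the debugfs_attr_read()/debugfs_attr_write() wrappers introduced earlier in this file: the generated file operations take the removal-protection SRCU section themselves, which is why the helpers switch to debugfs_create_file_unsafe(). A minimal sketch of the same pattern from a caller's side; the my_* names are invented, and DEFINE_DEBUGFS_ATTRIBUTE itself is assumed from the companion include/linux/debugfs.h change that is outside this fs/ diff:

#include <linux/debugfs.h>
#include <linux/fs.h>

/* Sketch only: "my_counter" and my_driver_debugfs_init() are hypothetical. */
static u64 my_counter;

static int my_counter_get(void *data, u64 *val)
{
	*val = *(u64 *)data;	/* runs inside debugfs_use_file_start()/_finish() */
	return 0;
}

static int my_counter_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}

/* Generates fops whose ->read/->write go through debugfs_attr_read()/write(). */
DEFINE_DEBUGFS_ATTRIBUTE(fops_my_counter, my_counter_get, my_counter_set, "%llu\n");

static void my_driver_debugfs_init(struct dentry *my_debugfs_dir)
{
	/* _unsafe is fine here: the attribute fops carry their own protection. */
	debugfs_create_file_unsafe("my_counter", 0644, my_debugfs_dir,
				   &my_counter, &fops_my_counter);
}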
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ulong_wo, NULL, debugfs_ulong_set, "%llu\n"); /** * debugfs_create_ulong - create a debugfs file that is used to read and write @@ -286,26 +559,30 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_ulong_wo, NULL, debugfs_ulong_set, "%llu\n"); struct dentry *debugfs_create_ulong(const char *name, umode_t mode, struct dentry *parent, unsigned long *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_ulong, - &fops_ulong_ro, &fops_ulong_wo); + return debugfs_create_mode_unsafe(name, mode, parent, value, + &fops_ulong, &fops_ulong_ro, + &fops_ulong_wo); } EXPORT_SYMBOL_GPL(debugfs_create_ulong); -DEFINE_SIMPLE_ATTRIBUTE(fops_x8, debugfs_u8_get, debugfs_u8_set, "0x%02llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x8_ro, debugfs_u8_get, NULL, "0x%02llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x8_wo, NULL, debugfs_u8_set, "0x%02llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x8, debugfs_u8_get, debugfs_u8_set, "0x%02llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x8_ro, debugfs_u8_get, NULL, "0x%02llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x8_wo, NULL, debugfs_u8_set, "0x%02llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x16, debugfs_u16_get, debugfs_u16_set, "0x%04llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x16_ro, debugfs_u16_get, NULL, "0x%04llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x16_wo, NULL, debugfs_u16_set, "0x%04llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x16, debugfs_u16_get, debugfs_u16_set, + "0x%04llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x16_ro, debugfs_u16_get, NULL, "0x%04llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x16_wo, NULL, debugfs_u16_set, "0x%04llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, "0x%08llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, debugfs_u32_get, debugfs_u32_set, + "0x%08llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32_ro, debugfs_u32_get, NULL, "0x%08llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x32_wo, NULL, debugfs_u32_set, "0x%08llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set, "0x%016llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x64_ro, debugfs_u64_get, NULL, "0x%016llx\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_x64_wo, NULL, debugfs_u64_set, "0x%016llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x64, debugfs_u64_get, debugfs_u64_set, + "0x%016llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_ro, debugfs_u64_get, NULL, "0x%016llx\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_x64_wo, NULL, debugfs_u64_set, "0x%016llx\n"); /* * debugfs_create_x{8,16,32,64} - create a debugfs file that is used to read and write an unsigned {8,16,32,64}-bit value @@ -328,7 +605,7 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_x64_wo, NULL, debugfs_u64_set, "0x%016llx\n"); struct dentry *debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent, u8 *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_x8, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x8, &fops_x8_ro, &fops_x8_wo); } EXPORT_SYMBOL_GPL(debugfs_create_x8); @@ -346,7 +623,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x8); struct dentry *debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent, u16 *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_x16, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x16, &fops_x16_ro, &fops_x16_wo); } EXPORT_SYMBOL_GPL(debugfs_create_x16); @@ -364,7 +641,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x16); struct dentry *debugfs_create_x32(const char *name, umode_t mode, 
struct dentry *parent, u32 *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_x32, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x32, &fops_x32_ro, &fops_x32_wo); } EXPORT_SYMBOL_GPL(debugfs_create_x32); @@ -382,7 +659,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_x32); struct dentry *debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent, u64 *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_x64, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_x64, &fops_x64_ro, &fops_x64_wo); } EXPORT_SYMBOL_GPL(debugfs_create_x64); @@ -398,10 +675,10 @@ static int debugfs_size_t_get(void *data, u64 *val) *val = *(size_t *)data; return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_size_t, debugfs_size_t_get, debugfs_size_t_set, - "%llu\n"); /* %llu and %zu are more or less the same */ -DEFINE_SIMPLE_ATTRIBUTE(fops_size_t_ro, debugfs_size_t_get, NULL, "%llu\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_size_t_wo, NULL, debugfs_size_t_set, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t, debugfs_size_t_get, debugfs_size_t_set, + "%llu\n"); /* %llu and %zu are more or less the same */ +DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_ro, debugfs_size_t_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_size_t_wo, NULL, debugfs_size_t_set, "%llu\n"); /** * debugfs_create_size_t - create a debugfs file that is used to read and write an size_t value @@ -416,8 +693,9 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_size_t_wo, NULL, debugfs_size_t_set, "%llu\n"); struct dentry *debugfs_create_size_t(const char *name, umode_t mode, struct dentry *parent, size_t *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_size_t, - &fops_size_t_ro, &fops_size_t_wo); + return debugfs_create_mode_unsafe(name, mode, parent, value, + &fops_size_t, &fops_size_t_ro, + &fops_size_t_wo); } EXPORT_SYMBOL_GPL(debugfs_create_size_t); @@ -431,10 +709,12 @@ static int debugfs_atomic_t_get(void *data, u64 *val) *val = atomic_read((atomic_t *)data); return 0; } -DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, +DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t, debugfs_atomic_t_get, debugfs_atomic_t_set, "%lld\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, "%lld\n"); -DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_ro, debugfs_atomic_t_get, NULL, + "%lld\n"); +DEFINE_DEBUGFS_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, + "%lld\n"); /** * debugfs_create_atomic_t - create a debugfs file that is used to read and @@ -450,8 +730,9 @@ DEFINE_SIMPLE_ATTRIBUTE(fops_atomic_t_wo, NULL, debugfs_atomic_t_set, "%lld\n"); struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode, struct dentry *parent, atomic_t *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_atomic_t, - &fops_atomic_t_ro, &fops_atomic_t_wo); + return debugfs_create_mode_unsafe(name, mode, parent, value, + &fops_atomic_t, &fops_atomic_t_ro, + &fops_atomic_t_wo); } EXPORT_SYMBOL_GPL(debugfs_create_atomic_t); @@ -459,9 +740,17 @@ ssize_t debugfs_read_file_bool(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[3]; - bool *val = file->private_data; + bool val; + int r, srcu_idx; - if (*val) + r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx); + if (likely(!r)) + val = *(bool *)file->private_data; + debugfs_use_file_finish(srcu_idx); + if (r) + return r; + + if (val) buf[0] = 'Y'; else buf[0] = 'N'; @@ -477,6 +766,7 @@ ssize_t 
debugfs_write_file_bool(struct file *file, const char __user *user_buf, char buf[32]; size_t buf_size; bool bv; + int r, srcu_idx; bool *val = file->private_data; buf_size = min(count, (sizeof(buf)-1)); @@ -484,8 +774,14 @@ ssize_t debugfs_write_file_bool(struct file *file, const char __user *user_buf, return -EFAULT; buf[buf_size] = '\0'; - if (strtobool(buf, &bv) == 0) - *val = bv; + if (strtobool(buf, &bv) == 0) { + r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx); + if (likely(!r)) + *val = bv; + debugfs_use_file_finish(srcu_idx); + if (r) + return r; + } return count; } @@ -537,7 +833,7 @@ static const struct file_operations fops_bool_wo = { struct dentry *debugfs_create_bool(const char *name, umode_t mode, struct dentry *parent, bool *value) { - return debugfs_create_mode(name, mode, parent, value, &fops_bool, + return debugfs_create_mode_unsafe(name, mode, parent, value, &fops_bool, &fops_bool_ro, &fops_bool_wo); } EXPORT_SYMBOL_GPL(debugfs_create_bool); @@ -546,8 +842,15 @@ static ssize_t read_file_blob(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct debugfs_blob_wrapper *blob = file->private_data; - return simple_read_from_buffer(user_buf, count, ppos, blob->data, - blob->size); + ssize_t r; + int srcu_idx; + + r = debugfs_use_file_start(F_DENTRY(file), &srcu_idx); + if (likely(!r)) + r = simple_read_from_buffer(user_buf, count, ppos, blob->data, + blob->size); + debugfs_use_file_finish(srcu_idx); + return r; } static const struct file_operations fops_blob = { @@ -584,7 +887,7 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode, struct dentry *parent, struct debugfs_blob_wrapper *blob) { - return debugfs_create_file(name, mode, parent, blob, &fops_blob); + return debugfs_create_file_unsafe(name, mode, parent, blob, &fops_blob); } EXPORT_SYMBOL_GPL(debugfs_create_blob); @@ -689,7 +992,8 @@ struct dentry *debugfs_create_u32_array(const char *name, umode_t mode, data->array = array; data->elements = elements; - return debugfs_create_file(name, mode, parent, data, &u32_array_fops); + return debugfs_create_file_unsafe(name, mode, parent, data, + &u32_array_fops); } EXPORT_SYMBOL_GPL(debugfs_create_u32_array); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 8580831ed237..4bc1f68243c1 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -27,9 +27,14 @@ #include <linux/parser.h> #include <linux/magic.h> #include <linux/slab.h> +#include <linux/srcu.h> + +#include "internal.h" #define DEBUGFS_DEFAULT_MODE 0700 +DEFINE_SRCU(debugfs_srcu); + static struct vfsmount *debugfs_mount; static int debugfs_mount_count; static bool debugfs_registered; @@ -39,7 +44,8 @@ static struct inode *debugfs_get_inode(struct super_block *sb) struct inode *inode = new_inode(sb); if (inode) { inode->i_ino = get_next_ino(); - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_atime = inode->i_mtime = + inode->i_ctime = current_fs_time(sb); } return inode; } @@ -294,6 +300,37 @@ static struct dentry *end_creating(struct dentry *dentry) return dentry; } +static struct dentry *__debugfs_create_file(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *proxy_fops, + const struct file_operations *real_fops) +{ + struct dentry *dentry; + struct inode *inode; + + if (!(mode & S_IFMT)) + mode |= S_IFREG; + BUG_ON(!S_ISREG(mode)); + dentry = start_creating(name, parent); + + if (IS_ERR(dentry)) + return NULL; + + inode = debugfs_get_inode(dentry->d_sb); + if (unlikely(!inode)) + 
return failed_creating(dentry); + + inode->i_mode = mode; + inode->i_private = data; + + inode->i_fop = proxy_fops; + dentry->d_fsdata = (void *)real_fops; + + d_instantiate(dentry, inode); + fsnotify_create(d_inode(dentry->d_parent), dentry); + return end_creating(dentry); +} + /** * debugfs_create_file - create a file in the debugfs filesystem * @name: a pointer to a string containing the name of the file to create. @@ -324,29 +361,52 @@ struct dentry *debugfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { - struct dentry *dentry; - struct inode *inode; - if (!(mode & S_IFMT)) - mode |= S_IFREG; - BUG_ON(!S_ISREG(mode)); - dentry = start_creating(name, parent); - - if (IS_ERR(dentry)) - return NULL; + return __debugfs_create_file(name, mode, parent, data, + fops ? &debugfs_full_proxy_file_operations : + &debugfs_noop_file_operations, + fops); +} +EXPORT_SYMBOL_GPL(debugfs_create_file); - inode = debugfs_get_inode(dentry->d_sb); - if (unlikely(!inode)) - return failed_creating(dentry); +/** + * debugfs_create_file_unsafe - create a file in the debugfs filesystem + * @name: a pointer to a string containing the name of the file to create. + * @mode: the permission that the file should have. + * @parent: a pointer to the parent dentry for this file. This should be a + * directory dentry if set. If this parameter is NULL, then the + * file will be created in the root of the debugfs filesystem. + * @data: a pointer to something that the caller will want to get to later + * on. The inode.i_private pointer will point to this value on + * the open() call. + * @fops: a pointer to a struct file_operations that should be used for + * this file. + * + * debugfs_create_file_unsafe() is completely analogous to + * debugfs_create_file(), the only difference being that the fops + * handed it will not get protected against file removals by the + * debugfs core. + * + * It is your responsibility to protect your struct file_operation + * methods against file removals by means of debugfs_use_file_start() + * and debugfs_use_file_finish(). ->open() is still protected by + * debugfs though. + * + * Any struct file_operations defined by means of + * DEFINE_DEBUGFS_ATTRIBUTE() is protected against file removals and + * thus, may be used here. + */ +struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops) +{ - inode->i_mode = mode; - inode->i_fop = fops ? fops : &debugfs_file_operations; - inode->i_private = data; - d_instantiate(dentry, inode); - fsnotify_create(d_inode(dentry->d_parent), dentry); - return end_creating(dentry); + return __debugfs_create_file(name, mode, parent, data, + fops ? &debugfs_open_proxy_file_operations : + &debugfs_noop_file_operations, + fops); } -EXPORT_SYMBOL_GPL(debugfs_create_file); +EXPORT_SYMBOL_GPL(debugfs_create_file_unsafe); /** * debugfs_create_file_size - create a file in the debugfs filesystem @@ -461,7 +521,11 @@ struct dentry *debugfs_create_automount(const char *name, inode->i_flags |= S_AUTOMOUNT; inode->i_private = data; dentry->d_fsdata = (void *)f; + /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ + inc_nlink(inode); d_instantiate(dentry, inode); + inc_nlink(d_inode(dentry->d_parent)); + fsnotify_mkdir(d_inode(dentry->d_parent), dentry); return end_creating(dentry); } EXPORT_SYMBOL(debugfs_create_automount); @@ -565,6 +629,8 @@ void debugfs_remove(struct dentry *dentry) inode_unlock(d_inode(parent)); if (!ret) simple_release_fs(&debugfs_mount, &debugfs_mount_count); + + synchronize_srcu(&debugfs_srcu); } EXPORT_SYMBOL_GPL(debugfs_remove); @@ -642,6 +708,8 @@ void debugfs_remove_recursive(struct dentry *dentry) if (!__debugfs_remove(child, parent)) simple_release_fs(&debugfs_mount, &debugfs_mount_count); inode_unlock(d_inode(parent)); + + synchronize_srcu(&debugfs_srcu); } EXPORT_SYMBOL_GPL(debugfs_remove_recursive); diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h new file mode 100644 index 000000000000..bba52634b995 --- /dev/null +++ b/fs/debugfs/internal.h @@ -0,0 +1,26 @@ +/* + * internal.h - declarations internal to debugfs + * + * Copyright (C) 2016 Nicolai Stange <nicstange@gmail.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + */ + +#ifndef _DEBUGFS_INTERNAL_H_ +#define _DEBUGFS_INTERNAL_H_ + +struct file_operations; + +/* declared over in file.c */ +extern const struct file_operations debugfs_noop_file_operations; +extern const struct file_operations debugfs_open_proxy_file_operations; +extern const struct file_operations debugfs_full_proxy_file_operations; + +struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode, + struct dentry *parent, void *data, + const struct file_operations *fops); + +#endif /* _DEBUGFS_INTERNAL_H_ */ diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c index 655f21f99160..0b2954d7172d 100644 --- a/fs/devpts/inode.c +++ b/fs/devpts/inode.c @@ -128,6 +128,7 @@ static const match_table_t tokens = { struct pts_fs_info { struct ida allocated_ptys; struct pts_mount_opts mount_opts; + struct super_block *sb; struct dentry *ptmx_dentry; }; @@ -358,7 +359,7 @@ static const struct super_operations devpts_sops = { .show_options = devpts_show_options, }; -static void *new_pts_fs_info(void) +static void *new_pts_fs_info(struct super_block *sb) { struct pts_fs_info *fsi; @@ -369,6 +370,7 @@ static void *new_pts_fs_info(void) ida_init(&fsi->allocated_ptys); fsi->mount_opts.mode = DEVPTS_DEFAULT_MODE; fsi->mount_opts.ptmxmode = DEVPTS_DEFAULT_PTMX_MODE; + fsi->sb = sb; return fsi; } @@ -384,7 +386,7 @@ devpts_fill_super(struct super_block *s, void *data, int silent) s->s_op = &devpts_sops; s->s_time_gran = 1; - s->s_fs_info = new_pts_fs_info(); + s->s_fs_info = new_pts_fs_info(s); if (!s->s_fs_info) goto fail; @@ -524,17 +526,14 @@ static struct file_system_type devpts_fs_type = { * to the System V naming convention */ -int devpts_new_index(struct inode *ptmx_inode) +int devpts_new_index(struct pts_fs_info *fsi) { - struct super_block *sb = pts_sb_from_inode(ptmx_inode); - struct pts_fs_info *fsi; int index; int ida_ret; - if (!sb) + if (!fsi) return -ENODEV; - fsi = DEVPTS_SB(sb); retry: if (!ida_pre_get(&fsi->allocated_ptys, GFP_KERNEL)) return -ENOMEM; @@ -564,11 +563,8 @@ retry: return index; } -void devpts_kill_index(struct inode *ptmx_inode, int idx) +void devpts_kill_index(struct pts_fs_info *fsi, int idx) { - struct super_block *sb = pts_sb_from_inode(ptmx_inode); - struct pts_fs_info *fsi = DEVPTS_SB(sb); - mutex_lock(&allocated_ptys_lock); 
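The reworked devpts entry points in this hunk and the ones that follow (devpts_get_ref()/devpts_put_ref(), devpts_new_index()/devpts_kill_index(), and the dentry-based devpts_pty_new()/devpts_pty_kill()) move callers from inode-centric to pts_fs_info-centric calls. The tty side lives outside fs/, so the following is only an illustrative sequence assembled from the new signatures, with all my_* names invented and error unwinding kept minimal:

#include <linux/devpts_fs.h>
#include <linux/err.h>

/* Illustrative allocation path against the new pts_fs_info-based API. */
static struct dentry *my_allocate_slave(struct inode *ptmx_inode,
					struct file *filp, void *driver_priv,
					struct pts_fs_info **fsi_out, int *index_out)
{
	struct pts_fs_info *fsi;
	struct dentry *slave;
	int index;

	fsi = devpts_get_ref(ptmx_inode, filp);	/* pins the devpts superblock */
	if (!fsi)
		return ERR_PTR(-ENODEV);

	index = devpts_new_index(fsi);
	if (index < 0) {
		devpts_put_ref(fsi);
		return ERR_PTR(index);
	}

	/* driver_priv is what devpts_get_priv() on the slave dentry returns later */
	slave = devpts_pty_new(fsi, index, driver_priv);
	if (IS_ERR(slave)) {
		devpts_kill_index(fsi, index);
		devpts_put_ref(fsi);
		return slave;
	}

	*fsi_out = fsi;
	*index_out = index;
	return slave;
}

/* Teardown mirrors the allocation order. */
static void my_release_slave(struct pts_fs_info *fsi, struct dentry *slave, int index)
{
	devpts_pty_kill(slave);
	devpts_kill_index(fsi, index);
	devpts_put_ref(fsi);
}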
ida_remove(&fsi->allocated_ptys, idx); pty_count--; @@ -578,21 +574,25 @@ void devpts_kill_index(struct inode *ptmx_inode, int idx) /* * pty code needs to hold extra references in case of last /dev/tty close */ - -void devpts_add_ref(struct inode *ptmx_inode) +struct pts_fs_info *devpts_get_ref(struct inode *ptmx_inode, struct file *file) { - struct super_block *sb = pts_sb_from_inode(ptmx_inode); + struct super_block *sb; + struct pts_fs_info *fsi; + + sb = pts_sb_from_inode(ptmx_inode); + if (!sb) + return NULL; + fsi = DEVPTS_SB(sb); + if (!fsi) + return NULL; atomic_inc(&sb->s_active); - ihold(ptmx_inode); + return fsi; } -void devpts_del_ref(struct inode *ptmx_inode) +void devpts_put_ref(struct pts_fs_info *fsi) { - struct super_block *sb = pts_sb_from_inode(ptmx_inode); - - iput(ptmx_inode); - deactivate_super(sb); + deactivate_super(fsi->sb); } /** @@ -604,22 +604,20 @@ void devpts_del_ref(struct inode *ptmx_inode) * * The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill. */ -struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, - void *priv) +struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv) { struct dentry *dentry; - struct super_block *sb = pts_sb_from_inode(ptmx_inode); + struct super_block *sb; struct inode *inode; struct dentry *root; - struct pts_fs_info *fsi; struct pts_mount_opts *opts; char s[12]; - if (!sb) + if (!fsi) return ERR_PTR(-ENODEV); + sb = fsi->sb; root = sb->s_root; - fsi = DEVPTS_SB(sb); opts = &fsi->mount_opts; inode = new_inode(sb); @@ -630,25 +628,21 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, inode->i_uid = opts->setuid ? opts->uid : current_fsuid(); inode->i_gid = opts->setgid ? opts->gid : current_fsgid(); inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; - init_special_inode(inode, S_IFCHR|opts->mode, device); - inode->i_private = priv; + init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index)); sprintf(s, "%d", index); - inode_lock(d_inode(root)); - dentry = d_alloc_name(root, s); if (dentry) { + dentry->d_fsdata = priv; d_add(dentry, inode); fsnotify_create(d_inode(root), dentry); } else { iput(inode); - inode = ERR_PTR(-ENOMEM); + dentry = ERR_PTR(-ENOMEM); } - inode_unlock(d_inode(root)); - - return inode; + return dentry; } /** @@ -657,24 +651,10 @@ struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index, * * Returns whatever was passed as priv in devpts_pty_new for a given inode. */ -void *devpts_get_priv(struct inode *pts_inode) +void *devpts_get_priv(struct dentry *dentry) { - struct dentry *dentry; - void *priv = NULL; - - BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); - - /* Ensure dentry has not been deleted by devpts_pty_kill() */ - dentry = d_find_alias(pts_inode); - if (!dentry) - return NULL; - - if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) - priv = pts_inode->i_private; - - dput(dentry); - - return priv; + WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); + return dentry->d_fsdata; } /** @@ -683,24 +663,14 @@ void *devpts_get_priv(struct inode *pts_inode) * * This is an inverse operation of devpts_pty_new. 
*/ -void devpts_pty_kill(struct inode *inode) +void devpts_pty_kill(struct dentry *dentry) { - struct super_block *sb = pts_sb_from_inode(inode); - struct dentry *root = sb->s_root; - struct dentry *dentry; + WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC); - BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR)); - - inode_lock(d_inode(root)); - - dentry = d_find_alias(inode); - - drop_nlink(inode); + dentry->d_fsdata = NULL; + drop_nlink(dentry->d_inode); d_delete(dentry); dput(dentry); /* d_alloc_name() in devpts_pty_new() */ - dput(dentry); /* d_find_alias above */ - - inode_unlock(d_inode(root)); } static int __init init_devpts_fs(void) diff --git a/fs/direct-io.c b/fs/direct-io.c index 472037732daf..3bf3f20f8ecc 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -224,9 +224,9 @@ static inline struct page *dio_get_page(struct dio *dio, * filesystems can use it to hold additional state between get_block calls and * dio_complete. */ -static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, - bool is_async) +static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) { + loff_t offset = dio->iocb->ki_pos; ssize_t transferred = 0; /* @@ -256,6 +256,7 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, if (dio->end_io) { int err; + // XXX: ki_pos?? err = dio->end_io(dio->iocb, offset, ret, dio->private); if (err) ret = err; @@ -265,15 +266,15 @@ static ssize_t dio_complete(struct dio *dio, loff_t offset, ssize_t ret, inode_dio_end(dio->inode); if (is_async) { - if (dio->rw & WRITE) { - int err; - - err = generic_write_sync(dio->iocb->ki_filp, offset, - transferred); - if (err < 0 && ret > 0) - ret = err; - } + /* + * generic_write_sync expects ki_pos to have been updated + * already, but the submission path only does this for + * synchronous I/O. 
+ */ + dio->iocb->ki_pos += transferred; + if (dio->rw & WRITE) + ret = generic_write_sync(dio->iocb, transferred); dio->iocb->ki_complete(dio->iocb, ret, 0); } @@ -285,7 +286,7 @@ static void dio_aio_complete_work(struct work_struct *work) { struct dio *dio = container_of(work, struct dio, complete_work); - dio_complete(dio, dio->iocb->ki_pos, 0, true); + dio_complete(dio, 0, true); } static int dio_bio_complete(struct dio *dio, struct bio *bio); @@ -314,7 +315,7 @@ static void dio_bio_end_aio(struct bio *bio) queue_work(dio->inode->i_sb->s_dio_done_wq, &dio->complete_work); } else { - dio_complete(dio, dio->iocb->ki_pos, 0, true); + dio_complete(dio, 0, true); } } } @@ -1113,7 +1114,7 @@ static inline int drop_refcount(struct dio *dio) static inline ssize_t do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, - loff_t offset, get_block_t get_block, dio_iodone_t end_io, + get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags) { unsigned i_blkbits = ACCESS_ONCE(inode->i_blkbits); @@ -1121,6 +1122,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, unsigned blocksize_mask = (1 << blkbits) - 1; ssize_t retval = -EINVAL; size_t count = iov_iter_count(iter); + loff_t offset = iocb->ki_pos; loff_t end = offset + count; struct dio *dio; struct dio_submit sdio = { 0, }; @@ -1318,7 +1320,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, dio_await_completion(dio); if (drop_refcount(dio) == 0) { - retval = dio_complete(dio, offset, retval, false); + retval = dio_complete(dio, retval, false); } else BUG_ON(retval != -EIOCBQUEUED); @@ -1328,7 +1330,7 @@ out: ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, struct block_device *bdev, struct iov_iter *iter, - loff_t offset, get_block_t get_block, + get_block_t get_block, dio_iodone_t end_io, dio_submit_t submit_io, int flags) { @@ -1344,7 +1346,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, prefetch(bdev->bd_queue); prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); - return do_blockdev_direct_IO(iocb, inode, bdev, iter, offset, get_block, + return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block, end_io, submit_io, flags); } diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index d09cb4cdd09f..ebd40f46ed4c 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c @@ -105,19 +105,7 @@ static int ecryptfs_calculate_md5(char *dst, struct crypto_shash *tfm; int rc = 0; - mutex_lock(&crypt_stat->cs_hash_tfm_mutex); tfm = crypt_stat->hash_tfm; - if (!tfm) { - tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0); - if (IS_ERR(tfm)) { - rc = PTR_ERR(tfm); - ecryptfs_printk(KERN_ERR, "Error attempting to " - "allocate crypto context; rc = [%d]\n", - rc); - goto out; - } - crypt_stat->hash_tfm = tfm; - } rc = ecryptfs_hash_digest(tfm, src, len, dst); if (rc) { printk(KERN_ERR @@ -126,7 +114,6 @@ static int ecryptfs_calculate_md5(char *dst, goto out; } out: - mutex_unlock(&crypt_stat->cs_hash_tfm_mutex); return rc; } @@ -207,16 +194,29 @@ out: * * Initialize the crypt_stat structure. 
*/ -void -ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) +int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat) { + struct crypto_shash *tfm; + int rc; + + tfm = crypto_alloc_shash(ECRYPTFS_DEFAULT_HASH, 0, 0); + if (IS_ERR(tfm)) { + rc = PTR_ERR(tfm); + ecryptfs_printk(KERN_ERR, "Error attempting to " + "allocate crypto context; rc = [%d]\n", + rc); + return rc; + } + memset((void *)crypt_stat, 0, sizeof(struct ecryptfs_crypt_stat)); INIT_LIST_HEAD(&crypt_stat->keysig_list); mutex_init(&crypt_stat->keysig_list_mutex); mutex_init(&crypt_stat->cs_mutex); mutex_init(&crypt_stat->cs_tfm_mutex); - mutex_init(&crypt_stat->cs_hash_tfm_mutex); + crypt_stat->hash_tfm = tfm; crypt_stat->flags |= ECRYPTFS_STRUCT_INITIALIZED; + + return 0; } /** @@ -1369,7 +1369,9 @@ int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode) ssize_t size; int rc = 0; - size = ecryptfs_getxattr_lower(lower_dentry, ECRYPTFS_XATTR_NAME, + size = ecryptfs_getxattr_lower(lower_dentry, + ecryptfs_inode_to_lower(ecryptfs_inode), + ECRYPTFS_XATTR_NAME, page_virt, ECRYPTFS_DEFAULT_EXTENT_SIZE); if (size < 0) { if (unlikely(ecryptfs_verbosity > 0)) @@ -1391,6 +1393,7 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry, int rc; rc = ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry), + ecryptfs_inode_to_lower(inode), ECRYPTFS_XATTR_NAME, file_size, ECRYPTFS_SIZE_AND_MARKER_BYTES); if (rc < ECRYPTFS_SIZE_AND_MARKER_BYTES) diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index d123fbaa28e0..3ec495db7e82 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h @@ -242,7 +242,6 @@ struct ecryptfs_crypt_stat { struct list_head keysig_list; struct mutex keysig_list_mutex; struct mutex cs_tfm_mutex; - struct mutex cs_hash_tfm_mutex; struct mutex cs_mutex; }; @@ -577,7 +576,7 @@ int virt_to_scatterlist(const void *addr, int size, struct scatterlist *sg, int sg_size); int ecryptfs_compute_root_iv(struct ecryptfs_crypt_stat *crypt_stat); void ecryptfs_rotate_iv(unsigned char *iv); -void ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat); +int ecryptfs_init_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat); void ecryptfs_destroy_crypt_stat(struct ecryptfs_crypt_stat *crypt_stat); void ecryptfs_destroy_mount_crypt_stat( struct ecryptfs_mount_crypt_stat *mount_crypt_stat); @@ -607,8 +606,8 @@ ecryptfs_parse_packet_set(struct ecryptfs_crypt_stat *crypt_stat, unsigned char *src, struct dentry *ecryptfs_dentry); int ecryptfs_truncate(struct dentry *dentry, loff_t new_length); ssize_t -ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, - void *value, size_t size); +ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode, + const char *name, void *value, size_t size); int ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index feef8a9c4de7..7000b96b783e 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -112,7 +112,6 @@ static int ecryptfs_readdir(struct file *file, struct dir_context *ctx) .sb = inode->i_sb, }; lower_file = ecryptfs_file_to_lower(file); - lower_file->f_pos = ctx->pos; rc = iterate_dir(lower_file, &buf.ctx); ctx->pos = buf.ctx.pos; if (rc < 0) @@ -223,14 +222,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file) } ecryptfs_set_file_lower( file, ecryptfs_inode_to_private(inode)->lower_file); - if (d_is_dir(ecryptfs_dentry)) 
{ - ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); - mutex_lock(&crypt_stat->cs_mutex); - crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); - mutex_unlock(&crypt_stat->cs_mutex); - rc = 0; - goto out; - } rc = read_or_initialize_metadata(ecryptfs_dentry); if (rc) goto out_put; @@ -247,6 +238,45 @@ out: return rc; } +/** + * ecryptfs_dir_open + * @inode: inode speciying file to open + * @file: Structure to return filled in + * + * Opens the file specified by inode. + * + * Returns zero on success; non-zero otherwise + */ +static int ecryptfs_dir_open(struct inode *inode, struct file *file) +{ + struct dentry *ecryptfs_dentry = file->f_path.dentry; + /* Private value of ecryptfs_dentry allocated in + * ecryptfs_lookup() */ + struct ecryptfs_file_info *file_info; + struct file *lower_file; + + /* Released in ecryptfs_release or end of function if failure */ + file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL); + ecryptfs_set_file_private(file, file_info); + if (unlikely(!file_info)) { + ecryptfs_printk(KERN_ERR, + "Error attempting to allocate memory\n"); + return -ENOMEM; + } + lower_file = dentry_open(ecryptfs_dentry_to_lower_path(ecryptfs_dentry), + file->f_flags, current_cred()); + if (IS_ERR(lower_file)) { + printk(KERN_ERR "%s: Error attempting to initialize " + "the lower file for the dentry with name " + "[%pd]; rc = [%ld]\n", __func__, + ecryptfs_dentry, PTR_ERR(lower_file)); + kmem_cache_free(ecryptfs_file_info_cache, file_info); + return PTR_ERR(lower_file); + } + ecryptfs_set_file_lower(file, lower_file); + return 0; +} + static int ecryptfs_flush(struct file *file, fl_owner_t td) { struct file *lower_file = ecryptfs_file_to_lower(file); @@ -267,6 +297,19 @@ static int ecryptfs_release(struct inode *inode, struct file *file) return 0; } +static int ecryptfs_dir_release(struct inode *inode, struct file *file) +{ + fput(ecryptfs_file_to_lower(file)); + kmem_cache_free(ecryptfs_file_info_cache, + ecryptfs_file_to_private(file)); + return 0; +} + +static loff_t ecryptfs_dir_llseek(struct file *file, loff_t offset, int whence) +{ + return vfs_llseek(ecryptfs_file_to_lower(file), offset, whence); +} + static int ecryptfs_fsync(struct file *file, loff_t start, loff_t end, int datasync) { @@ -340,26 +383,22 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) #endif const struct file_operations ecryptfs_dir_fops = { - .iterate = ecryptfs_readdir, + .iterate_shared = ecryptfs_readdir, .read = generic_read_dir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, #endif - .open = ecryptfs_open, - .flush = ecryptfs_flush, - .release = ecryptfs_release, + .open = ecryptfs_dir_open, + .release = ecryptfs_dir_release, .fsync = ecryptfs_fsync, - .fasync = ecryptfs_fasync, - .splice_read = generic_file_splice_read, - .llseek = default_llseek, + .llseek = ecryptfs_dir_llseek, }; const struct file_operations ecryptfs_main_fops = { .llseek = generic_file_llseek, .read_iter = ecryptfs_read_update_atime, .write_iter = generic_file_write_iter, - .iterate = ecryptfs_readdir, .unlocked_ioctl = ecryptfs_unlocked_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ecryptfs_compat_ioctl, diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 224b49e71aa4..318b04689d76 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -324,9 +324,8 @@ static int ecryptfs_i_size_read(struct dentry *dentry, struct inode *inode) /** * ecryptfs_lookup_interpose - Dentry interposition for a lookup */ -static int 
ecryptfs_lookup_interpose(struct dentry *dentry, - struct dentry *lower_dentry, - struct inode *dir_inode) +static struct dentry *ecryptfs_lookup_interpose(struct dentry *dentry, + struct dentry *lower_dentry) { struct inode *inode, *lower_inode = d_inode(lower_dentry); struct ecryptfs_dentry_info *dentry_info; @@ -339,11 +338,12 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry, "to allocate ecryptfs_dentry_info struct\n", __func__); dput(lower_dentry); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(dentry->d_parent)); - fsstack_copy_attr_atime(dir_inode, d_inode(lower_dentry->d_parent)); + fsstack_copy_attr_atime(d_inode(dentry->d_parent), + d_inode(lower_dentry->d_parent)); BUG_ON(!d_count(lower_dentry)); ecryptfs_set_dentry_private(dentry, dentry_info); @@ -353,27 +353,25 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry, if (d_really_is_negative(lower_dentry)) { /* We want to add because we couldn't find in lower */ d_add(dentry, NULL); - return 0; + return NULL; } - inode = __ecryptfs_get_inode(lower_inode, dir_inode->i_sb); + inode = __ecryptfs_get_inode(lower_inode, dentry->d_sb); if (IS_ERR(inode)) { printk(KERN_ERR "%s: Error interposing; rc = [%ld]\n", __func__, PTR_ERR(inode)); - return PTR_ERR(inode); + return ERR_CAST(inode); } if (S_ISREG(inode->i_mode)) { rc = ecryptfs_i_size_read(dentry, inode); if (rc) { make_bad_inode(inode); - return rc; + return ERR_PTR(rc); } } if (inode->i_state & I_NEW) unlock_new_inode(inode); - d_add(dentry, inode); - - return rc; + return d_splice_alias(inode, dentry); } /** @@ -390,55 +388,42 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, unsigned int flags) { char *encrypted_and_encoded_name = NULL; - size_t encrypted_and_encoded_name_size; - struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; + struct ecryptfs_mount_crypt_stat *mount_crypt_stat; struct dentry *lower_dir_dentry, *lower_dentry; + const char *name = ecryptfs_dentry->d_name.name; + size_t len = ecryptfs_dentry->d_name.len; + struct dentry *res; int rc = 0; lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); - lower_dentry = lookup_one_len_unlocked(ecryptfs_dentry->d_name.name, - lower_dir_dentry, - ecryptfs_dentry->d_name.len); - if (IS_ERR(lower_dentry)) { - rc = PTR_ERR(lower_dentry); - ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " - "[%d] on lower_dentry = [%pd]\n", __func__, rc, - ecryptfs_dentry); - goto out; - } - if (d_really_is_positive(lower_dentry)) - goto interpose; + mount_crypt_stat = &ecryptfs_superblock_to_private( ecryptfs_dentry->d_sb)->mount_crypt_stat; - if (!(mount_crypt_stat - && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES))) - goto interpose; - dput(lower_dentry); - rc = ecryptfs_encrypt_and_encode_filename( - &encrypted_and_encoded_name, &encrypted_and_encoded_name_size, - mount_crypt_stat, ecryptfs_dentry->d_name.name, - ecryptfs_dentry->d_name.len); - if (rc) { - printk(KERN_ERR "%s: Error attempting to encrypt and encode " - "filename; rc = [%d]\n", __func__, rc); - goto out; + if (mount_crypt_stat + && (mount_crypt_stat->flags & ECRYPTFS_GLOBAL_ENCRYPT_FILENAMES)) { + rc = ecryptfs_encrypt_and_encode_filename( + &encrypted_and_encoded_name, &len, + mount_crypt_stat, name, len); + if (rc) { + printk(KERN_ERR "%s: Error attempting to encrypt and encode " + "filename; rc = [%d]\n", __func__, rc); + return ERR_PTR(rc); + } + name = encrypted_and_encoded_name; } - lower_dentry = 
lookup_one_len_unlocked(encrypted_and_encoded_name, - lower_dir_dentry, - encrypted_and_encoded_name_size); + + lower_dentry = lookup_one_len_unlocked(name, lower_dir_dentry, len); if (IS_ERR(lower_dentry)) { - rc = PTR_ERR(lower_dentry); ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " - "[%d] on lower_dentry = [%s]\n", __func__, rc, - encrypted_and_encoded_name); - goto out; + "[%ld] on lower_dentry = [%s]\n", __func__, + PTR_ERR(lower_dentry), + name); + res = ERR_CAST(lower_dentry); + } else { + res = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry); } -interpose: - rc = ecryptfs_lookup_interpose(ecryptfs_dentry, lower_dentry, - ecryptfs_dir_inode); -out: kfree(encrypted_and_encoded_name); - return ERR_PTR(rc); + return res; } static int ecryptfs_link(struct dentry *old_dentry, struct inode *dir, @@ -898,8 +883,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) struct ecryptfs_crypt_stat *crypt_stat; crypt_stat = &ecryptfs_inode_to_private(d_inode(dentry))->crypt_stat; - if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) - ecryptfs_init_crypt_stat(crypt_stat); + if (!(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED)) { + rc = ecryptfs_init_crypt_stat(crypt_stat); + if (rc) + return rc; + } inode = d_inode(dentry); lower_inode = ecryptfs_inode_to_lower(inode); lower_dentry = ecryptfs_dentry_to_lower(dentry); @@ -1033,29 +1021,30 @@ out: } ssize_t -ecryptfs_getxattr_lower(struct dentry *lower_dentry, const char *name, - void *value, size_t size) +ecryptfs_getxattr_lower(struct dentry *lower_dentry, struct inode *lower_inode, + const char *name, void *value, size_t size) { int rc = 0; - if (!d_inode(lower_dentry)->i_op->getxattr) { + if (!lower_inode->i_op->getxattr) { rc = -EOPNOTSUPP; goto out; } - inode_lock(d_inode(lower_dentry)); - rc = d_inode(lower_dentry)->i_op->getxattr(lower_dentry, name, value, - size); - inode_unlock(d_inode(lower_dentry)); + inode_lock(lower_inode); + rc = lower_inode->i_op->getxattr(lower_dentry, lower_inode, + name, value, size); + inode_unlock(lower_inode); out: return rc; } static ssize_t -ecryptfs_getxattr(struct dentry *dentry, const char *name, void *value, - size_t size) +ecryptfs_getxattr(struct dentry *dentry, struct inode *inode, + const char *name, void *value, size_t size) { - return ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry), name, - value, size); + return ecryptfs_getxattr_lower(ecryptfs_dentry_to_lower(dentry), + ecryptfs_inode_to_lower(inode), + name, value, size); } static ssize_t diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index e6b1d80952b9..148d11b514fb 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c @@ -436,7 +436,8 @@ static int ecryptfs_write_inode_size_to_xattr(struct inode *ecryptfs_inode) goto out; } inode_lock(lower_inode); - size = lower_inode->i_op->getxattr(lower_dentry, ECRYPTFS_XATTR_NAME, + size = lower_inode->i_op->getxattr(lower_dentry, lower_inode, + ECRYPTFS_XATTR_NAME, xattr_virt, PAGE_SIZE); if (size < 0) size = 8; diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c index 77a486d3a51b..85411ceb0508 100644 --- a/fs/ecryptfs/super.c +++ b/fs/ecryptfs/super.c @@ -55,7 +55,10 @@ static struct inode *ecryptfs_alloc_inode(struct super_block *sb) inode_info = kmem_cache_alloc(ecryptfs_inode_info_cache, GFP_KERNEL); if (unlikely(!inode_info)) goto out; - ecryptfs_init_crypt_stat(&inode_info->crypt_stat); + if (ecryptfs_init_crypt_stat(&inode_info->crypt_stat)) { + kmem_cache_free(ecryptfs_inode_info_cache, inode_info); + goto out; + } 
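/*
 * [Editor's illustrative sketch -- not part of the patch above.]  With
 * ecryptfs_init_crypt_stat() now returning int and allocating the hash
 * transform up front, callers follow an allocate-check-propagate pattern
 * (as the super.c hunk above does) and pair it with crypto_free_shash()
 * on teardown.  The struct and function names below are hypothetical;
 * crypto_alloc_shash(), crypto_free_shash(), IS_ERR() and PTR_ERR() are
 * the real kernel APIs used by the hunks above.
 */
#include <crypto/hash.h>
#include <linux/err.h>

#define EXAMPLE_HASH "md5"	/* stands in for ECRYPTFS_DEFAULT_HASH; assumption */

struct example_crypt_state {
	struct crypto_shash *hash_tfm;
};

static int example_init_crypt_state(struct example_crypt_state *st)
{
	struct crypto_shash *tfm = crypto_alloc_shash(EXAMPLE_HASH, 0, 0);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);	/* propagate the error instead of failing later */
	st->hash_tfm = tfm;
	return 0;
}

static void example_destroy_crypt_state(struct example_crypt_state *st)
{
	crypto_free_shash(st->hash_tfm);	/* matching teardown after a successful init */
}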
mutex_init(&inode_info->lower_file_mutex); atomic_set(&inode_info->lower_file_count, 0); inode_info->lower_file = NULL; diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c index d48e0d261d78..5f22e74bbade 100644 --- a/fs/efivarfs/file.c +++ b/fs/efivarfs/file.c @@ -157,7 +157,7 @@ efivarfs_ioc_setxflags(struct file *file, void __user *arg) return 0; } -long +static long efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p) { void __user *arg = (void __user *)p; diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c index e2ab6d0497f2..1d73fc6dba13 100644 --- a/fs/efivarfs/inode.c +++ b/fs/efivarfs/inode.c @@ -11,6 +11,7 @@ #include <linux/fs.h> #include <linux/ctype.h> #include <linux/slab.h> +#include <linux/uuid.h> #include "internal.h" @@ -46,11 +47,7 @@ struct inode *efivarfs_get_inode(struct super_block *sb, */ bool efivarfs_valid_name(const char *str, int len) { - static const char dashes[EFI_VARIABLE_GUID_LEN] = { - [8] = 1, [13] = 1, [18] = 1, [23] = 1 - }; const char *s = str + len - EFI_VARIABLE_GUID_LEN; - int i; /* * We need a GUID, plus at least one letter for the variable name, @@ -68,37 +65,7 @@ bool efivarfs_valid_name(const char *str, int len) * * 12345678-1234-1234-1234-123456789abc */ - for (i = 0; i < EFI_VARIABLE_GUID_LEN; i++) { - if (dashes[i]) { - if (*s++ != '-') - return false; - } else { - if (!isxdigit(*s++)) - return false; - } - } - - return true; -} - -static void efivarfs_hex_to_guid(const char *str, efi_guid_t *guid) -{ - guid->b[0] = hex_to_bin(str[6]) << 4 | hex_to_bin(str[7]); - guid->b[1] = hex_to_bin(str[4]) << 4 | hex_to_bin(str[5]); - guid->b[2] = hex_to_bin(str[2]) << 4 | hex_to_bin(str[3]); - guid->b[3] = hex_to_bin(str[0]) << 4 | hex_to_bin(str[1]); - guid->b[4] = hex_to_bin(str[11]) << 4 | hex_to_bin(str[12]); - guid->b[5] = hex_to_bin(str[9]) << 4 | hex_to_bin(str[10]); - guid->b[6] = hex_to_bin(str[16]) << 4 | hex_to_bin(str[17]); - guid->b[7] = hex_to_bin(str[14]) << 4 | hex_to_bin(str[15]); - guid->b[8] = hex_to_bin(str[19]) << 4 | hex_to_bin(str[20]); - guid->b[9] = hex_to_bin(str[21]) << 4 | hex_to_bin(str[22]); - guid->b[10] = hex_to_bin(str[24]) << 4 | hex_to_bin(str[25]); - guid->b[11] = hex_to_bin(str[26]) << 4 | hex_to_bin(str[27]); - guid->b[12] = hex_to_bin(str[28]) << 4 | hex_to_bin(str[29]); - guid->b[13] = hex_to_bin(str[30]) << 4 | hex_to_bin(str[31]); - guid->b[14] = hex_to_bin(str[32]) << 4 | hex_to_bin(str[33]); - guid->b[15] = hex_to_bin(str[34]) << 4 | hex_to_bin(str[35]); + return uuid_is_valid(s); } static int efivarfs_create(struct inode *dir, struct dentry *dentry, @@ -119,8 +86,7 @@ static int efivarfs_create(struct inode *dir, struct dentry *dentry, /* length of the variable name itself: remove GUID and separator */ namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1; - efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1, - &var->var.VendorGuid); + uuid_le_to_bin(dentry->d_name.name + namelen + 1, &var->var.VendorGuid); if (efivar_variable_is_removable(var->var.VendorGuid, dentry->d_name.name, namelen)) diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c index 553c5d2db4a4..9cb54a38832d 100644 --- a/fs/efivarfs/super.c +++ b/fs/efivarfs/super.c @@ -216,8 +216,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent) INIT_LIST_HEAD(&efivarfs_list); - err = efivar_init(efivarfs_callback, (void *)sb, false, - true, &efivarfs_list); + err = efivar_init(efivarfs_callback, (void *)sb, true, &efivarfs_list); if (err) __efivar_entry_iter(efivarfs_destroy, 
&efivarfs_list, NULL, NULL); diff --git a/fs/efs/dir.c b/fs/efs/dir.c index ce63b24f7c3e..a7be96e5f1cb 100644 --- a/fs/efs/dir.c +++ b/fs/efs/dir.c @@ -12,7 +12,7 @@ static int efs_readdir(struct file *, struct dir_context *); const struct file_operations efs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = efs_readdir, + .iterate_shared = efs_readdir, }; const struct inode_operations efs_dir_inode_operations = { @@ -100,4 +100,3 @@ static int efs_readdir(struct file *file, struct dir_context *ctx) ctx->pos = (block << EFS_DIRBSIZE_BITS) | slot; return 0; } - diff --git a/fs/efs/namei.c b/fs/efs/namei.c index 40ba9cc41bf7..d34a40edcdb2 100644 --- a/fs/efs/namei.c +++ b/fs/efs/namei.c @@ -113,7 +113,7 @@ struct dentry *efs_get_parent(struct dentry *child) ino = efs_find_entry(d_inode(child), "..", 2); if (ino) - parent = d_obtain_alias(efs_iget(d_inode(child)->i_sb, ino)); + parent = d_obtain_alias(efs_iget(child->d_sb, ino)); return parent; } diff --git a/fs/efs/super.c b/fs/efs/super.c index cb68dac4f9d3..368f7dd21c61 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c @@ -275,7 +275,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent) if (!bh) { pr_err("cannot read volume header\n"); - return -EINVAL; + return -EIO; } /* @@ -293,7 +293,7 @@ static int efs_fill_super(struct super_block *s, void *d, int silent) bh = sb_bread(s, sb->fs_start + EFS_SUPER); if (!bh) { pr_err("cannot read superblock\n"); - return -EINVAL; + return -EIO; } if (efs_validate_super(sb, (struct efs_super *) bh->b_data)) { diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 8a74a2a52e0f..10db91218933 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1583,15 +1583,15 @@ static int ep_send_events(struct eventpoll *ep, return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false); } -static inline struct timespec ep_set_mstimeout(long ms) +static inline struct timespec64 ep_set_mstimeout(long ms) { - struct timespec now, ts = { + struct timespec64 now, ts = { .tv_sec = ms / MSEC_PER_SEC, .tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC), }; - ktime_get_ts(&now); - return timespec_add_safe(now, ts); + ktime_get_ts64(&now); + return timespec64_add_safe(now, ts); } /** @@ -1621,11 +1621,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events, ktime_t expires, *to = NULL; if (timeout > 0) { - struct timespec end_time = ep_set_mstimeout(timeout); + struct timespec64 end_time = ep_set_mstimeout(timeout); slack = select_estimate_accuracy(&end_time); to = &expires; - *to = timespec_to_ktime(end_time); + *to = timespec64_to_ktime(end_time); } else if (timeout == 0) { /* * Avoid the unnecessary trip to the wait queue loop, if the diff --git a/fs/exec.c b/fs/exec.c index c4010b8207a1..887c1c955df8 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -243,10 +243,6 @@ static void put_arg_page(struct page *page) put_page(page); } -static void free_arg_page(struct linux_binprm *bprm, int i) -{ -} - static void free_arg_pages(struct linux_binprm *bprm) { } @@ -267,7 +263,10 @@ static int __bprm_mm_init(struct linux_binprm *bprm) if (!vma) return -ENOMEM; - down_write(&mm->mmap_sem); + if (down_write_killable(&mm->mmap_sem)) { + err = -EINTR; + goto err_free; + } vma->vm_mm = mm; /* @@ -294,6 +293,7 @@ static int __bprm_mm_init(struct linux_binprm *bprm) return 0; err: up_write(&mm->mmap_sem); +err_free: bprm->vma = NULL; kmem_cache_free(vm_area_cachep, vma); return err; @@ -700,7 +700,9 @@ int setup_arg_pages(struct linux_binprm *bprm, bprm->loader -= 
stack_shift; bprm->exec -= stack_shift; - down_write(&mm->mmap_sem); + if (down_write_killable(&mm->mmap_sem)) + return -EINTR; + vm_flags = VM_STACK_FLAGS; /* @@ -850,15 +852,25 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size, if (ret) return ret; + ret = deny_write_access(file); + if (ret) + return ret; + i_size = i_size_read(file_inode(file)); - if (max_size > 0 && i_size > max_size) - return -EFBIG; - if (i_size <= 0) - return -EINVAL; + if (max_size > 0 && i_size > max_size) { + ret = -EFBIG; + goto out; + } + if (i_size <= 0) { + ret = -EINVAL; + goto out; + } *buf = vmalloc(i_size); - if (!*buf) - return -ENOMEM; + if (!*buf) { + ret = -ENOMEM; + goto out; + } pos = 0; while (pos < i_size) { @@ -876,18 +888,21 @@ int kernel_read_file(struct file *file, void **buf, loff_t *size, if (pos != i_size) { ret = -EIO; - goto out; + goto out_free; } ret = security_kernel_post_read_file(file, *buf, i_size, id); if (!ret) *size = pos; -out: +out_free: if (ret < 0) { vfree(*buf); *buf = NULL; } + +out: + allow_write_access(file); return ret; } EXPORT_SYMBOL_GPL(kernel_read_file); @@ -1387,7 +1402,12 @@ static void bprm_fill_uid(struct linux_binprm *bprm) kuid_t uid; kgid_t gid; - /* clear any previous set[ug]id data from a previous binary */ + /* + * Since this can be called multiple times (via prepare_binprm), + * we must clear any previous work done when setting set[ug]id + * bits from any earlier bprm->file uses (for example when run + * first for a setuid script then again for its interpreter). + */ bprm->cred->euid = current_euid(); bprm->cred->egid = current_egid(); @@ -1481,9 +1501,6 @@ int remove_arg_zero(struct linux_binprm *bprm) kunmap_atomic(kaddr); put_arg_page(page); - - if (offset == PAGE_SIZE) - free_arg_page(bprm, (bprm->p >> PAGE_SHIFT) - 1); } while (offset == PAGE_SIZE); bprm->p++; diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c index 547b93cbea63..f69a1b5826a5 100644 --- a/fs/exofs/dir.c +++ b/fs/exofs/dir.c @@ -79,7 +79,7 @@ static int exofs_commit_chunk(struct page *page, loff_t pos, unsigned len) return err; } -static void exofs_check_page(struct page *page) +static bool exofs_check_page(struct page *page) { struct inode *dir = page->mapping->host; unsigned chunk_size = exofs_chunk_size(dir); @@ -114,7 +114,7 @@ static void exofs_check_page(struct page *page) goto Eend; out: SetPageChecked(page); - return; + return true; Ebadsize: EXOFS_ERR("ERROR [exofs_check_page]: " @@ -150,8 +150,8 @@ Eend: dir->i_ino, (page->index<<PAGE_SHIFT)+offs, _LLU(le64_to_cpu(p->inode_no))); fail: - SetPageChecked(page); SetPageError(page); + return false; } static struct page *exofs_get_page(struct inode *dir, unsigned long n) @@ -161,10 +161,10 @@ static struct page *exofs_get_page(struct inode *dir, unsigned long n) if (!IS_ERR(page)) { kmap(page); - if (!PageChecked(page)) - exofs_check_page(page); - if (PageError(page)) - goto fail; + if (unlikely(!PageChecked(page))) { + if (PageError(page) || !exofs_check_page(page)) + goto fail; + } } return page; @@ -657,5 +657,5 @@ not_empty: const struct file_operations exofs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = exofs_readdir, + .iterate_shared = exofs_readdir, }; diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c index 49e1bd00b4ec..9dc4c6dbf3c9 100644 --- a/fs/exofs/inode.c +++ b/fs/exofs/inode.c @@ -960,8 +960,7 @@ static void exofs_invalidatepage(struct page *page, unsigned int offset, /* TODO: Should be easy enough to do proprly */ -static ssize_t exofs_direct_IO(struct kiocb 
*iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t exofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { return 0; } diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 6658a50530a0..1076a4233b39 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -122,7 +122,7 @@ static int parse_options(char *options, struct exofs_mountopt *opts) if (match_int(&args[0], &option)) return -EINVAL; if (option <= 0) { - EXOFS_ERR("Timout must be > 0"); + EXOFS_ERR("Timeout must be > 0"); return -EINVAL; } opts->timeout = option * HZ; @@ -958,7 +958,7 @@ static struct dentry *exofs_get_parent(struct dentry *child) if (!ino) return ERR_PTR(-ESTALE); - return d_obtain_alias(exofs_iget(d_inode(child)->i_sb, ino)); + return d_obtain_alias(exofs_iget(child->d_sb, ino)); } static struct inode *exofs_nfs_get_inode(struct super_block *sb, diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index c46f1a190b8d..207ba8d627ca 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -143,14 +143,18 @@ static struct dentry *reconnect_one(struct vfsmount *mnt, if (err) goto out_err; dprintk("%s: found name: %s\n", __func__, nbuf); - inode_lock(parent->d_inode); - tmp = lookup_one_len(nbuf, parent, strlen(nbuf)); - inode_unlock(parent->d_inode); + tmp = lookup_one_len_unlocked(nbuf, parent, strlen(nbuf)); if (IS_ERR(tmp)) { dprintk("%s: lookup failed: %d\n", __func__, PTR_ERR(tmp)); goto out_err; } if (tmp != dentry) { + /* + * Somebody has renamed it since exportfs_get_name(); + * great, since it could've only been renamed if it + * got looked up and thus connected, and it would + * remain connected afterwards. We are done. + */ dput(tmp); goto out_reconnected; } @@ -308,7 +312,7 @@ static int get_name(const struct path *path, char *name, struct dentry *child) goto out; error = -EINVAL; - if (!file->f_op->iterate) + if (!file->f_op->iterate && !file->f_op->iterate_shared) goto out_close; buffer.sequence = 0; diff --git a/fs/ext2/acl.c b/fs/ext2/acl.c index 27695e6f4e46..42f1d1814083 100644 --- a/fs/ext2/acl.c +++ b/fs/ext2/acl.c @@ -172,9 +172,6 @@ ext2_get_acl(struct inode *inode, int type) acl = ERR_PTR(retval); kfree(value); - if (!IS_ERR(acl)) - set_cached_acl(inode, type, acl); - return acl; } diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 7ff6fcfa685d..19efd1197fa5 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -110,7 +110,7 @@ static int ext2_commit_chunk(struct page *page, loff_t pos, unsigned len) return err; } -static void ext2_check_page(struct page *page, int quiet) +static bool ext2_check_page(struct page *page, int quiet) { struct inode *dir = page->mapping->host; struct super_block *sb = dir->i_sb; @@ -148,7 +148,7 @@ static void ext2_check_page(struct page *page, int quiet) goto Eend; out: SetPageChecked(page); - return; + return true; /* Too bad, we had an error */ @@ -190,8 +190,8 @@ Eend: (unsigned long) le32_to_cpu(p->inode)); } fail: - SetPageChecked(page); SetPageError(page); + return false; } static struct page * ext2_get_page(struct inode *dir, unsigned long n, @@ -201,10 +201,10 @@ static struct page * ext2_get_page(struct inode *dir, unsigned long n, struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { kmap(page); - if (!PageChecked(page)) - ext2_check_page(page, quiet); - if (PageError(page)) - goto fail; + if (unlikely(!PageChecked(page))) { + if (PageError(page) || !ext2_check_page(page, quiet)) + goto fail; + } } return page; @@ -716,7 +716,7 @@ not_empty: const struct file_operations ext2_dir_operations = { .llseek = 
generic_file_llseek, .read = generic_read_dir, - .iterate = ext2_readdir, + .iterate_shared = ext2_readdir, .unlocked_ioctl = ext2_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext2_compat_ioctl, diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index 6bd58e6ff038..b675610391b8 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -854,20 +854,20 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block) } static ssize_t -ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) +ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); + loff_t offset = iocb->ki_pos; ssize_t ret; if (IS_DAX(inode)) - ret = dax_do_io(iocb, inode, iter, offset, ext2_get_block, NULL, + ret = dax_do_io(iocb, inode, iter, ext2_get_block, NULL, DIO_LOCKING); else - ret = blockdev_direct_IO(iocb, inode, iter, offset, - ext2_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block); if (ret < 0 && iov_iter_rw(iter) == WRITE) ext2_write_failed(mapping, offset + count); return ret; diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index d34843925b23..d446203127fc 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -82,7 +82,7 @@ struct dentry *ext2_get_parent(struct dentry *child) unsigned long ino = ext2_inode_by_name(d_inode(child), &dotdot); if (!ino) return ERR_PTR(-ENOENT); - return d_obtain_alias(ext2_iget(d_inode(child)->i_sb, ino)); + return d_obtain_alias(ext2_iget(child->d_sb, ino)); } /* diff --git a/fs/ext2/xattr_security.c b/fs/ext2/xattr_security.c index ba97f243b050..7fd3b867ce65 100644 --- a/fs/ext2/xattr_security.c +++ b/fs/ext2/xattr_security.c @@ -9,10 +9,10 @@ static int ext2_xattr_security_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_SECURITY, name, + return ext2_xattr_get(inode, EXT2_XATTR_INDEX_SECURITY, name, buffer, size); } diff --git a/fs/ext2/xattr_trusted.c b/fs/ext2/xattr_trusted.c index 2c94d1930626..0f85705ff519 100644 --- a/fs/ext2/xattr_trusted.c +++ b/fs/ext2/xattr_trusted.c @@ -16,10 +16,10 @@ ext2_xattr_trusted_list(struct dentry *dentry) static int ext2_xattr_trusted_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_TRUSTED, name, + return ext2_xattr_get(inode, EXT2_XATTR_INDEX_TRUSTED, name, buffer, size); } diff --git a/fs/ext2/xattr_user.c b/fs/ext2/xattr_user.c index 72a2a96d677f..1fafd27037cc 100644 --- a/fs/ext2/xattr_user.c +++ b/fs/ext2/xattr_user.c @@ -18,12 +18,12 @@ ext2_xattr_user_list(struct dentry *dentry) static int ext2_xattr_user_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - if (!test_opt(dentry->d_sb, XATTR_USER)) + if (!test_opt(inode->i_sb, XATTR_USER)) return -EOPNOTSUPP; - return ext2_xattr_get(d_inode(dentry), EXT2_XATTR_INDEX_USER, + return ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, name, buffer, size); } diff --git a/fs/ext4/acl.c b/fs/ext4/acl.c index 
69b1e73026a5..c6601a476c02 100644 --- a/fs/ext4/acl.c +++ b/fs/ext4/acl.c @@ -172,9 +172,6 @@ ext4_get_acl(struct inode *inode, int type) acl = ERR_PTR(retval); kfree(value); - if (!IS_ERR(acl)) - set_cached_acl(inode, type, acl); - return acl; } diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c index 4173bfe21114..68323e3da3fa 100644 --- a/fs/ext4/dir.c +++ b/fs/ext4/dir.c @@ -271,7 +271,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx) ctx->pos += ext4_rec_len_from_disk(de->rec_len, sb->s_blocksize); } - if ((ctx->pos < inode->i_size) && !dir_relax(inode)) + if ((ctx->pos < inode->i_size) && !dir_relax_shared(inode)) goto done; brelse(bh); bh = NULL; @@ -649,7 +649,7 @@ int ext4_check_all_de(struct inode *dir, struct buffer_head *bh, void *buf, const struct file_operations ext4_dir_operations = { .llseek = ext4_dir_llseek, .read = generic_read_dir, - .iterate = ext4_readdir, + .iterate_shared = ext4_readdir, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 37e28082885a..d478110c32a6 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -169,13 +169,8 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ret = __generic_file_write_iter(iocb, from); inode_unlock(inode); - if (ret > 0) { - ssize_t err; - - err = generic_write_sync(file, iocb->ki_pos - ret, ret); - if (err < 0) - ret = err; - } + if (ret > 0) + ret = generic_write_sync(iocb, ret); if (o_direct) blk_finish_plug(&plug); diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index f9ab1e8cc416..f7140ca66e3b 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -3327,13 +3327,13 @@ static int ext4_end_io_dio(struct kiocb *iocb, loff_t offset, * if the machine crashes during the write. * */ -static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct ext4_inode_info *ei = EXT4_I(inode); ssize_t ret; + loff_t offset = iocb->ki_pos; size_t count = iov_iter_count(iter); int overwrite = 0; get_block_t *get_block_func = NULL; @@ -3424,11 +3424,11 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter, BUG_ON(ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)); #endif if (IS_DAX(inode)) { - ret = dax_do_io(iocb, inode, iter, offset, get_block_func, + ret = dax_do_io(iocb, inode, iter, get_block_func, ext4_end_io_dio, dio_flags); } else ret = __blockdev_direct_IO(iocb, inode, - inode->i_sb->s_bdev, iter, offset, + inode->i_sb->s_bdev, iter, get_block_func, ext4_end_io_dio, NULL, dio_flags); @@ -3495,8 +3495,7 @@ out: return ret; } -static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter) { int unlocked = 0; struct inode *inode = iocb->ki_filp->f_mapping->host; @@ -3517,11 +3516,11 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter, unlocked = 1; } if (IS_DAX(inode)) { - ret = dax_do_io(iocb, inode, iter, offset, ext4_dio_get_block, + ret = dax_do_io(iocb, inode, iter, ext4_dio_get_block, NULL, unlocked ? 0 : DIO_LOCKING); } else { ret = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, - iter, offset, ext4_dio_get_block, + iter, ext4_dio_get_block, NULL, NULL, unlocked ? 
0 : DIO_LOCKING); } @@ -3530,12 +3529,12 @@ static ssize_t ext4_direct_IO_read(struct kiocb *iocb, struct iov_iter *iter, return ret; } -static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; size_t count = iov_iter_count(iter); + loff_t offset = iocb->ki_pos; ssize_t ret; #ifdef CONFIG_EXT4_FS_ENCRYPTION @@ -3555,9 +3554,9 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter, trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); if (iov_iter_rw(iter) == READ) - ret = ext4_direct_IO_read(iocb, iter, offset); + ret = ext4_direct_IO_read(iocb, iter); else - ret = ext4_direct_IO_write(iocb, iter, offset); + ret = ext4_direct_IO_write(iocb, iter); trace_ext4_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), ret); return ret; } diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 0acf8cacb2be..28cc412852af 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -13,8 +13,8 @@ #include <linux/compat.h> #include <linux/mount.h> #include <linux/file.h> -#include <linux/random.h> #include <linux/quotaops.h> +#include <linux/uuid.h> #include <asm/uaccess.h> #include "ext4_jbd2.h" #include "ext4.h" diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 672c99a2051b..ec4c39952e84 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1643,13 +1643,13 @@ struct dentry *ext4_get_parent(struct dentry *child) ino = le32_to_cpu(de->inode); brelse(bh); - if (!ext4_valid_inum(d_inode(child)->i_sb, ino)) { + if (!ext4_valid_inum(child->d_sb, ino)) { EXT4_ERROR_INODE(d_inode(child), "bad parent inode number: %u", ino); return ERR_PTR(-EFSCORRUPTED); } - return d_obtain_alias(ext4_iget_normal(d_inode(child)->i_sb, ino)); + return d_obtain_alias(ext4_iget_normal(child->d_sb, ino)); } /* diff --git a/fs/ext4/xattr_security.c b/fs/ext4/xattr_security.c index 3e81bdca071a..123a7d010efe 100644 --- a/fs/ext4/xattr_security.c +++ b/fs/ext4/xattr_security.c @@ -13,10 +13,10 @@ static int ext4_xattr_security_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_SECURITY, + return ext4_xattr_get(inode, EXT4_XATTR_INDEX_SECURITY, name, buffer, size); } diff --git a/fs/ext4/xattr_trusted.c b/fs/ext4/xattr_trusted.c index 2a3c6f9b8cb8..60652fa24cbc 100644 --- a/fs/ext4/xattr_trusted.c +++ b/fs/ext4/xattr_trusted.c @@ -20,10 +20,10 @@ ext4_xattr_trusted_list(struct dentry *dentry) static int ext4_xattr_trusted_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, void *buffer, - size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_TRUSTED, + return ext4_xattr_get(inode, EXT4_XATTR_INDEX_TRUSTED, name, buffer, size); } diff --git a/fs/ext4/xattr_user.c b/fs/ext4/xattr_user.c index d152f431e432..17a446ffecd3 100644 --- a/fs/ext4/xattr_user.c +++ b/fs/ext4/xattr_user.c @@ -19,12 +19,12 @@ ext4_xattr_user_list(struct dentry *dentry) static int ext4_xattr_user_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t 
size) { - if (!test_opt(dentry->d_sb, XATTR_USER)) + if (!test_opt(inode->i_sb, XATTR_USER)) return -EOPNOTSUPP; - return ext4_xattr_get(d_inode(dentry), EXT4_XATTR_INDEX_USER, + return ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, name, buffer, size); } diff --git a/fs/f2fs/Kconfig b/fs/f2fs/Kconfig index 1f8982a957f1..378c221d68a9 100644 --- a/fs/f2fs/Kconfig +++ b/fs/f2fs/Kconfig @@ -94,3 +94,11 @@ config F2FS_IO_TRACE information and block IO patterns in the filesystem level. If unsure, say N. + +config F2FS_FAULT_INJECTION + bool "F2FS fault injection facility" + depends on F2FS_FS + help + Test F2FS to inject faults such as ENOMEM, ENOSPC, and so on. + + If unsure, say N. diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c index c8f25f7241f0..a31c7e859af6 100644 --- a/fs/f2fs/acl.c +++ b/fs/f2fs/acl.c @@ -115,7 +115,7 @@ static void *f2fs_acl_to_disk(const struct posix_acl *acl, size_t *size) struct f2fs_acl_entry *entry; int i; - f2fs_acl = kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count * + f2fs_acl = f2fs_kmalloc(sizeof(struct f2fs_acl_header) + acl->a_count * sizeof(struct f2fs_acl_entry), GFP_NOFS); if (!f2fs_acl) return ERR_PTR(-ENOMEM); @@ -175,7 +175,7 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type, retval = f2fs_getxattr(inode, name_index, "", NULL, 0, dpage); if (retval > 0) { - value = kmalloc(retval, GFP_F2FS_ZERO); + value = f2fs_kmalloc(retval, GFP_F2FS_ZERO); if (!value) return ERR_PTR(-ENOMEM); retval = f2fs_getxattr(inode, name_index, "", value, @@ -190,9 +190,6 @@ static struct posix_acl *__f2fs_get_acl(struct inode *inode, int type, acl = ERR_PTR(retval); kfree(value); - if (!IS_ERR(acl)) - set_cached_acl(inode, type, acl); - return acl; } diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 0955312e5ca0..389160049993 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -26,6 +26,14 @@ static struct kmem_cache *ino_entry_slab; struct kmem_cache *inode_entry_slab; +void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io) +{ + set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); + sbi->sb->s_flags |= MS_RDONLY; + if (!end_io) + f2fs_flush_merged_bios(sbi); +} + /* * We guarantee no failure on the returned page. */ @@ -34,7 +42,7 @@ struct page *grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index) struct address_space *mapping = META_MAPPING(sbi); struct page *page = NULL; repeat: - page = grab_cache_page(mapping, index); + page = f2fs_grab_cache_page(mapping, index, false); if (!page) { cond_resched(); goto repeat; @@ -64,7 +72,7 @@ static struct page *__get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index, if (unlikely(!is_meta)) fio.rw &= ~REQ_META; repeat: - page = grab_cache_page(mapping, index); + page = f2fs_grab_cache_page(mapping, index, false); if (!page) { cond_resched(); goto repeat; @@ -91,7 +99,7 @@ repeat: * meta page. 
*/ if (unlikely(!PageUptodate(page))) - f2fs_stop_checkpoint(sbi); + f2fs_stop_checkpoint(sbi, false); out: return page; } @@ -186,7 +194,8 @@ int ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages, BUG(); } - page = grab_cache_page(META_MAPPING(sbi), fio.new_blkaddr); + page = f2fs_grab_cache_page(META_MAPPING(sbi), + fio.new_blkaddr, false); if (!page) continue; if (PageUptodate(page)) { @@ -211,7 +220,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index) bool readahead = false; page = find_get_page(META_MAPPING(sbi), index); - if (!page || (page && !PageUptodate(page))) + if (!page || !PageUptodate(page)) readahead = true; f2fs_put_page(page, 0); @@ -448,12 +457,12 @@ bool exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode) return e ? true : false; } -void release_ino_entry(struct f2fs_sb_info *sbi) +void release_ino_entry(struct f2fs_sb_info *sbi, bool all) { struct ino_entry *e, *tmp; int i; - for (i = APPEND_INO; i <= UPDATE_INO; i++) { + for (i = all ? ORPHAN_INO: APPEND_INO; i <= UPDATE_INO; i++) { struct inode_management *im = &sbi->im[i]; spin_lock(&im->ino_lock); @@ -473,6 +482,13 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi) int err = 0; spin_lock(&im->ino_lock); + +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(FAULT_ORPHAN)) { + spin_unlock(&im->ino_lock); + return -ENOSPC; + } +#endif if (unlikely(im->ino_num >= sbi->max_orphans)) err = -ENOSPC; else @@ -777,43 +793,32 @@ void update_dirty_page(struct inode *inode, struct page *page) !S_ISLNK(inode->i_mode)) return; - spin_lock(&sbi->inode_lock[type]); - __add_dirty_inode(inode, type); - inode_inc_dirty_pages(inode); - spin_unlock(&sbi->inode_lock[type]); + if (type != FILE_INODE || test_opt(sbi, DATA_FLUSH)) { + spin_lock(&sbi->inode_lock[type]); + __add_dirty_inode(inode, type); + spin_unlock(&sbi->inode_lock[type]); + } + inode_inc_dirty_pages(inode); SetPagePrivate(page); f2fs_trace_pid(page); } -void add_dirty_dir_inode(struct inode *inode) -{ - struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - - spin_lock(&sbi->inode_lock[DIR_INODE]); - __add_dirty_inode(inode, DIR_INODE); - spin_unlock(&sbi->inode_lock[DIR_INODE]); -} - void remove_dirty_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - struct f2fs_inode_info *fi = F2FS_I(inode); enum inode_type type = S_ISDIR(inode->i_mode) ? 
DIR_INODE : FILE_INODE; if (!S_ISDIR(inode->i_mode) && !S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode)) return; + if (type == FILE_INODE && !test_opt(sbi, DATA_FLUSH)) + return; + spin_lock(&sbi->inode_lock[type]); __remove_dirty_inode(inode, type); spin_unlock(&sbi->inode_lock[type]); - - /* Only from the recovery routine */ - if (is_inode_flag_set(fi, FI_DELAY_IPUT)) { - clear_inode_flag(fi, FI_DELAY_IPUT); - iput(inode); - } } int sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type) @@ -892,7 +897,7 @@ retry_flush_nodes: if (get_pages(sbi, F2FS_DIRTY_NODES)) { up_write(&sbi->node_write); - err = sync_node_pages(sbi, 0, &wbc); + err = sync_node_pages(sbi, &wbc); if (err) { f2fs_unlock_all(sbi); goto out; @@ -917,7 +922,7 @@ static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi) for (;;) { prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); - if (!get_pages(sbi, F2FS_WRITEBACK)) + if (!atomic_read(&sbi->nr_wb_bios)) break; io_schedule_timeout(5*HZ); @@ -1082,7 +1087,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* update user_block_counts */ sbi->last_valid_block_count = sbi->total_valid_block_count; - sbi->alloc_valid_block_count = 0; + percpu_counter_set(&sbi->alloc_valid_block_count, 0); /* Here, we only have one bio having CP pack */ sync_meta_pages(sbi, META_FLUSH, LONG_MAX); @@ -1098,7 +1103,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) invalidate_mapping_pages(META_MAPPING(sbi), discard_blk, discard_blk); - release_ino_entry(sbi); + release_ino_entry(sbi, false); if (unlikely(f2fs_cp_error(sbi))) return -EIO; diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 5dafb9cef12e..9a8bbc1fb1fa 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -68,13 +68,12 @@ static void f2fs_write_end_io(struct bio *bio) if (unlikely(bio->bi_error)) { set_bit(AS_EIO, &page->mapping->flags); - f2fs_stop_checkpoint(sbi); + f2fs_stop_checkpoint(sbi, true); } end_page_writeback(page); - dec_page_count(sbi, F2FS_WRITEBACK); } - - if (!get_pages(sbi, F2FS_WRITEBACK) && wq_has_sleeper(&sbi->cp_wait)) + if (atomic_dec_and_test(&sbi->nr_wb_bios) && + wq_has_sleeper(&sbi->cp_wait)) wake_up(&sbi->cp_wait); bio_put(bio); @@ -98,6 +97,14 @@ static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, return bio; } +static inline void __submit_bio(struct f2fs_sb_info *sbi, int rw, + struct bio *bio) +{ + if (!is_read_io(rw)) + atomic_inc(&sbi->nr_wb_bios); + submit_bio(rw, bio); +} + static void __submit_merged_bio(struct f2fs_bio_info *io) { struct f2fs_io_info *fio = &io->fio; @@ -110,7 +117,7 @@ static void __submit_merged_bio(struct f2fs_bio_info *io) else trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); - submit_bio(fio->rw, io->bio); + __submit_bio(io->sbi, fio->rw, io->bio); io->bio = NULL; } @@ -228,7 +235,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) return -EFAULT; } - submit_bio(fio->rw, bio); + __submit_bio(fio->sbi, fio->rw, bio); return 0; } @@ -248,9 +255,6 @@ void f2fs_submit_page_mbio(struct f2fs_io_info *fio) down_write(&io->io_rwsem); - if (!is_read) - inc_page_count(sbi, F2FS_WRITEBACK); - if (io->bio && (io->last_block_in_bio != fio->new_blkaddr - 1 || io->fio.rw != fio->rw)) __submit_merged_bio(io); @@ -278,6 +282,16 @@ alloc_new: trace_f2fs_submit_page_mbio(fio->page, fio); } +static void __set_data_blkaddr(struct dnode_of_data *dn) +{ + struct f2fs_node *rn = F2FS_NODE(dn->node_page); + __le32 *addr_array; + + /* Get physical address of data block */ + 
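/*
 * [Editor's illustrative sketch -- not part of the patch above.]  The f2fs
 * hunks above drop the per-page F2FS_WRITEBACK counter and instead track
 * in-flight write bios with an atomic counter: __submit_bio() bumps it
 * before submitting a write, the bio end_io handler drops it and wakes the
 * checkpoint waiter on the last completion.  A minimal, self-contained
 * version of that pattern; the names are hypothetical, chosen to mirror
 * nr_wb_bios/cp_wait, and the wait side is simplified to wait_event().
 */
#include <linux/atomic.h>
#include <linux/wait.h>

static atomic_t example_nr_wb_bios = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(example_cp_wait);

static void example_account_write_submit(void)
{
	/* done just before submit_bio() on the write path */
	atomic_inc(&example_nr_wb_bios);
}

static void example_account_write_end_io(void)
{
	/* done in the bio completion callback: last writer wakes the waiter */
	if (atomic_dec_and_test(&example_nr_wb_bios) &&
	    wq_has_sleeper(&example_cp_wait))
		wake_up(&example_cp_wait);
}

static void example_wait_for_writeback(void)
{
	/* checkpoint side: sleep until every submitted write bio has completed */
	wait_event(example_cp_wait, !atomic_read(&example_nr_wb_bios));
}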
addr_array = blkaddr_in_node(rn); + addr_array[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr); +} + /* * Lock ordering for the change of data block address: * ->data_page @@ -286,19 +300,9 @@ alloc_new: */ void set_data_blkaddr(struct dnode_of_data *dn) { - struct f2fs_node *rn; - __le32 *addr_array; - struct page *node_page = dn->node_page; - unsigned int ofs_in_node = dn->ofs_in_node; - - f2fs_wait_on_page_writeback(node_page, NODE, true); - - rn = F2FS_NODE(node_page); - - /* Get physical address of data block */ - addr_array = blkaddr_in_node(rn); - addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr); - if (set_page_dirty(node_page)) + f2fs_wait_on_page_writeback(dn->node_page, NODE, true); + __set_data_blkaddr(dn); + if (set_page_dirty(dn->node_page)) dn->node_changed = true; } @@ -309,24 +313,53 @@ void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr) f2fs_update_extent_cache(dn); } -int reserve_new_block(struct dnode_of_data *dn) +/* dn->ofs_in_node will be returned with up-to-date last block pointer */ +int reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count) { struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); + if (!count) + return 0; + if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) return -EPERM; - if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) + if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count))) return -ENOSPC; - trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node); + trace_f2fs_reserve_new_blocks(dn->inode, dn->nid, + dn->ofs_in_node, count); + + f2fs_wait_on_page_writeback(dn->node_page, NODE, true); + + for (; count > 0; dn->ofs_in_node++) { + block_t blkaddr = + datablock_addr(dn->node_page, dn->ofs_in_node); + if (blkaddr == NULL_ADDR) { + dn->data_blkaddr = NEW_ADDR; + __set_data_blkaddr(dn); + count--; + } + } + + if (set_page_dirty(dn->node_page)) + dn->node_changed = true; - dn->data_blkaddr = NEW_ADDR; - set_data_blkaddr(dn); mark_inode_dirty(dn->inode); sync_inode_page(dn); return 0; } +/* Should keep dn->ofs_in_node unchanged */ +int reserve_new_block(struct dnode_of_data *dn) +{ + unsigned int ofs_in_node = dn->ofs_in_node; + int ret; + + ret = reserve_new_blocks(dn, 1); + dn->ofs_in_node = ofs_in_node; + return ret; +} + int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) { bool need_put = dn->inode_page ? false : true; @@ -545,6 +578,7 @@ static int __allocate_data_block(struct dnode_of_data *dn) struct node_info ni; int seg = CURSEG_WARM_DATA; pgoff_t fofs; + blkcnt_t count = 1; if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) return -EPERM; @@ -553,7 +587,7 @@ static int __allocate_data_block(struct dnode_of_data *dn) if (dn->data_blkaddr == NEW_ADDR) goto alloc; - if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) + if (unlikely(!inc_valid_block_count(sbi, dn->inode, &count))) return -ENOSPC; alloc: @@ -582,8 +616,8 @@ ssize_t f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from) struct f2fs_map_blocks map; ssize_t ret = 0; - map.m_lblk = F2FS_BYTES_TO_BLK(iocb->ki_pos); - map.m_len = F2FS_BLK_ALIGN(iov_iter_count(from)); + map.m_lblk = F2FS_BLK_ALIGN(iocb->ki_pos); + map.m_len = F2FS_BYTES_TO_BLK(iov_iter_count(from)); map.m_next_pgofs = NULL; if (f2fs_encrypted_inode(inode)) @@ -621,8 +655,10 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, struct dnode_of_data dn; struct f2fs_sb_info *sbi = F2FS_I_SB(inode); int mode = create ? 
ALLOC_NODE : LOOKUP_NODE_RA; - pgoff_t pgofs, end_offset; + pgoff_t pgofs, end_offset, end; int err = 0, ofs = 1; + unsigned int ofs_in_node, last_ofs_in_node; + blkcnt_t prealloc; struct extent_info ei; bool allocated = false; block_t blkaddr; @@ -632,6 +668,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, /* it only supports block size == page size */ pgofs = (pgoff_t)map->m_lblk; + end = pgofs + maxblocks; if (!create && f2fs_lookup_extent_cache(inode, pgofs, &ei)) { map->m_pblk = ei.blk + pgofs - ei.fofs; @@ -648,6 +685,8 @@ next_dnode: set_new_dnode(&dn, inode, NULL, NULL, 0); err = get_dnode_of_data(&dn, pgofs, mode); if (err) { + if (flag == F2FS_GET_BLOCK_BMAP) + map->m_pblk = 0; if (err == -ENOENT) { err = 0; if (map->m_next_pgofs) @@ -657,6 +696,8 @@ next_dnode: goto unlock_out; } + prealloc = 0; + ofs_in_node = dn.ofs_in_node; end_offset = ADDRS_PER_PAGE(dn.node_page, inode); next_block: @@ -669,31 +710,41 @@ next_block: goto sync_out; } if (flag == F2FS_GET_BLOCK_PRE_AIO) { - if (blkaddr == NULL_ADDR) - err = reserve_new_block(&dn); + if (blkaddr == NULL_ADDR) { + prealloc++; + last_ofs_in_node = dn.ofs_in_node; + } } else { err = __allocate_data_block(&dn); + if (!err) { + set_inode_flag(F2FS_I(inode), + FI_APPEND_WRITE); + allocated = true; + } } if (err) goto sync_out; - allocated = true; map->m_flags = F2FS_MAP_NEW; blkaddr = dn.data_blkaddr; } else { + if (flag == F2FS_GET_BLOCK_BMAP) { + map->m_pblk = 0; + goto sync_out; + } if (flag == F2FS_GET_BLOCK_FIEMAP && blkaddr == NULL_ADDR) { if (map->m_next_pgofs) *map->m_next_pgofs = pgofs + 1; } if (flag != F2FS_GET_BLOCK_FIEMAP || - blkaddr != NEW_ADDR) { - if (flag == F2FS_GET_BLOCK_BMAP) - err = -ENOENT; + blkaddr != NEW_ADDR) goto sync_out; - } } } + if (flag == F2FS_GET_BLOCK_PRE_AIO) + goto skip; + if (map->m_len == 0) { /* preallocated unwritten block should be mapped for fiemap. 
*/ if (blkaddr == NEW_ADDR) @@ -705,32 +756,49 @@ next_block: } else if ((map->m_pblk != NEW_ADDR && blkaddr == (map->m_pblk + ofs)) || (map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) || - flag == F2FS_GET_BLOCK_PRE_DIO || - flag == F2FS_GET_BLOCK_PRE_AIO) { + flag == F2FS_GET_BLOCK_PRE_DIO) { ofs++; map->m_len++; } else { goto sync_out; } +skip: dn.ofs_in_node++; pgofs++; - if (map->m_len < maxblocks) { - if (dn.ofs_in_node < end_offset) - goto next_block; + /* preallocate blocks in batch for one dnode page */ + if (flag == F2FS_GET_BLOCK_PRE_AIO && + (pgofs == end || dn.ofs_in_node == end_offset)) { - if (allocated) - sync_inode_page(&dn); - f2fs_put_dnode(&dn); + dn.ofs_in_node = ofs_in_node; + err = reserve_new_blocks(&dn, prealloc); + if (err) + goto sync_out; - if (create) { - f2fs_unlock_op(sbi); - f2fs_balance_fs(sbi, allocated); + map->m_len += dn.ofs_in_node - ofs_in_node; + if (prealloc && dn.ofs_in_node != last_ofs_in_node + 1) { + err = -ENOSPC; + goto sync_out; } - allocated = false; - goto next_dnode; + dn.ofs_in_node = end_offset; + } + + if (pgofs >= end) + goto sync_out; + else if (dn.ofs_in_node < end_offset) + goto next_block; + + if (allocated) + sync_inode_page(&dn); + f2fs_put_dnode(&dn); + + if (create) { + f2fs_unlock_op(sbi); + f2fs_balance_fs(sbi, allocated); } + allocated = false; + goto next_dnode; sync_out: if (allocated) @@ -983,7 +1051,7 @@ got_it: */ if (bio && (last_block_in_bio != block_nr - 1)) { submit_and_realloc: - submit_bio(READ, bio); + __submit_bio(F2FS_I_SB(inode), READ, bio); bio = NULL; } if (bio == NULL) { @@ -1026,7 +1094,7 @@ set_error_page: goto next_page; confused: if (bio) { - submit_bio(READ, bio); + __submit_bio(F2FS_I_SB(inode), READ, bio); bio = NULL; } unlock_page(page); @@ -1036,7 +1104,7 @@ next_page: } BUG_ON(pages && !list_empty(pages)); if (bio) - submit_bio(READ, bio); + __submit_bio(F2FS_I_SB(inode), READ, bio); return 0; } @@ -1177,8 +1245,10 @@ write: goto redirty_out; if (f2fs_is_drop_cache(inode)) goto out; - if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim && - available_free_memory(sbi, BASE_CHECK)) + /* we should not write 0'th page having journal header */ + if (f2fs_is_volatile_file(inode) && (!page->index || + (!wbc->for_reclaim && + available_free_memory(sbi, BASE_CHECK)))) goto redirty_out; /* Dentry blocks are controlled by checkpoint */ @@ -1480,7 +1550,8 @@ restart: if (pos + len <= MAX_INLINE_DATA) { read_inline_data(page, ipage); set_inode_flag(F2FS_I(inode), FI_DATA_EXIST); - set_inline_node(ipage); + if (inode->i_nlink) + set_inline_node(ipage); } else { err = f2fs_convert_inline_page(&dn, page); if (err) @@ -1496,7 +1567,7 @@ restart: } else { /* hole case */ err = get_dnode_of_data(&dn, index, LOOKUP_NODE); - if (err || (!err && dn.data_blkaddr == NULL_ADDR)) { + if (err || dn.data_blkaddr == NULL_ADDR) { f2fs_put_dnode(&dn); f2fs_lock_op(sbi); locked = true; @@ -1665,12 +1736,12 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter, return 0; } -static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct address_space *mapping = iocb->ki_filp->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); + loff_t offset = iocb->ki_pos; int err; err = check_direct_IO(inode, iter, offset); @@ -1682,9 +1753,13 @@ static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, trace_f2fs_direct_IO_enter(inode, offset, count, 
iov_iter_rw(iter)); - err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio); - if (err < 0 && iov_iter_rw(iter) == WRITE) - f2fs_write_failed(mapping, offset + count); + err = blockdev_direct_IO(iocb, inode, iter, get_data_block_dio); + if (iov_iter_rw(iter) == WRITE) { + if (err > 0) + set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE); + else if (err < 0) + f2fs_write_failed(mapping, offset + count); + } trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err); @@ -1714,6 +1789,7 @@ void f2fs_invalidate_page(struct page *page, unsigned int offset, if (IS_ATOMIC_WRITTEN_PAGE(page)) return; + set_page_private(page, 0); ClearPagePrivate(page); } @@ -1727,6 +1803,7 @@ int f2fs_release_page(struct page *page, gfp_t wait) if (IS_ATOMIC_WRITTEN_PAGE(page)) return 0; + set_page_private(page, 0); ClearPagePrivate(page); return 1; } diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index f4a61a5ff79f..d89a425055d0 100644 --- a/fs/f2fs/debug.c +++ b/fs/f2fs/debug.c @@ -48,7 +48,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE]; si->ndirty_files = sbi->ndirty_inode[FILE_INODE]; si->inmem_pages = get_pages(sbi, F2FS_INMEM_PAGES); - si->wb_pages = get_pages(sbi, F2FS_WRITEBACK); + si->wb_bios = atomic_read(&sbi->nr_wb_bios); si->total_count = (int)sbi->user_block_count / sbi->blocks_per_seg; si->rsvd_segs = reserved_segments(sbi); si->overp_segs = overprovision_segments(sbi); @@ -58,6 +58,7 @@ static void update_general_status(struct f2fs_sb_info *sbi) si->inline_xattr = atomic_read(&sbi->inline_xattr); si->inline_inode = atomic_read(&sbi->inline_inode); si->inline_dir = atomic_read(&sbi->inline_dir); + si->orphans = sbi->im[ORPHAN_INO].ino_num; si->utilization = utilization(sbi); si->free_segs = free_segments(sbi); @@ -143,6 +144,7 @@ static void update_mem_info(struct f2fs_sb_info *sbi) si->base_mem = sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize; si->base_mem += 2 * sizeof(struct f2fs_inode_info); si->base_mem += sizeof(*sbi->ckpt); + si->base_mem += sizeof(struct percpu_counter) * NR_COUNT_TYPE; /* build sm */ si->base_mem += sizeof(struct f2fs_sm_info); @@ -192,7 +194,7 @@ get_cache: si->cache_mem += NM_I(sbi)->dirty_nat_cnt * sizeof(struct nat_entry_set); si->cache_mem += si->inmem_pages * sizeof(struct inmem_pages); - for (i = 0; i <= UPDATE_INO; i++) + for (i = 0; i <= ORPHAN_INO; i++) si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry); si->cache_mem += atomic_read(&sbi->total_ext_tree) * sizeof(struct extent_tree); @@ -216,8 +218,9 @@ static int stat_show(struct seq_file *s, void *v) list_for_each_entry(si, &f2fs_stat_list, stat_list) { update_general_status(si->sbi); - seq_printf(s, "\n=====[ partition info(%pg). #%d ]=====\n", - si->sbi->sb->s_bdev, i++); + seq_printf(s, "\n=====[ partition info(%pg). #%d, %s]=====\n", + si->sbi->sb->s_bdev, i++, + f2fs_readonly(si->sbi->sb) ? 
"RO": "RW"); seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ", si->sit_area_segs, si->nat_area_segs); seq_printf(s, "[SSA: %d] [MAIN: %d", @@ -237,6 +240,8 @@ static int stat_show(struct seq_file *s, void *v) si->inline_inode); seq_printf(s, " - Inline_dentry Inode: %u\n", si->inline_dir); + seq_printf(s, " - Orphan Inode: %u\n", + si->orphans); seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n", si->main_area_segs, si->main_area_sections, si->main_area_zones); @@ -295,15 +300,15 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, " - Inner Struct Count: tree: %d(%d), node: %d\n", si->ext_tree, si->zombie_tree, si->ext_node); seq_puts(s, "\nBalancing F2FS Async:\n"); - seq_printf(s, " - inmem: %4d, wb: %4d\n", - si->inmem_pages, si->wb_pages); - seq_printf(s, " - nodes: %4d in %4d\n", + seq_printf(s, " - inmem: %4lld, wb_bios: %4d\n", + si->inmem_pages, si->wb_bios); + seq_printf(s, " - nodes: %4lld in %4d\n", si->ndirty_node, si->node_pages); - seq_printf(s, " - dents: %4d in dirs:%4d\n", + seq_printf(s, " - dents: %4lld in dirs:%4d\n", si->ndirty_dent, si->ndirty_dirs); - seq_printf(s, " - datas: %4d in files:%4d\n", + seq_printf(s, " - datas: %4lld in files:%4d\n", si->ndirty_data, si->ndirty_files); - seq_printf(s, " - meta: %4d in %4d\n", + seq_printf(s, " - meta: %4lld in %4d\n", si->ndirty_meta, si->meta_pages); seq_printf(s, " - NATs: %9d/%9d\n - SITs: %9d/%9d\n", si->dirty_nats, si->nats, si->dirty_sits, si->sits); diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index af819571bce7..f9313f684540 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -48,7 +48,6 @@ unsigned char f2fs_filetype_table[F2FS_FT_MAX] = { [F2FS_FT_SYMLINK] = DT_LNK, }; -#define S_SHIFT 12 static unsigned char f2fs_type_by_mode[S_IFMT >> S_SHIFT] = { [S_IFREG >> S_SHIFT] = F2FS_FT_REG_FILE, [S_IFDIR >> S_SHIFT] = F2FS_FT_DIR, @@ -64,6 +63,13 @@ void set_de_type(struct f2fs_dir_entry *de, umode_t mode) de->file_type = f2fs_type_by_mode[(mode & S_IFMT) >> S_SHIFT]; } +unsigned char get_de_type(struct f2fs_dir_entry *de) +{ + if (de->file_type < F2FS_FT_MAX) + return f2fs_filetype_table[de->file_type]; + return DT_UNKNOWN; +} + static unsigned long dir_block_index(unsigned int level, int dir_level, unsigned int idx) { @@ -95,11 +101,6 @@ static struct f2fs_dir_entry *find_in_block(struct page *dentry_page, else kunmap(dentry_page); - /* - * For the most part, it should be a bug when name_len is zero. - * We stop here for figuring out where the bugs has occurred. 
- */ - f2fs_bug_on(F2FS_P_SB(dentry_page), d.max < 0); return de; } @@ -124,6 +125,11 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname, de = &d->dentry[bit_pos]; + if (unlikely(!de->name_len)) { + bit_pos++; + continue; + } + /* encrypted case */ de_name.name = d->filename[bit_pos]; de_name.len = le16_to_cpu(de->name_len); @@ -141,10 +147,6 @@ struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *fname, *max_slots = max_len; max_len = 0; - /* remain bug on condition */ - if (unlikely(!de->name_len)) - d->max = -1; - bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); } @@ -389,9 +391,14 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, return page; if (S_ISDIR(inode->i_mode)) { + /* in order to handle error case */ + get_page(page); err = make_empty_dir(inode, dir, page); - if (err) - goto error; + if (err) { + lock_page(page); + goto put_error; + } + put_page(page); } err = f2fs_init_acl(inode, dir, page, dpage); @@ -435,13 +442,12 @@ struct page *init_inode_metadata(struct inode *inode, struct inode *dir, return page; put_error: - f2fs_put_page(page, 1); -error: - /* once the failed inode becomes a bad inode, i_mode is S_IFREG */ + /* truncate empty dir pages */ truncate_inode_pages(&inode->i_data, 0); - truncate_blocks(inode, 0, false); - remove_dirty_inode(inode); - remove_inode_page(inode); + + clear_nlink(inode); + update_inode(inode, page); + f2fs_put_page(page, 1); return ERR_PTR(err); } @@ -509,11 +515,7 @@ void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *d, } } -/* - * Caller should grab and release a rwsem by calling f2fs_lock_op() and - * f2fs_unlock_op(). - */ -int __f2fs_add_link(struct inode *dir, const struct qstr *name, +int f2fs_add_regular_entry(struct inode *dir, const struct qstr *new_name, struct inode *inode, nid_t ino, umode_t mode) { unsigned int bit_pos; @@ -526,28 +528,11 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct f2fs_dentry_block *dentry_blk = NULL; struct f2fs_dentry_ptr d; struct page *page = NULL; - struct fscrypt_name fname; - struct qstr new_name; - int slots, err; - - err = fscrypt_setup_filename(dir, name, 0, &fname); - if (err) - return err; - - new_name.name = fname_name(&fname); - new_name.len = fname_len(&fname); - - if (f2fs_has_inline_dentry(dir)) { - err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode); - if (!err || err != -EAGAIN) - goto out; - else - err = 0; - } + int slots, err = 0; level = 0; - slots = GET_DENTRY_SLOTS(new_name.len); - dentry_hash = f2fs_dentry_hash(&new_name); + slots = GET_DENTRY_SLOTS(new_name->len); + dentry_hash = f2fs_dentry_hash(new_name); current_depth = F2FS_I(dir)->i_current_depth; if (F2FS_I(dir)->chash == dentry_hash) { @@ -556,10 +541,12 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, } start: - if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) { - err = -ENOSPC; - goto out; - } +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(FAULT_DIR_DEPTH)) + return -ENOSPC; +#endif + if (unlikely(current_depth == MAX_DIR_HASH_DEPTH)) + return -ENOSPC; /* Increase the depth, if required */ if (level == current_depth) @@ -573,10 +560,8 @@ start: for (block = bidx; block <= (bidx + nblock - 1); block++) { dentry_page = get_new_data_page(dir, NULL, block, true); - if (IS_ERR(dentry_page)) { - err = PTR_ERR(dentry_page); - goto out; - } + if (IS_ERR(dentry_page)) + return PTR_ERR(dentry_page); dentry_blk = kmap(dentry_page); bit_pos = 
room_for_filename(&dentry_blk->dentry_bitmap, @@ -596,7 +581,7 @@ add_dentry: if (inode) { down_write(&F2FS_I(inode)->i_sem); - page = init_inode_metadata(inode, dir, &new_name, NULL); + page = init_inode_metadata(inode, dir, new_name, NULL); if (IS_ERR(page)) { err = PTR_ERR(page); goto fail; @@ -606,7 +591,7 @@ add_dentry: } make_dentry_ptr(NULL, &d, (void *)dentry_blk, 1); - f2fs_update_dentry(ino, mode, &d, &new_name, dentry_hash, bit_pos); + f2fs_update_dentry(ino, mode, &d, new_name, dentry_hash, bit_pos); set_page_dirty(dentry_page); @@ -628,7 +613,34 @@ fail: } kunmap(dentry_page); f2fs_put_page(dentry_page, 1); -out: + + return err; +} + +/* + * Caller should grab and release a rwsem by calling f2fs_lock_op() and + * f2fs_unlock_op(). + */ +int __f2fs_add_link(struct inode *dir, const struct qstr *name, + struct inode *inode, nid_t ino, umode_t mode) +{ + struct fscrypt_name fname; + struct qstr new_name; + int err; + + err = fscrypt_setup_filename(dir, name, 0, &fname); + if (err) + return err; + + new_name.name = fname_name(&fname); + new_name.len = fname_len(&fname); + + err = -EAGAIN; + if (f2fs_has_inline_dentry(dir)) + err = f2fs_add_inline_entry(dir, &new_name, inode, ino, mode); + if (err == -EAGAIN) + err = f2fs_add_regular_entry(dir, &new_name, inode, ino, mode); + fscrypt_free_filename(&fname); f2fs_update_time(F2FS_I_SB(dir), REQ_TIME); return err; @@ -792,10 +804,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, continue; } - if (de->file_type < F2FS_FT_MAX) - d_type = f2fs_filetype_table[de->file_type]; - else - d_type = DT_UNKNOWN; + d_type = get_de_type(de); de_name.name = d->filename[bit_pos]; de_name.len = le16_to_cpu(de->name_len); @@ -804,7 +813,7 @@ bool f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d, int save_len = fstr->len; int ret; - de_name.name = kmalloc(de_name.len, GFP_NOFS); + de_name.name = f2fs_kmalloc(de_name.len, GFP_NOFS); if (!de_name.name) return false; @@ -887,6 +896,7 @@ static int f2fs_readdir(struct file *file, struct dir_context *ctx) kunmap(dentry_page); f2fs_put_page(dentry_page, 1); } + err = 0; out: fscrypt_fname_free_buffer(&fstr); return err; @@ -902,7 +912,7 @@ static int f2fs_dir_open(struct inode *inode, struct file *filp) const struct file_operations f2fs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = f2fs_readdir, + .iterate_shared = f2fs_readdir, .fsync = f2fs_sync_file, .open = f2fs_dir_open, .unlocked_ioctl = f2fs_ioctl, diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index c859bb044728..5bfcdb9b69f2 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -196,8 +196,7 @@ bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext) if (!i_ext || !i_ext->len) return false; - set_extent_info(&ei, le32_to_cpu(i_ext->fofs), - le32_to_cpu(i_ext->blk), le32_to_cpu(i_ext->len)); + get_extent_info(&ei, i_ext); write_lock(&et->lock); if (atomic_read(&et->node_cnt)) diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 7a4558d17f36..916e7c238e3d 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -37,6 +37,57 @@ } while (0) #endif +#ifdef CONFIG_F2FS_FAULT_INJECTION +enum { + FAULT_KMALLOC, + FAULT_PAGE_ALLOC, + FAULT_ALLOC_NID, + FAULT_ORPHAN, + FAULT_BLOCK, + FAULT_DIR_DEPTH, + FAULT_MAX, +}; + +struct f2fs_fault_info { + atomic_t inject_ops; + unsigned int inject_rate; + unsigned int inject_type; +}; + +extern struct f2fs_fault_info f2fs_fault; +extern char *fault_name[FAULT_MAX]; +#define 
IS_FAULT_SET(type) (f2fs_fault.inject_type & (1 << (type))) + +static inline bool time_to_inject(int type) +{ + if (!f2fs_fault.inject_rate) + return false; + if (type == FAULT_KMALLOC && !IS_FAULT_SET(type)) + return false; + else if (type == FAULT_PAGE_ALLOC && !IS_FAULT_SET(type)) + return false; + else if (type == FAULT_ALLOC_NID && !IS_FAULT_SET(type)) + return false; + else if (type == FAULT_ORPHAN && !IS_FAULT_SET(type)) + return false; + else if (type == FAULT_BLOCK && !IS_FAULT_SET(type)) + return false; + else if (type == FAULT_DIR_DEPTH && !IS_FAULT_SET(type)) + return false; + + atomic_inc(&f2fs_fault.inject_ops); + if (atomic_read(&f2fs_fault.inject_ops) >= f2fs_fault.inject_rate) { + atomic_set(&f2fs_fault.inject_ops, 0); + printk("%sF2FS-fs : inject %s in %pF\n", + KERN_INFO, + fault_name[type], + __builtin_return_address(0)); + return true; + } + return false; +} +#endif + /* * For mount options */ @@ -56,6 +107,7 @@ #define F2FS_MOUNT_EXTENT_CACHE 0x00002000 #define F2FS_MOUNT_FORCE_FG_GC 0x00004000 #define F2FS_MOUNT_DATA_FLUSH 0x00008000 +#define F2FS_MOUNT_FAULT_INJECTION 0x00010000 #define clear_opt(sbi, option) (sbi->mount_opt.opt &= ~F2FS_MOUNT_##option) #define set_opt(sbi, option) (sbi->mount_opt.opt |= F2FS_MOUNT_##option) @@ -159,7 +211,6 @@ struct fsync_inode_entry { struct inode *inode; /* vfs inode pointer */ block_t blkaddr; /* block address locating the last fsync */ block_t last_dentry; /* block address locating the last dentry */ - block_t last_inode; /* block address locating the last inode */ }; #define nats_in_cursum(jnl) (le16_to_cpu(jnl->n_nats)) @@ -385,7 +436,7 @@ struct f2fs_inode_info { /* Use below internally in f2fs*/ unsigned long flags; /* use to pass per-file flags */ struct rw_semaphore i_sem; /* protect fi info */ - atomic_t dirty_pages; /* # of dirty pages */ + struct percpu_counter dirty_pages; /* # of dirty pages */ f2fs_hash_t chash; /* hash value of given file name */ unsigned int clevel; /* maximum level of given file name */ nid_t i_xattr_nid; /* node id that contains xattrs */ @@ -398,11 +449,11 @@ struct f2fs_inode_info { }; static inline void get_extent_info(struct extent_info *ext, - struct f2fs_extent i_ext) + struct f2fs_extent *i_ext) { - ext->fofs = le32_to_cpu(i_ext.fofs); - ext->blk = le32_to_cpu(i_ext.blk); - ext->len = le32_to_cpu(i_ext.len); + ext->fofs = le32_to_cpu(i_ext->fofs); + ext->blk = le32_to_cpu(i_ext->blk); + ext->len = le32_to_cpu(i_ext->len); } static inline void set_raw_extent(struct extent_info *ext, @@ -599,7 +650,6 @@ struct f2fs_sm_info { * dirty dentry blocks, dirty node blocks, and dirty meta blocks. 
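/*
 * Illustrative sketch, not part of the patch: how a caller is expected to
 * gate an operation on the fault-injection machinery introduced above.
 * time_to_inject() fails roughly one gated call in f2fs_fault.inject_rate,
 * and only for fault types whose bit is set in f2fs_fault.inject_type (see
 * IS_FAULT_SET).  "example_try_alloc_nid" is a hypothetical wrapper, not a
 * real f2fs function; the in-tree users are f2fs_kmalloc(), alloc_nid(),
 * inc_valid_block_count() and friends further down in this series.
 */
static bool example_try_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
{
#ifdef CONFIG_F2FS_FAULT_INJECTION
	/* simulate nid exhaustion so the error paths get exercised */
	if (time_to_inject(FAULT_ALLOC_NID))
		return false;
#endif
	return alloc_nid(sbi, nid);
}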
*/ enum count_type { - F2FS_WRITEBACK, F2FS_DIRTY_DENTS, F2FS_DIRTY_DATA, F2FS_DIRTY_NODES, @@ -672,6 +722,7 @@ enum { SBI_IS_CLOSE, /* specify unmounting */ SBI_NEED_FSCK, /* need fsck.f2fs to fix */ SBI_POR_DOING, /* recovery is doing or not */ + SBI_NEED_SB_WRITE, /* need to recover superblock */ }; enum { @@ -680,6 +731,10 @@ enum { MAX_TIME, }; +#ifdef CONFIG_F2FS_FS_ENCRYPTION +#define F2FS_KEY_DESC_PREFIX "f2fs:" +#define F2FS_KEY_DESC_PREFIX_SIZE 5 +#endif struct f2fs_sb_info { struct super_block *sb; /* pointer to VFS super block */ struct proc_dir_entry *s_proc; /* proc entry */ @@ -687,6 +742,10 @@ struct f2fs_sb_info { int valid_super_block; /* valid super block no */ int s_flag; /* flags for sbi */ +#ifdef CONFIG_F2FS_FS_ENCRYPTION + u8 key_prefix[F2FS_KEY_DESC_PREFIX_SIZE]; + u8 key_prefix_size; +#endif /* for node-related operations */ struct f2fs_nm_info *nm_info; /* node manager */ struct inode *node_inode; /* cache node blocks */ @@ -742,18 +801,24 @@ struct f2fs_sb_info { unsigned int total_sections; /* total section count */ unsigned int total_node_count; /* total node block count */ unsigned int total_valid_node_count; /* valid node block count */ - unsigned int total_valid_inode_count; /* valid inode count */ loff_t max_file_blocks; /* max block index of file */ int active_logs; /* # of active logs */ int dir_level; /* directory level */ block_t user_block_count; /* # of user blocks */ block_t total_valid_block_count; /* # of valid blocks */ - block_t alloc_valid_block_count; /* # of allocated blocks */ block_t discard_blks; /* discard command candidats */ block_t last_valid_block_count; /* for recovery */ u32 s_next_generation; /* for NFS support */ - atomic_t nr_pages[NR_COUNT_TYPE]; /* # of pages, see count_type */ + atomic_t nr_wb_bios; /* # of writeback bios */ + + /* # of pages, see count_type */ + struct percpu_counter nr_pages[NR_COUNT_TYPE]; + /* # of allocated blocks */ + struct percpu_counter alloc_valid_block_count; + + /* valid inode count */ + struct percpu_counter total_valid_inode_count; struct f2fs_mount_info mount_opt; /* mount options */ @@ -1055,21 +1120,33 @@ static inline bool f2fs_has_xattr_block(unsigned int ofs) } static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi, - struct inode *inode, blkcnt_t count) + struct inode *inode, blkcnt_t *count) { block_t valid_block_count; spin_lock(&sbi->stat_lock); - valid_block_count = - sbi->total_valid_block_count + (block_t)count; - if (unlikely(valid_block_count > sbi->user_block_count)) { +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(FAULT_BLOCK)) { spin_unlock(&sbi->stat_lock); return false; } - inode->i_blocks += count; - sbi->total_valid_block_count = valid_block_count; - sbi->alloc_valid_block_count += (block_t)count; +#endif + valid_block_count = + sbi->total_valid_block_count + (block_t)(*count); + if (unlikely(valid_block_count > sbi->user_block_count)) { + *count = sbi->user_block_count - sbi->total_valid_block_count; + if (!*count) { + spin_unlock(&sbi->stat_lock); + return false; + } + } + /* *count can be recalculated */ + inode->i_blocks += *count; + sbi->total_valid_block_count = + sbi->total_valid_block_count + (block_t)(*count); spin_unlock(&sbi->stat_lock); + + percpu_counter_add(&sbi->alloc_valid_block_count, (*count)); return true; } @@ -1087,20 +1164,20 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, static inline void inc_page_count(struct f2fs_sb_info *sbi, int count_type) { - atomic_inc(&sbi->nr_pages[count_type]); + 
percpu_counter_inc(&sbi->nr_pages[count_type]); set_sbi_flag(sbi, SBI_IS_DIRTY); } static inline void inode_inc_dirty_pages(struct inode *inode) { - atomic_inc(&F2FS_I(inode)->dirty_pages); + percpu_counter_inc(&F2FS_I(inode)->dirty_pages); inc_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); } static inline void dec_page_count(struct f2fs_sb_info *sbi, int count_type) { - atomic_dec(&sbi->nr_pages[count_type]); + percpu_counter_dec(&sbi->nr_pages[count_type]); } static inline void inode_dec_dirty_pages(struct inode *inode) @@ -1109,26 +1186,28 @@ static inline void inode_dec_dirty_pages(struct inode *inode) !S_ISLNK(inode->i_mode)) return; - atomic_dec(&F2FS_I(inode)->dirty_pages); + percpu_counter_dec(&F2FS_I(inode)->dirty_pages); dec_page_count(F2FS_I_SB(inode), S_ISDIR(inode->i_mode) ? F2FS_DIRTY_DENTS : F2FS_DIRTY_DATA); } -static inline int get_pages(struct f2fs_sb_info *sbi, int count_type) +static inline s64 get_pages(struct f2fs_sb_info *sbi, int count_type) { - return atomic_read(&sbi->nr_pages[count_type]); + return percpu_counter_sum_positive(&sbi->nr_pages[count_type]); } -static inline int get_dirty_pages(struct inode *inode) +static inline s64 get_dirty_pages(struct inode *inode) { - return atomic_read(&F2FS_I(inode)->dirty_pages); + return percpu_counter_sum_positive(&F2FS_I(inode)->dirty_pages); } static inline int get_blocktype_secs(struct f2fs_sb_info *sbi, int block_type) { unsigned int pages_per_sec = sbi->segs_per_sec * sbi->blocks_per_seg; - return ((get_pages(sbi, block_type) + pages_per_sec - 1) - >> sbi->log_blocks_per_seg) / sbi->segs_per_sec; + unsigned int segs = (get_pages(sbi, block_type) + pages_per_sec - 1) >> + sbi->log_blocks_per_seg; + + return segs / sbi->segs_per_sec; } static inline block_t valid_user_blocks(struct f2fs_sb_info *sbi) @@ -1217,11 +1296,11 @@ static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi, if (inode) inode->i_blocks++; - sbi->alloc_valid_block_count++; sbi->total_valid_node_count++; sbi->total_valid_block_count++; spin_unlock(&sbi->stat_lock); + percpu_counter_inc(&sbi->alloc_valid_block_count); return true; } @@ -1248,28 +1327,30 @@ static inline unsigned int valid_node_count(struct f2fs_sb_info *sbi) static inline void inc_valid_inode_count(struct f2fs_sb_info *sbi) { - spin_lock(&sbi->stat_lock); - f2fs_bug_on(sbi, sbi->total_valid_inode_count == sbi->total_node_count); - sbi->total_valid_inode_count++; - spin_unlock(&sbi->stat_lock); + percpu_counter_inc(&sbi->total_valid_inode_count); } static inline void dec_valid_inode_count(struct f2fs_sb_info *sbi) { - spin_lock(&sbi->stat_lock); - f2fs_bug_on(sbi, !sbi->total_valid_inode_count); - sbi->total_valid_inode_count--; - spin_unlock(&sbi->stat_lock); + percpu_counter_dec(&sbi->total_valid_inode_count); } -static inline unsigned int valid_inode_count(struct f2fs_sb_info *sbi) +static inline s64 valid_inode_count(struct f2fs_sb_info *sbi) { - return sbi->total_valid_inode_count; + return percpu_counter_sum_positive(&sbi->total_valid_inode_count); } static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, pgoff_t index, bool for_write) { +#ifdef CONFIG_F2FS_FAULT_INJECTION + struct page *page = find_lock_page(mapping, index); + if (page) + return page; + + if (time_to_inject(FAULT_PAGE_ALLOC)) + return NULL; +#endif if (!for_write) return grab_cache_page(mapping, index); return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS); @@ -1435,7 +1516,6 @@ enum { FI_NO_ALLOC, /* should not allocate any 
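/*
 * Illustrative sketch, not part of the patch: the lifecycle of the percpu
 * counters that replace the old atomic_t page/block counters above.  Writers
 * only touch a per-CPU slot (no shared cacheline bouncing); readers pay for
 * an exact sum only when they call percpu_counter_sum_positive().  The real
 * init/destroy calls live in super.c, outside this hunk; "example_setup" and
 * "example_teardown" are hypothetical helpers shown for shape only.
 */
static int example_setup(struct f2fs_sb_info *sbi)
{
	int i, err;

	for (i = 0; i < NR_COUNT_TYPE; i++) {
		err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL);
		if (err)
			return err;
	}
	return percpu_counter_init(&sbi->alloc_valid_block_count, 0,
								GFP_KERNEL);
}

static void example_teardown(struct f2fs_sb_info *sbi)
{
	int i;

	for (i = 0; i < NR_COUNT_TYPE; i++)
		percpu_counter_destroy(&sbi->nr_pages[i]);
	percpu_counter_destroy(&sbi->alloc_valid_block_count);
}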
blocks */ FI_FREE_NID, /* free allocated nide */ FI_UPDATE_DIR, /* should update inode block for consistency */ - FI_DELAY_IPUT, /* used for the recovery */ FI_NO_EXTENT, /* not to use the extent cache */ FI_INLINE_XATTR, /* used for inline xattr */ FI_INLINE_DATA, /* used for inline data*/ @@ -1618,12 +1698,6 @@ static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi) return is_set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); } -static inline void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi) -{ - set_ckpt_flags(sbi->ckpt, CP_ERROR_FLAG); - sbi->sb->s_flags |= MS_RDONLY; -} - static inline bool is_dot_dotdot(const struct qstr *str) { if (str->len == 1 && str->name[0] == '.') @@ -1644,6 +1718,15 @@ static inline bool f2fs_may_extent_tree(struct inode *inode) return S_ISREG(inode->i_mode); } +static inline void *f2fs_kmalloc(size_t size, gfp_t flags) +{ +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(FAULT_KMALLOC)) + return NULL; +#endif + return kmalloc(size, flags); +} + static inline void *f2fs_kvmalloc(size_t size, gfp_t flags) { void *ret; @@ -1710,7 +1793,7 @@ struct dentry *f2fs_get_parent(struct dentry *child); */ extern unsigned char f2fs_filetype_table[F2FS_FT_MAX]; void set_de_type(struct f2fs_dir_entry *, umode_t); - +unsigned char get_de_type(struct f2fs_dir_entry *); struct f2fs_dir_entry *find_target_dentry(struct fscrypt_name *, f2fs_hash_t, int *, struct f2fs_dentry_ptr *); bool f2fs_fill_dentries(struct dir_context *, struct f2fs_dentry_ptr *, @@ -1731,6 +1814,8 @@ void f2fs_set_link(struct inode *, struct f2fs_dir_entry *, int update_dent_inode(struct inode *, struct inode *, const struct qstr *); void f2fs_update_dentry(nid_t ino, umode_t mode, struct f2fs_dentry_ptr *, const struct qstr *, f2fs_hash_t , unsigned int); +int f2fs_add_regular_entry(struct inode *, const struct qstr *, + struct inode *, nid_t, umode_t); int __f2fs_add_link(struct inode *, const struct qstr *, struct inode *, nid_t, umode_t); void f2fs_delete_entry(struct f2fs_dir_entry *, struct page *, struct inode *, @@ -1781,7 +1866,10 @@ void ra_node_page(struct f2fs_sb_info *, nid_t); struct page *get_node_page(struct f2fs_sb_info *, pgoff_t); struct page *get_node_page_ra(struct page *, int); void sync_inode_page(struct dnode_of_data *); -int sync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *); +void move_node_page(struct page *, int); +int fsync_node_pages(struct f2fs_sb_info *, nid_t, struct writeback_control *, + bool); +int sync_node_pages(struct f2fs_sb_info *, struct writeback_control *); bool alloc_nid(struct f2fs_sb_info *, nid_t *); void alloc_nid_done(struct f2fs_sb_info *, nid_t); void alloc_nid_failed(struct f2fs_sb_info *, nid_t); @@ -1843,6 +1931,7 @@ void destroy_segment_manager_caches(void); /* * checkpoint.c */ +void f2fs_stop_checkpoint(struct f2fs_sb_info *, bool); struct page *grab_meta_page(struct f2fs_sb_info *, pgoff_t); struct page *get_meta_page(struct f2fs_sb_info *, pgoff_t); struct page *get_tmp_page(struct f2fs_sb_info *, pgoff_t); @@ -1852,7 +1941,7 @@ void ra_meta_pages_cond(struct f2fs_sb_info *, pgoff_t); long sync_meta_pages(struct f2fs_sb_info *, enum page_type, long); void add_ino_entry(struct f2fs_sb_info *, nid_t, int type); void remove_ino_entry(struct f2fs_sb_info *, nid_t, int type); -void release_ino_entry(struct f2fs_sb_info *); +void release_ino_entry(struct f2fs_sb_info *, bool); bool exist_written_data(struct f2fs_sb_info *, nid_t, int); int acquire_orphan_inode(struct f2fs_sb_info *); void release_orphan_inode(struct 
f2fs_sb_info *); @@ -1861,7 +1950,6 @@ void remove_orphan_inode(struct f2fs_sb_info *, nid_t); int recover_orphan_inodes(struct f2fs_sb_info *); int get_valid_checkpoint(struct f2fs_sb_info *); void update_dirty_page(struct inode *, struct page *); -void add_dirty_dir_inode(struct inode *); void remove_dirty_inode(struct inode *); int sync_dirty_inodes(struct f2fs_sb_info *, enum inode_type); int write_checkpoint(struct f2fs_sb_info *, struct cp_control *); @@ -1880,6 +1968,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *); void f2fs_submit_page_mbio(struct f2fs_io_info *); void set_data_blkaddr(struct dnode_of_data *); void f2fs_update_data_blkaddr(struct dnode_of_data *, block_t); +int reserve_new_blocks(struct dnode_of_data *, blkcnt_t); int reserve_new_block(struct dnode_of_data *); int f2fs_get_block(struct dnode_of_data *, pgoff_t); ssize_t f2fs_preallocate_blocks(struct kiocb *, struct iov_iter *); @@ -1906,7 +1995,7 @@ void build_gc_manager(struct f2fs_sb_info *); /* * recovery.c */ -int recover_fsync_data(struct f2fs_sb_info *); +int recover_fsync_data(struct f2fs_sb_info *, bool); bool space_for_roll_forward(struct f2fs_sb_info *); /* @@ -1921,12 +2010,12 @@ struct f2fs_stat_info { unsigned long long hit_largest, hit_cached, hit_rbtree; unsigned long long hit_total, total_ext; int ext_tree, zombie_tree, ext_node; - int ndirty_node, ndirty_meta; - int ndirty_dent, ndirty_dirs, ndirty_data, ndirty_files; + s64 ndirty_node, ndirty_dent, ndirty_meta, ndirty_data, inmem_pages; + unsigned int ndirty_dirs, ndirty_files; int nats, dirty_nats, sits, dirty_sits, fnids; int total_count, utilization; - int bg_gc, inmem_pages, wb_pages; - int inline_xattr, inline_inode, inline_dir; + int bg_gc, wb_bios; + int inline_xattr, inline_inode, inline_dir, orphans; unsigned int valid_count, valid_node_count, valid_inode_count; unsigned int bimodal, avg_vblocks; int util_free, util_valid, util_invalid; diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 90d1157a09f9..f4c0086655c4 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -20,7 +20,7 @@ #include <linux/uaccess.h> #include <linux/mount.h> #include <linux/pagevec.h> -#include <linux/random.h> +#include <linux/uuid.h> #include "f2fs.h" #include "node.h" @@ -182,7 +182,8 @@ static void try_to_fix_pino(struct inode *inode) } } -int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) +static int f2fs_do_sync_file(struct file *file, loff_t start, loff_t end, + int datasync, bool atomic) { struct inode *inode = file->f_mapping->host; struct f2fs_inode_info *fi = F2FS_I(inode); @@ -256,7 +257,9 @@ go_write: goto out; } sync_nodes: - sync_node_pages(sbi, ino, &wbc); + ret = fsync_node_pages(sbi, ino, &wbc, atomic); + if (ret) + goto out; /* if cp_error was enabled, we should avoid infinite loop */ if (unlikely(f2fs_cp_error(sbi))) { @@ -288,6 +291,11 @@ out: return ret; } +int f2fs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) +{ + return f2fs_do_sync_file(file, start, end, datasync, false); +} + static pgoff_t __get_first_dirty_index(struct address_space *mapping, pgoff_t pgofs, int whence) { @@ -555,6 +563,9 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) free_from = (pgoff_t)F2FS_BYTES_TO_BLK(from + blocksize - 1); + if (free_from >= sbi->max_file_blocks) + goto free_partial; + if (lock) f2fs_lock_op(sbi); @@ -573,7 +584,7 @@ int truncate_blocks(struct inode *inode, u64 from, bool lock) } set_new_dnode(&dn, inode, ipage, NULL, 0); - err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE); 
+ err = get_dnode_of_data(&dn, free_from, LOOKUP_NODE_RA); if (err) { if (err == -ENOENT) goto free_next; @@ -596,7 +607,7 @@ free_next: out: if (lock) f2fs_unlock_op(sbi); - +free_partial: /* lastly zero out the first data page */ if (!err) err = truncate_partial_data_page(inode, from, truncate_page); @@ -986,6 +997,49 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) return ret; } +static int f2fs_do_zero_range(struct dnode_of_data *dn, pgoff_t start, + pgoff_t end) +{ + struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); + pgoff_t index = start; + unsigned int ofs_in_node = dn->ofs_in_node; + blkcnt_t count = 0; + int ret; + + for (; index < end; index++, dn->ofs_in_node++) { + if (datablock_addr(dn->node_page, dn->ofs_in_node) == NULL_ADDR) + count++; + } + + dn->ofs_in_node = ofs_in_node; + ret = reserve_new_blocks(dn, count); + if (ret) + return ret; + + dn->ofs_in_node = ofs_in_node; + for (index = start; index < end; index++, dn->ofs_in_node++) { + dn->data_blkaddr = + datablock_addr(dn->node_page, dn->ofs_in_node); + /* + * reserve_new_blocks will not guarantee entire block + * allocation. + */ + if (dn->data_blkaddr == NULL_ADDR) { + ret = -ENOSPC; + break; + } + if (dn->data_blkaddr != NEW_ADDR) { + invalidate_blocks(sbi, dn->data_blkaddr); + dn->data_blkaddr = NEW_ADDR; + set_data_blkaddr(dn); + } + } + + f2fs_update_extent_cache_range(dn, start, 0, index - start); + + return ret; +} + static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, int mode) { @@ -1036,35 +1090,32 @@ static int f2fs_zero_range(struct inode *inode, loff_t offset, loff_t len, (loff_t)pg_start << PAGE_SHIFT); } - for (index = pg_start; index < pg_end; index++) { + for (index = pg_start; index < pg_end;) { struct dnode_of_data dn; - struct page *ipage; + unsigned int end_offset; + pgoff_t end; f2fs_lock_op(sbi); - ipage = get_node_page(sbi, inode->i_ino); - if (IS_ERR(ipage)) { - ret = PTR_ERR(ipage); - f2fs_unlock_op(sbi); - goto out; - } - - set_new_dnode(&dn, inode, ipage, NULL, 0); - ret = f2fs_reserve_block(&dn, index); + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = get_dnode_of_data(&dn, index, ALLOC_NODE); if (ret) { f2fs_unlock_op(sbi); goto out; } - if (dn.data_blkaddr != NEW_ADDR) { - invalidate_blocks(sbi, dn.data_blkaddr); - f2fs_update_data_blkaddr(&dn, NEW_ADDR); - } + end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + end = min(pg_end, end_offset - dn.ofs_in_node + index); + + ret = f2fs_do_zero_range(&dn, index, end); f2fs_put_dnode(&dn); f2fs_unlock_op(sbi); + if (ret) + goto out; + index = end; new_size = max_t(loff_t, new_size, - (loff_t)(index + 1) << PAGE_SHIFT); + (loff_t)index << PAGE_SHIFT); } if (off_end) { @@ -1147,10 +1198,11 @@ static int expand_inode_data(struct inode *inode, loff_t offset, loff_t len, int mode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - pgoff_t index, pg_start, pg_end; + struct f2fs_map_blocks map = { .m_next_pgofs = NULL }; + pgoff_t pg_end; loff_t new_size = i_size_read(inode); - loff_t off_start, off_end; - int ret = 0; + loff_t off_end; + int ret; ret = inode_newsize_ok(inode, (len + offset)); if (ret) @@ -1162,43 +1214,35 @@ static int expand_inode_data(struct inode *inode, loff_t offset, f2fs_balance_fs(sbi, true); - pg_start = ((unsigned long long) offset) >> PAGE_SHIFT; - pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT; - - off_start = offset & (PAGE_SIZE - 1); + pg_end = ((unsigned long long)offset + len) >> PAGE_SHIFT; off_end = (offset + len) & (PAGE_SIZE - 1); - 
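/*
 * Illustrative sketch, not part of the patch: the "count the holes first,
 * then reserve them in one batch" pattern that f2fs_do_zero_range() above
 * introduces.  One reserve_new_blocks() call replaces a reserve_new_block()
 * call per page, and the quota check in inc_valid_block_count() may trim the
 * request, so callers must re-check for NULL_ADDR afterwards.
 * "example_batch_reserve" is a hypothetical helper with simplified locking.
 */
static int example_batch_reserve(struct dnode_of_data *dn,
				unsigned int start_ofs, unsigned int end_ofs)
{
	unsigned int ofs;
	blkcnt_t holes = 0;

	for (ofs = start_ofs; ofs < end_ofs; ofs++)
		if (datablock_addr(dn->node_page, ofs) == NULL_ADDR)
			holes++;

	if (!holes)
		return 0;

	dn->ofs_in_node = start_ofs;
	return reserve_new_blocks(dn, holes);	/* may reserve fewer than asked */
}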
f2fs_lock_op(sbi); + map.m_lblk = ((unsigned long long)offset) >> PAGE_SHIFT; + map.m_len = pg_end - map.m_lblk; + if (off_end) + map.m_len++; - for (index = pg_start; index <= pg_end; index++) { - struct dnode_of_data dn; + ret = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO); + if (ret) { + pgoff_t last_off; - if (index == pg_end && !off_end) - goto noalloc; + if (!map.m_len) + return ret; - set_new_dnode(&dn, inode, NULL, NULL, 0); - ret = f2fs_reserve_block(&dn, index); - if (ret) - break; -noalloc: - if (pg_start == pg_end) - new_size = offset + len; - else if (index == pg_start && off_start) - new_size = (loff_t)(index + 1) << PAGE_SHIFT; - else if (index == pg_end) - new_size = ((loff_t)index << PAGE_SHIFT) + - off_end; - else - new_size += PAGE_SIZE; + last_off = map.m_lblk + map.m_len - 1; + + /* update new size to the failed position */ + new_size = (last_off == pg_end) ? offset + len: + (loff_t)(last_off + 1) << PAGE_SHIFT; + } else { + new_size = ((loff_t)pg_end << PAGE_SHIFT) + off_end; } - if (!(mode & FALLOC_FL_KEEP_SIZE) && - i_size_read(inode) < new_size) { + if (!(mode & FALLOC_FL_KEEP_SIZE) && i_size_read(inode) < new_size) { i_size_write(inode, new_size); mark_inode_dirty(inode); update_inode_page(inode); } - f2fs_unlock_op(sbi); return ret; } @@ -1254,10 +1298,19 @@ out: static int f2fs_release_file(struct inode *inode, struct file *filp) { + /* + * f2fs_relase_file is called at every close calls. So we should + * not drop any inmemory pages by close called by other process. + */ + if (!(filp->f_mode & FMODE_WRITE) || + atomic_read(&inode->i_writecount) != 1) + return 0; + /* some remained atomic pages should discarded */ if (f2fs_is_atomic_file(inode)) drop_inmem_pages(inode); if (f2fs_is_volatile_file(inode)) { + clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); set_inode_flag(F2FS_I(inode), FI_DROP_CACHE); filemap_fdatawrite(inode->i_mapping); clear_inode_flag(F2FS_I(inode), FI_DROP_CACHE); @@ -1294,20 +1347,16 @@ static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) unsigned int oldflags; int ret; + if (!inode_owner_or_capable(inode)) + return -EACCES; + + if (get_user(flags, (int __user *)arg)) + return -EFAULT; + ret = mnt_want_write_file(filp); if (ret) return ret; - if (!inode_owner_or_capable(inode)) { - ret = -EACCES; - goto out; - } - - if (get_user(flags, (int __user *)arg)) { - ret = -EFAULT; - goto out; - } - flags = f2fs_mask_flags(inode->i_mode, flags); inode_lock(inode); @@ -1350,17 +1399,35 @@ static int f2fs_ioc_start_atomic_write(struct file *filp) if (!inode_owner_or_capable(inode)) return -EACCES; + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + inode_lock(inode); + if (f2fs_is_atomic_file(inode)) - return 0; + goto out; ret = f2fs_convert_inline_inode(inode); if (ret) - return ret; + goto out; set_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); - return 0; + if (!get_dirty_pages(inode)) + goto out; + + f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, + "Unexpected flush for atomic writes: ino=%lu, npages=%lld", + inode->i_ino, get_dirty_pages(inode)); + ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); + if (ret) + clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); +out: + inode_unlock(inode); + mnt_drop_write_file(filp); + return ret; } static int f2fs_ioc_commit_atomic_write(struct file *filp) @@ -1371,13 +1438,15 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) if (!inode_owner_or_capable(inode)) return -EACCES; - if 
(f2fs_is_volatile_file(inode)) - return 0; - ret = mnt_want_write_file(filp); if (ret) return ret; + inode_lock(inode); + + if (f2fs_is_volatile_file(inode)) + goto err_out; + if (f2fs_is_atomic_file(inode)) { clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); ret = commit_inmem_pages(inode); @@ -1387,8 +1456,9 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp) } } - ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0); + ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); err_out: + inode_unlock(inode); mnt_drop_write_file(filp); return ret; } @@ -1401,32 +1471,54 @@ static int f2fs_ioc_start_volatile_write(struct file *filp) if (!inode_owner_or_capable(inode)) return -EACCES; + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + inode_lock(inode); + if (f2fs_is_volatile_file(inode)) - return 0; + goto out; ret = f2fs_convert_inline_inode(inode); if (ret) - return ret; + goto out; set_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); - return 0; +out: + inode_unlock(inode); + mnt_drop_write_file(filp); + return ret; } static int f2fs_ioc_release_volatile_write(struct file *filp) { struct inode *inode = file_inode(filp); + int ret; if (!inode_owner_or_capable(inode)) return -EACCES; + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + inode_lock(inode); + if (!f2fs_is_volatile_file(inode)) - return 0; + goto out; - if (!f2fs_is_first_block_written(inode)) - return truncate_partial_data_page(inode, 0, true); + if (!f2fs_is_first_block_written(inode)) { + ret = truncate_partial_data_page(inode, 0, true); + goto out; + } - return punch_hole(inode, 0, F2FS_BLKSIZE); + ret = punch_hole(inode, 0, F2FS_BLKSIZE); +out: + inode_unlock(inode); + mnt_drop_write_file(filp); + return ret; } static int f2fs_ioc_abort_volatile_write(struct file *filp) @@ -1441,15 +1533,17 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp) if (ret) return ret; - if (f2fs_is_atomic_file(inode)) { - clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); + inode_lock(inode); + + if (f2fs_is_atomic_file(inode)) drop_inmem_pages(inode); - } if (f2fs_is_volatile_file(inode)) { clear_inode_flag(F2FS_I(inode), FI_VOLATILE_FILE); - ret = f2fs_sync_file(filp, 0, LLONG_MAX, 0); + ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true); } + inode_unlock(inode); + mnt_drop_write_file(filp); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); return ret; @@ -1461,6 +1555,7 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct super_block *sb = sbi->sb; __u32 in; + int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1468,31 +1563,38 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg) if (get_user(in, (__u32 __user *)arg)) return -EFAULT; + ret = mnt_want_write_file(filp); + if (ret) + return ret; + switch (in) { case F2FS_GOING_DOWN_FULLSYNC: sb = freeze_bdev(sb->s_bdev); if (sb && !IS_ERR(sb)) { - f2fs_stop_checkpoint(sbi); + f2fs_stop_checkpoint(sbi, false); thaw_bdev(sb->s_bdev, sb); } break; case F2FS_GOING_DOWN_METASYNC: /* do checkpoint only */ f2fs_sync_fs(sb, 1); - f2fs_stop_checkpoint(sbi); + f2fs_stop_checkpoint(sbi, false); break; case F2FS_GOING_DOWN_NOSYNC: - f2fs_stop_checkpoint(sbi); + f2fs_stop_checkpoint(sbi, false); break; case F2FS_GOING_DOWN_METAFLUSH: sync_meta_pages(sbi, META, LONG_MAX); - f2fs_stop_checkpoint(sbi); + f2fs_stop_checkpoint(sbi, false); break; default: - return -EINVAL; + ret = -EINVAL; + goto out; } f2fs_update_time(sbi, REQ_TIME); - return 0; +out: 
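/*
 * Illustrative sketch, not part of the patch: the locking shape that the
 * ioctl conversions above (atomic/volatile write, abort, shutdown, fitrim)
 * all move to.  Take write access to the mount first, then the inode lock,
 * do the work, and unwind in reverse order on every exit path.
 * "example_ioctl" is a hypothetical handler, not a real f2fs entry point.
 */
static int example_ioctl(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	int ret;

	if (!inode_owner_or_capable(inode))
		return -EACCES;

	ret = mnt_want_write_file(filp);	/* may block on a frozen fs */
	if (ret)
		return ret;

	inode_lock(inode);
	ret = 0;				/* ... per-ioctl work here ... */
	inode_unlock(inode);

	mnt_drop_write_file(filp);
	return ret;
}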
+ mnt_drop_write_file(filp); + return ret; } static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) @@ -1513,9 +1615,14 @@ static int f2fs_ioc_fitrim(struct file *filp, unsigned long arg) sizeof(range))) return -EFAULT; + ret = mnt_want_write_file(filp); + if (ret) + return ret; + range.minlen = max((unsigned int)range.minlen, q->limits.discard_granularity); ret = f2fs_trim_fs(F2FS_SB(sb), &range); + mnt_drop_write_file(filp); if (ret < 0) return ret; @@ -1540,13 +1647,21 @@ static int f2fs_ioc_set_encryption_policy(struct file *filp, unsigned long arg) { struct fscrypt_policy policy; struct inode *inode = file_inode(filp); + int ret; if (copy_from_user(&policy, (struct fscrypt_policy __user *)arg, sizeof(policy))) return -EFAULT; + ret = mnt_want_write_file(filp); + if (ret) + return ret; + f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); - return fscrypt_process_policy(inode, &policy); + ret = fscrypt_process_policy(inode, &policy); + + mnt_drop_write_file(filp); + return ret; } static int f2fs_ioc_get_encryption_policy(struct file *filp, unsigned long arg) @@ -1603,6 +1718,7 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg) struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); __u32 sync; + int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1613,20 +1729,30 @@ static int f2fs_ioc_gc(struct file *filp, unsigned long arg) if (f2fs_readonly(sbi->sb)) return -EROFS; + ret = mnt_want_write_file(filp); + if (ret) + return ret; + if (!sync) { - if (!mutex_trylock(&sbi->gc_mutex)) - return -EBUSY; + if (!mutex_trylock(&sbi->gc_mutex)) { + ret = -EBUSY; + goto out; + } } else { mutex_lock(&sbi->gc_mutex); } - return f2fs_gc(sbi, sync); + ret = f2fs_gc(sbi, sync); +out: + mnt_drop_write_file(filp); + return ret; } static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + int ret; if (!capable(CAP_SYS_ADMIN)) return -EPERM; @@ -1634,7 +1760,14 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg) if (f2fs_readonly(sbi->sb)) return -EROFS; - return f2fs_sync_fs(sbi->sb, 1); + ret = mnt_want_write_file(filp); + if (ret) + return ret; + + ret = f2fs_sync_fs(sbi->sb, 1); + + mnt_drop_write_file(filp); + return ret; } static int f2fs_defragment_range(struct f2fs_sb_info *sbi, @@ -1886,13 +2019,8 @@ static ssize_t f2fs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) } inode_unlock(inode); - if (ret > 0) { - ssize_t err; - - err = generic_write_sync(file, iocb->ki_pos - ret, ret); - if (err < 0) - ret = err; - } + if (ret > 0) + ret = generic_write_sync(iocb, ret); return ret; } diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index b0051a97824c..38d56f678912 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -96,7 +96,7 @@ int start_gc_thread(struct f2fs_sb_info *sbi) dev_t dev = sbi->sb->s_bdev->bd_dev; int err = 0; - gc_th = kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); + gc_th = f2fs_kmalloc(sizeof(struct f2fs_gc_kthread), GFP_KERNEL); if (!gc_th) { err = -ENOMEM; goto out; @@ -465,15 +465,7 @@ next_step: continue; } - /* set page dirty and write it */ - if (gc_type == FG_GC) { - f2fs_wait_on_page_writeback(node_page, NODE, true); - set_page_dirty(node_page); - } else { - if (!PageWriteback(node_page)) - set_page_dirty(node_page); - } - f2fs_put_page(node_page, 1); + move_node_page(node_page, gc_type); stat_inc_node_blk_count(sbi, 1, gc_type); } @@ -834,18 +826,9 @@ static int do_garbage_collect(struct 
f2fs_sb_info *sbi, f2fs_put_page(sum_page, 0); } - if (gc_type == FG_GC) { - if (type == SUM_TYPE_NODE) { - struct writeback_control wbc = { - .sync_mode = WB_SYNC_ALL, - .nr_to_write = LONG_MAX, - .for_reclaim = 0, - }; - sync_node_pages(sbi, 0, &wbc); - } else { - f2fs_submit_merged_bio(sbi, DATA, WRITE); - } - } + if (gc_type == FG_GC) + f2fs_submit_merged_bio(sbi, + (type == SUM_TYPE_NODE) ? NODE : DATA, WRITE); blk_finish_plug(&plug); diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index a2fbe6f427d3..a4bb155dd00a 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -161,7 +161,7 @@ int f2fs_convert_inline_inode(struct inode *inode) if (!f2fs_has_inline_data(inode)) return 0; - page = grab_cache_page(inode->i_mapping, 0); + page = f2fs_grab_cache_page(inode->i_mapping, 0, false); if (!page) return -ENOMEM; @@ -303,11 +303,6 @@ struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir, else f2fs_put_page(ipage, 0); - /* - * For the most part, it should be a bug when name_len is zero. - * We stop here for figuring out where the bugs has occurred. - */ - f2fs_bug_on(sbi, d.max < 0); return de; } @@ -355,7 +350,7 @@ int make_empty_inline_dir(struct inode *inode, struct inode *parent, * NOTE: ipage is grabbed by caller, but if any error occurs, we should * release ipage in this function. */ -static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, +static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage, struct f2fs_inline_dentry *inline_dentry) { struct page *page; @@ -363,7 +358,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, struct f2fs_dentry_block *dentry_blk; int err; - page = grab_cache_page(dir->i_mapping, 0); + page = f2fs_grab_cache_page(dir->i_mapping, 0, false); if (!page) { f2fs_put_page(ipage, 1); return -ENOMEM; @@ -405,6 +400,7 @@ static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, stat_dec_inline_dir(dir); clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); + F2FS_I(dir)->i_current_depth = 1; if (i_size_read(dir) < PAGE_SIZE) { i_size_write(dir, PAGE_SIZE); set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); @@ -416,6 +412,105 @@ out: return err; } +static int f2fs_add_inline_entries(struct inode *dir, + struct f2fs_inline_dentry *inline_dentry) +{ + struct f2fs_dentry_ptr d; + unsigned long bit_pos = 0; + int err = 0; + + make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2); + + while (bit_pos < d.max) { + struct f2fs_dir_entry *de; + struct qstr new_name; + nid_t ino; + umode_t fake_mode; + + if (!test_bit_le(bit_pos, d.bitmap)) { + bit_pos++; + continue; + } + + de = &d.dentry[bit_pos]; + + if (unlikely(!de->name_len)) { + bit_pos++; + continue; + } + + new_name.name = d.filename[bit_pos]; + new_name.len = de->name_len; + + ino = le32_to_cpu(de->ino); + fake_mode = get_de_type(de) << S_SHIFT; + + err = f2fs_add_regular_entry(dir, &new_name, NULL, + ino, fake_mode); + if (err) + goto punch_dentry_pages; + + bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); + } + return 0; +punch_dentry_pages: + truncate_inode_pages(&dir->i_data, 0); + truncate_blocks(dir, 0, false); + remove_dirty_inode(dir); + return err; +} + +static int f2fs_move_rehashed_dirents(struct inode *dir, struct page *ipage, + struct f2fs_inline_dentry *inline_dentry) +{ + struct f2fs_inline_dentry *backup_dentry; + struct f2fs_inode_info *fi = F2FS_I(dir); + int err; + + backup_dentry = f2fs_kmalloc(sizeof(struct f2fs_inline_dentry), + GFP_F2FS_ZERO); + if (!backup_dentry) { + f2fs_put_page(ipage, 1); + return -ENOMEM; + 
} + + memcpy(backup_dentry, inline_dentry, MAX_INLINE_DATA); + truncate_inline_inode(ipage, 0); + + unlock_page(ipage); + + err = f2fs_add_inline_entries(dir, backup_dentry); + if (err) + goto recover; + + lock_page(ipage); + + stat_dec_inline_dir(dir); + clear_inode_flag(fi, FI_INLINE_DENTRY); + update_inode(dir, ipage); + kfree(backup_dentry); + return 0; +recover: + lock_page(ipage); + memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA); + fi->i_current_depth = 0; + i_size_write(dir, MAX_INLINE_DATA); + update_inode(dir, ipage); + f2fs_put_page(ipage, 1); + + kfree(backup_dentry); + return err; +} + +static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, + struct f2fs_inline_dentry *inline_dentry) +{ + if (!F2FS_I(dir)->i_dir_level) + return f2fs_move_inline_dirents(dir, ipage, inline_dentry); + else + return f2fs_move_rehashed_dirents(dir, ipage, inline_dentry); +} + int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, struct inode *inode, nid_t ino, umode_t mode) { diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index cb269c46ac25..2e68adab0d64 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -283,7 +283,7 @@ retry: cond_resched(); goto retry; } else if (err != -ENOENT) { - f2fs_stop_checkpoint(sbi); + f2fs_stop_checkpoint(sbi, false); } return 0; } @@ -344,7 +344,7 @@ void f2fs_evict_inode(struct inode *inode) sb_start_intwrite(inode->i_sb); set_inode_flag(fi, FI_NO_ALLOC); i_size_write(inode, 0); - +retry: if (F2FS_HAS_BLOCKS(inode)) err = f2fs_truncate(inode, true); @@ -354,6 +354,12 @@ void f2fs_evict_inode(struct inode *inode) f2fs_unlock_op(sbi); } + /* give more chances, if ENOMEM case */ + if (err == -ENOMEM) { + err = 0; + goto retry; + } + sb_end_intwrite(inode->i_sb); no_delete: stat_dec_inline_xattr(inode); @@ -368,26 +374,11 @@ no_delete: if (is_inode_flag_set(fi, FI_UPDATE_WRITE)) add_ino_entry(sbi, inode->i_ino, UPDATE_INO); if (is_inode_flag_set(fi, FI_FREE_NID)) { - if (err && err != -ENOENT) - alloc_nid_done(sbi, inode->i_ino); - else - alloc_nid_failed(sbi, inode->i_ino); + alloc_nid_failed(sbi, inode->i_ino); clear_inode_flag(fi, FI_FREE_NID); } - - if (err && err != -ENOENT) { - if (!exist_written_data(sbi, inode->i_ino, ORPHAN_INO)) { - /* - * get here because we failed to release resource - * of inode previously, reminder our user to run fsck - * for fixing. - */ - set_sbi_flag(sbi, SBI_NEED_FSCK); - f2fs_msg(sbi->sb, KERN_WARNING, - "inode (ino:%lu) resource leak, run fsck " - "to fix this issue!", inode->i_ino); - } - } + f2fs_bug_on(sbi, err && + !exist_written_data(sbi, inode->i_ino, ORPHAN_INO)); out_clear: fscrypt_put_encryption_info(inode, NULL); clear_inode(inode); @@ -397,37 +388,32 @@ out_clear: void handle_failed_inode(struct inode *inode) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); - int err = 0; + struct node_info ni; - clear_nlink(inode); - make_bad_inode(inode); + /* don't make bad inode, since it becomes a regular file. */ unlock_new_inode(inode); - i_size_write(inode, 0); - if (F2FS_HAS_BLOCKS(inode)) - err = f2fs_truncate(inode, false); - - if (!err) - err = remove_inode_page(inode); - /* - * if we skip truncate_node in remove_inode_page bacause we failed - * before, it's better to find another way to release resource of - * this inode (e.g. valid block count, node block or nid). Here we - * choose to add this inode to orphan list, so that we can call iput - * for releasing in orphan recovery flow. 
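/*
 * Illustrative sketch, not part of the patch: the backup-and-rollback shape
 * used by f2fs_move_rehashed_dirents() above.  The inline dentry area is
 * copied aside before it is truncated, so a failure while re-inserting the
 * entries into regular dentry blocks can restore the directory to its
 * original inline form.  "example_readd_entries" is a hypothetical stand-in
 * for f2fs_add_inline_entries(); sizes and locking are simplified.
 */
static int example_readd_entries(struct inode *dir)
{
	return 0;	/* placeholder for the re-insertion loop */
}

static int example_move_out(struct inode *dir, void *inline_area, size_t len)
{
	void *backup;
	int err;

	backup = f2fs_kmalloc(len, GFP_F2FS_ZERO);
	if (!backup)
		return -ENOMEM;

	memcpy(backup, inline_area, len);	/* keep a rollback copy */

	err = example_readd_entries(dir);
	if (err)
		memcpy(inline_area, backup, len);	/* restore inline form */

	kfree(backup);
	return err;
}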
- * * Note: we should add inode to orphan list before f2fs_unlock_op() * so we can prevent losing this orphan when encoutering checkpoint * and following suddenly power-off. */ - if (err && err != -ENOENT) { - err = acquire_orphan_inode(sbi); - if (!err) + get_node_info(sbi, inode->i_ino, &ni); + + if (ni.blk_addr != NULL_ADDR) { + int err = acquire_orphan_inode(sbi); + if (err) { + set_sbi_flag(sbi, SBI_NEED_FSCK); + f2fs_msg(sbi->sb, KERN_WARNING, + "Too many orphan inodes, run fsck to fix."); + } else { add_orphan_inode(sbi, inode->i_ino); + } + alloc_nid_done(sbi, inode->i_ino); + } else { + set_inode_flag(F2FS_I(inode), FI_FREE_NID); } - set_inode_flag(F2FS_I(inode), FI_FREE_NID); f2fs_unlock_op(sbi); /* iput will drop the inode object */ diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index 013e57932d61..324ed3812f30 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -202,7 +202,7 @@ struct dentry *f2fs_get_parent(struct dentry *child) unsigned long ino = f2fs_inode_by_name(d_inode(child), &dotdot); if (!ino) return ERR_PTR(-ENOENT); - return d_obtain_alias(f2fs_iget(d_inode(child)->i_sb, ino)); + return d_obtain_alias(f2fs_iget(child->d_sb, ino)); } static int __recover_dot_dentries(struct inode *dir, nid_t pino) diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 1a33de9d84b1..1f21aae80c40 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -407,6 +407,29 @@ cache: up_write(&nm_i->nat_tree_lock); } +/* + * readahead MAX_RA_NODE number of node pages. + */ +static void ra_node_pages(struct page *parent, int start, int n) +{ + struct f2fs_sb_info *sbi = F2FS_P_SB(parent); + struct blk_plug plug; + int i, end; + nid_t nid; + + blk_start_plug(&plug); + + /* Then, try readahead for siblings of the desired node */ + end = start + n; + end = min(end, NIDS_PER_BLOCK); + for (i = start; i < end; i++) { + nid = get_nid(parent, i, false); + ra_node_page(sbi, nid); + } + + blk_finish_plug(&plug); +} + pgoff_t get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) { const long direct_index = ADDRS_PER_INODE(dn->inode); @@ -707,6 +730,8 @@ static int truncate_nodes(struct dnode_of_data *dn, unsigned int nofs, return PTR_ERR(page); } + ra_node_pages(page, ofs, NIDS_PER_BLOCK); + rn = F2FS_NODE(page); if (depth < 3) { for (i = ofs; i < NIDS_PER_BLOCK; i++, freed++) { @@ -784,6 +809,8 @@ static int truncate_partial_nodes(struct dnode_of_data *dn, nid[i + 1] = get_nid(pages[i], offset[i + 1], false); } + ra_node_pages(pages[idx], offset[idx + 1], NIDS_PER_BLOCK); + /* free direct nodes linked to a partial indirect node */ for (i = offset[idx + 1]; i < NIDS_PER_BLOCK; i++) { child_nid = get_nid(pages[idx], i, false); @@ -832,7 +859,7 @@ int truncate_inode_blocks(struct inode *inode, pgoff_t from) trace_f2fs_truncate_inode_blocks_enter(inode, from); level = get_node_path(inode, from, offset, noffset); -restart: + page = get_node_page(sbi, inode->i_ino); if (IS_ERR(page)) { trace_f2fs_truncate_inode_blocks_exit(inode, PTR_ERR(page)); @@ -896,10 +923,7 @@ skip_partial: if (offset[1] == 0 && ri->i_nid[offset[0] - NODE_DIR1_BLOCK]) { lock_page(page); - if (unlikely(page->mapping != NODE_MAPPING(sbi))) { - f2fs_put_page(page, 1); - goto restart; - } + BUG_ON(page->mapping != NODE_MAPPING(sbi)); f2fs_wait_on_page_writeback(page, NODE, true); ri->i_nid[offset[0] - NODE_DIR1_BLOCK] = 0; set_page_dirty(page); @@ -998,7 +1022,7 @@ struct page *new_node_page(struct dnode_of_data *dn, if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) return ERR_PTR(-EPERM); - page = 
grab_cache_page(NODE_MAPPING(sbi), dn->nid); + page = f2fs_grab_cache_page(NODE_MAPPING(sbi), dn->nid, false); if (!page) return ERR_PTR(-ENOMEM); @@ -1090,7 +1114,7 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) if (apage) return; - apage = grab_cache_page(NODE_MAPPING(sbi), nid); + apage = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); if (!apage) return; @@ -1098,29 +1122,6 @@ void ra_node_page(struct f2fs_sb_info *sbi, nid_t nid) f2fs_put_page(apage, err ? 1 : 0); } -/* - * readahead MAX_RA_NODE number of node pages. - */ -static void ra_node_pages(struct page *parent, int start) -{ - struct f2fs_sb_info *sbi = F2FS_P_SB(parent); - struct blk_plug plug; - int i, end; - nid_t nid; - - blk_start_plug(&plug); - - /* Then, try readahead for siblings of the desired node */ - end = start + MAX_RA_NODE; - end = min(end, NIDS_PER_BLOCK); - for (i = start; i < end; i++) { - nid = get_nid(parent, i, false); - ra_node_page(sbi, nid); - } - - blk_finish_plug(&plug); -} - static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, struct page *parent, int start) { @@ -1131,7 +1132,7 @@ static struct page *__get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid, return ERR_PTR(-ENOENT); f2fs_bug_on(sbi, check_nid_range(sbi, nid)); repeat: - page = grab_cache_page(NODE_MAPPING(sbi), nid); + page = f2fs_grab_cache_page(NODE_MAPPING(sbi), nid, false); if (!page) return ERR_PTR(-ENOMEM); @@ -1144,7 +1145,7 @@ repeat: } if (parent) - ra_node_pages(parent, start + 1); + ra_node_pages(parent, start + 1, MAX_RA_NODE); lock_page(page); @@ -1196,19 +1197,17 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) { struct inode *inode; struct page *page; + int ret; /* should flush inline_data before evict_inode */ inode = ilookup(sbi->sb, ino); if (!inode) return; - page = pagecache_get_page(inode->i_mapping, 0, FGP_NOWAIT, 0); + page = pagecache_get_page(inode->i_mapping, 0, FGP_LOCK|FGP_NOWAIT, 0); if (!page) goto iput_out; - if (!trylock_page(page)) - goto release_out; - if (!PageUptodate(page)) goto page_out; @@ -1218,24 +1217,214 @@ static void flush_inline_data(struct f2fs_sb_info *sbi, nid_t ino) if (!clear_page_dirty_for_io(page)) goto page_out; - if (!f2fs_write_inline_data(inode, page)) - inode_dec_dirty_pages(inode); - else + ret = f2fs_write_inline_data(inode, page); + inode_dec_dirty_pages(inode); + if (ret) set_page_dirty(page); page_out: - unlock_page(page); -release_out: - f2fs_put_page(page, 0); + f2fs_put_page(page, 1); iput_out: iput(inode); } -int sync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, - struct writeback_control *wbc) +void move_node_page(struct page *node_page, int gc_type) +{ + if (gc_type == FG_GC) { + struct f2fs_sb_info *sbi = F2FS_P_SB(node_page); + struct writeback_control wbc = { + .sync_mode = WB_SYNC_ALL, + .nr_to_write = 1, + .for_reclaim = 0, + }; + + set_page_dirty(node_page); + f2fs_wait_on_page_writeback(node_page, NODE, true); + + f2fs_bug_on(sbi, PageWriteback(node_page)); + if (!clear_page_dirty_for_io(node_page)) + goto out_page; + + if (NODE_MAPPING(sbi)->a_ops->writepage(node_page, &wbc)) + unlock_page(node_page); + goto release_page; + } else { + /* set page dirty and write it */ + if (!PageWriteback(node_page)) + set_page_dirty(node_page); + } +out_page: + unlock_page(node_page); +release_page: + f2fs_put_page(node_page, 0); +} + +static struct page *last_fsync_dnode(struct f2fs_sb_info *sbi, nid_t ino) { pgoff_t index, end; struct pagevec pvec; - int step = ino ? 
2 : 0; + struct page *last_page = NULL; + + pagevec_init(&pvec, 0); + index = 0; + end = ULONG_MAX; + + while (index <= end) { + int i, nr_pages; + nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, + PAGECACHE_TAG_DIRTY, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); + if (nr_pages == 0) + break; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + if (unlikely(f2fs_cp_error(sbi))) { + f2fs_put_page(last_page, 0); + pagevec_release(&pvec); + return ERR_PTR(-EIO); + } + + if (!IS_DNODE(page) || !is_cold_node(page)) + continue; + if (ino_of_node(page) != ino) + continue; + + lock_page(page); + + if (unlikely(page->mapping != NODE_MAPPING(sbi))) { +continue_unlock: + unlock_page(page); + continue; + } + if (ino_of_node(page) != ino) + goto continue_unlock; + + if (!PageDirty(page)) { + /* someone wrote it for us */ + goto continue_unlock; + } + + if (last_page) + f2fs_put_page(last_page, 0); + + get_page(page); + last_page = page; + unlock_page(page); + } + pagevec_release(&pvec); + cond_resched(); + } + return last_page; +} + +int fsync_node_pages(struct f2fs_sb_info *sbi, nid_t ino, + struct writeback_control *wbc, bool atomic) +{ + pgoff_t index, end; + struct pagevec pvec; + int ret = 0; + struct page *last_page = NULL; + bool marked = false; + + if (atomic) { + last_page = last_fsync_dnode(sbi, ino); + if (IS_ERR_OR_NULL(last_page)) + return PTR_ERR_OR_ZERO(last_page); + } +retry: + pagevec_init(&pvec, 0); + index = 0; + end = ULONG_MAX; + + while (index <= end) { + int i, nr_pages; + nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index, + PAGECACHE_TAG_DIRTY, + min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); + if (nr_pages == 0) + break; + + for (i = 0; i < nr_pages; i++) { + struct page *page = pvec.pages[i]; + + if (unlikely(f2fs_cp_error(sbi))) { + f2fs_put_page(last_page, 0); + pagevec_release(&pvec); + return -EIO; + } + + if (!IS_DNODE(page) || !is_cold_node(page)) + continue; + if (ino_of_node(page) != ino) + continue; + + lock_page(page); + + if (unlikely(page->mapping != NODE_MAPPING(sbi))) { +continue_unlock: + unlock_page(page); + continue; + } + if (ino_of_node(page) != ino) + goto continue_unlock; + + if (!PageDirty(page) && page != last_page) { + /* someone wrote it for us */ + goto continue_unlock; + } + + f2fs_wait_on_page_writeback(page, NODE, true); + BUG_ON(PageWriteback(page)); + + if (!atomic || page == last_page) { + set_fsync_mark(page, 1); + if (IS_INODE(page)) + set_dentry_mark(page, + need_dentry_mark(sbi, ino)); + /* may be written by other thread */ + if (!PageDirty(page)) + set_page_dirty(page); + } + + if (!clear_page_dirty_for_io(page)) + goto continue_unlock; + + ret = NODE_MAPPING(sbi)->a_ops->writepage(page, wbc); + if (ret) { + unlock_page(page); + f2fs_put_page(last_page, 0); + break; + } + if (page == last_page) { + f2fs_put_page(page, 0); + marked = true; + break; + } + } + pagevec_release(&pvec); + cond_resched(); + + if (ret || marked) + break; + } + if (!ret && atomic && !marked) { + f2fs_msg(sbi->sb, KERN_DEBUG, + "Retry to write fsync mark: ino=%u, idx=%lx", + ino, last_page->index); + lock_page(last_page); + set_page_dirty(last_page); + unlock_page(last_page); + goto retry; + } + return ret ? 
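/*
 * Illustrative sketch, not part of the patch: the per-page decision inside
 * fsync_node_pages() above when an atomic fsync is requested.  Only the page
 * previously identified by last_fsync_dnode() carries the fsync mark (plus
 * the dentry mark for an inode page), so roll-forward recovery sees exactly
 * one terminating record; intermediate dnodes are written unmarked.
 * "example_mark_for_fsync" is a hypothetical helper, not the real control flow.
 */
static void example_mark_for_fsync(struct f2fs_sb_info *sbi, struct page *page,
				struct page *last_page, nid_t ino, bool atomic)
{
	if (atomic && page != last_page)
		return;			/* intermediate dnode: leave unmarked */

	set_fsync_mark(page, 1);
	if (IS_INODE(page))
		set_dentry_mark(page, need_dentry_mark(sbi, ino));
}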
-EIO: 0; +} + +int sync_node_pages(struct f2fs_sb_info *sbi, struct writeback_control *wbc) +{ + pgoff_t index, end; + struct pagevec pvec; + int step = 0; int nwritten = 0; pagevec_init(&pvec, 0); @@ -1274,15 +1463,8 @@ next_step: if (step == 2 && (!IS_DNODE(page) || !is_cold_node(page))) continue; - - /* - * If an fsync mode, - * we should not skip writing node pages. - */ lock_node: - if (ino && ino_of_node(page) == ino) - lock_page(page); - else if (!trylock_page(page)) + if (!trylock_page(page)) continue; if (unlikely(page->mapping != NODE_MAPPING(sbi))) { @@ -1290,8 +1472,6 @@ continue_unlock: unlock_page(page); continue; } - if (ino && ino_of_node(page) != ino) - goto continue_unlock; if (!PageDirty(page)) { /* someone wrote it for us */ @@ -1299,7 +1479,7 @@ continue_unlock: } /* flush inline_data */ - if (!ino && is_inline_node(page)) { + if (is_inline_node(page)) { clear_inline_node(page); unlock_page(page); flush_inline_data(sbi, ino_of_node(page)); @@ -1312,17 +1492,8 @@ continue_unlock: if (!clear_page_dirty_for_io(page)) goto continue_unlock; - /* called by fsync() */ - if (ino && IS_DNODE(page)) { - set_fsync_mark(page, 1); - if (IS_INODE(page)) - set_dentry_mark(page, - need_dentry_mark(sbi, ino)); - nwritten++; - } else { - set_fsync_mark(page, 0); - set_dentry_mark(page, 0); - } + set_fsync_mark(page, 0); + set_dentry_mark(page, 0); if (NODE_MAPPING(sbi)->a_ops->writepage(page, wbc)) unlock_page(page); @@ -1470,7 +1641,7 @@ static int f2fs_write_node_pages(struct address_space *mapping, diff = nr_pages_to_write(sbi, NODE, wbc); wbc->sync_mode = WB_SYNC_NONE; - sync_node_pages(sbi, 0, wbc); + sync_node_pages(sbi, wbc); wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff); return 0; @@ -1524,7 +1695,6 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i; struct nat_entry *ne; - bool allocated = false; if (!available_free_memory(sbi, FREE_NIDS)) return -1; @@ -1538,8 +1708,6 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) ne = __lookup_nat_cache(nm_i, nid); if (ne && (!get_nat_flag(ne, IS_CHECKPOINTED) || nat_get_blkaddr(ne) != NULL_ADDR)) - allocated = true; - if (allocated) return 0; } @@ -1672,6 +1840,10 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid) struct f2fs_nm_info *nm_i = NM_I(sbi); struct free_nid *i = NULL; retry: +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (time_to_inject(FAULT_ALLOC_NID)) + return false; +#endif if (unlikely(sbi->total_valid_node_count + 1 > nm_i->available_nids)) return false; @@ -1846,7 +2018,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page) if (unlikely(old_ni.blk_addr != NULL_ADDR)) return -EINVAL; - ipage = grab_cache_page(NODE_MAPPING(sbi), ino); + ipage = f2fs_grab_cache_page(NODE_MAPPING(sbi), ino, false); if (!ipage) return -ENOMEM; diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index 011942f94d64..3d7216d7a288 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -49,8 +49,9 @@ static struct kmem_cache *fsync_entry_slab; bool space_for_roll_forward(struct f2fs_sb_info *sbi) { - if (sbi->last_valid_block_count + sbi->alloc_valid_block_count - > sbi->user_block_count) + s64 nalloc = percpu_counter_sum_positive(&sbi->alloc_valid_block_count); + + if (sbi->last_valid_block_count + nalloc > sbi->user_block_count) return false; return true; } @@ -67,7 +68,30 @@ static struct fsync_inode_entry *get_fsync_inode(struct list_head *head, return NULL; } -static int recover_dentry(struct inode 
*inode, struct page *ipage) +static struct fsync_inode_entry *add_fsync_inode(struct list_head *head, + struct inode *inode) +{ + struct fsync_inode_entry *entry; + + entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO); + if (!entry) + return NULL; + + entry->inode = inode; + list_add_tail(&entry->list, head); + + return entry; +} + +static void del_fsync_inode(struct fsync_inode_entry *entry) +{ + iput(entry->inode); + list_del(&entry->list); + kmem_cache_free(fsync_entry_slab, entry); +} + +static int recover_dentry(struct inode *inode, struct page *ipage, + struct list_head *dir_list) { struct f2fs_inode *raw_inode = F2FS_INODE(ipage); nid_t pino = le32_to_cpu(raw_inode->i_pino); @@ -75,18 +99,29 @@ static int recover_dentry(struct inode *inode, struct page *ipage) struct qstr name; struct page *page; struct inode *dir, *einode; + struct fsync_inode_entry *entry; int err = 0; - dir = f2fs_iget(inode->i_sb, pino); - if (IS_ERR(dir)) { - err = PTR_ERR(dir); - goto out; + entry = get_fsync_inode(dir_list, pino); + if (!entry) { + dir = f2fs_iget(inode->i_sb, pino); + if (IS_ERR(dir)) { + err = PTR_ERR(dir); + goto out; + } + + entry = add_fsync_inode(dir_list, dir); + if (!entry) { + err = -ENOMEM; + iput(dir); + goto out; + } } - if (file_enc_name(inode)) { - iput(dir); + dir = entry->inode; + + if (file_enc_name(inode)) return 0; - } name.len = le32_to_cpu(raw_inode->i_namelen); name.name = raw_inode->i_name; @@ -94,7 +129,7 @@ static int recover_dentry(struct inode *inode, struct page *ipage) if (unlikely(name.len > F2FS_NAME_LEN)) { WARN_ON(1); err = -ENAMETOOLONG; - goto out_err; + goto out; } retry: de = f2fs_find_entry(dir, &name, &page); @@ -120,23 +155,12 @@ retry: goto retry; } err = __f2fs_add_link(dir, &name, inode, inode->i_ino, inode->i_mode); - if (err) - goto out_err; - - if (is_inode_flag_set(F2FS_I(dir), FI_DELAY_IPUT)) { - iput(dir); - } else { - add_dirty_dir_inode(dir); - set_inode_flag(F2FS_I(dir), FI_DELAY_IPUT); - } goto out; out_unmap_put: f2fs_dentry_kunmap(dir, page); f2fs_put_page(page, 0); -out_err: - iput(dir); out: f2fs_msg(inode->i_sb, KERN_NOTICE, "%s: ino = %x, name = %s, dir = %lx, err = %d", @@ -198,6 +222,7 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) { unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); struct curseg_info *curseg; + struct inode *inode; struct page *page = NULL; block_t blkaddr; int err = 0; @@ -206,8 +231,6 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); blkaddr = NEXT_FREE_BLKADDR(sbi, curseg); - ra_meta_pages(sbi, blkaddr, 1, META_POR, true); - while (1) { struct fsync_inode_entry *entry; @@ -233,35 +256,32 @@ static int find_fsync_dnodes(struct f2fs_sb_info *sbi, struct list_head *head) break; } - /* add this fsync inode to the list */ - entry = kmem_cache_alloc(fsync_entry_slab, GFP_F2FS_ZERO); - if (!entry) { - err = -ENOMEM; - break; - } /* * CP | dnode(F) | inode(DF) * For this case, we should not give up now. 
*/ - entry->inode = f2fs_iget(sbi->sb, ino_of_node(page)); - if (IS_ERR(entry->inode)) { - err = PTR_ERR(entry->inode); - kmem_cache_free(fsync_entry_slab, entry); + inode = f2fs_iget(sbi->sb, ino_of_node(page)); + if (IS_ERR(inode)) { + err = PTR_ERR(inode); if (err == -ENOENT) { err = 0; goto next; } break; } - list_add_tail(&entry->list, head); + + /* add this fsync inode to the list */ + entry = add_fsync_inode(head, inode); + if (!entry) { + err = -ENOMEM; + iput(inode); + break; + } } entry->blkaddr = blkaddr; - if (IS_INODE(page)) { - entry->last_inode = blkaddr; - if (is_dent_dnode(page)) - entry->last_dentry = blkaddr; - } + if (IS_INODE(page) && is_dent_dnode(page)) + entry->last_dentry = blkaddr; next: /* check next segment */ blkaddr = next_blkaddr_of_node(page); @@ -277,11 +297,8 @@ static void destroy_fsync_dnodes(struct list_head *head) { struct fsync_inode_entry *entry, *tmp; - list_for_each_entry_safe(entry, tmp, head, list) { - iput(entry->inode); - list_del(&entry->list); - kmem_cache_free(fsync_entry_slab, entry); - } + list_for_each_entry_safe(entry, tmp, head, list) + del_fsync_inode(entry); } static int check_index_in_prev_nodes(struct f2fs_sb_info *sbi, @@ -444,8 +461,7 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, */ if (dest == NEW_ADDR) { truncate_data_blocks_range(&dn, 1); - err = reserve_new_block(&dn); - f2fs_bug_on(sbi, err); + reserve_new_block(&dn); continue; } @@ -454,6 +470,10 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, if (src == NULL_ADDR) { err = reserve_new_block(&dn); +#ifdef CONFIG_F2FS_FAULT_INJECTION + while (err) + err = reserve_new_block(&dn); +#endif /* We should not get -ENOSPC */ f2fs_bug_on(sbi, err); } @@ -486,7 +506,8 @@ out: return err; } -static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) +static int recover_data(struct f2fs_sb_info *sbi, struct list_head *inode_list, + struct list_head *dir_list) { unsigned long long cp_ver = cur_cp_version(F2FS_CKPT(sbi)); struct curseg_info *curseg; @@ -513,7 +534,7 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) break; } - entry = get_fsync_inode(head, ino_of_node(page)); + entry = get_fsync_inode(inode_list, ino_of_node(page)); if (!entry) goto next; /* @@ -521,10 +542,10 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) * In this case, we can lose the latest inode(x). * So, call recover_inode for the inode update. 
*/ - if (entry->last_inode == blkaddr) + if (IS_INODE(page)) recover_inode(entry->inode, page); if (entry->last_dentry == blkaddr) { - err = recover_dentry(entry->inode, page); + err = recover_dentry(entry->inode, page, dir_list); if (err) { f2fs_put_page(page, 1); break; @@ -536,11 +557,8 @@ static int recover_data(struct f2fs_sb_info *sbi, struct list_head *head) break; } - if (entry->blkaddr == blkaddr) { - iput(entry->inode); - list_del(&entry->list); - kmem_cache_free(fsync_entry_slab, entry); - } + if (entry->blkaddr == blkaddr) + del_fsync_inode(entry); next: /* check next segment */ blkaddr = next_blkaddr_of_node(page); @@ -551,12 +569,14 @@ next: return err; } -int recover_fsync_data(struct f2fs_sb_info *sbi) +int recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only) { struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_WARM_NODE); struct list_head inode_list; + struct list_head dir_list; block_t blkaddr; int err; + int ret = 0; bool need_writecp = false; fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry", @@ -565,6 +585,7 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) return -ENOMEM; INIT_LIST_HEAD(&inode_list); + INIT_LIST_HEAD(&dir_list); /* prevent checkpoint */ mutex_lock(&sbi->cp_mutex); @@ -573,21 +594,22 @@ int recover_fsync_data(struct f2fs_sb_info *sbi) /* step #1: find fsynced inode numbers */ err = find_fsync_dnodes(sbi, &inode_list); - if (err) + if (err || list_empty(&inode_list)) goto out; - if (list_empty(&inode_list)) + if (check_only) { + ret = 1; goto out; + } need_writecp = true; /* step #2: recover data */ - err = recover_data(sbi, &inode_list); + err = recover_data(sbi, &inode_list, &dir_list); if (!err) f2fs_bug_on(sbi, !list_empty(&inode_list)); out: destroy_fsync_dnodes(&inode_list); - kmem_cache_destroy(fsync_entry_slab); /* truncate meta pages to be used by the recovery */ truncate_inode_pages_range(META_MAPPING(sbi), @@ -625,5 +647,8 @@ out: } else { mutex_unlock(&sbi->cp_mutex); } - return err; + + destroy_fsync_dnodes(&dir_list); + kmem_cache_destroy(fsync_entry_slab); + return ret ? ret: err; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 540669d6978e..2e6f537a0e7d 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -223,9 +223,11 @@ static int __revoke_inmem_pages(struct inode *inode, f2fs_put_dnode(&dn); } next: - ClearPageUptodate(page); + /* we don't need to invalidate this in the sccessful status */ + if (drop || recover) + ClearPageUptodate(page); set_page_private(page, 0); - ClearPageUptodate(page); + ClearPagePrivate(page); f2fs_put_page(page, 1); list_del(&cur->list); @@ -239,6 +241,8 @@ void drop_inmem_pages(struct inode *inode) { struct f2fs_inode_info *fi = F2FS_I(inode); + clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); + mutex_lock(&fi->inmem_lock); __revoke_inmem_pages(inode, &fi->inmem_pages, true, false); mutex_unlock(&fi->inmem_lock); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index 975c33df65c7..7a756ff5a36d 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -158,16 +158,17 @@ struct victim_sel_policy { }; struct seg_entry { - unsigned short valid_blocks; /* # of valid blocks */ + unsigned int type:6; /* segment type like CURSEG_XXX_TYPE */ + unsigned int valid_blocks:10; /* # of valid blocks */ + unsigned int ckpt_valid_blocks:10; /* # of valid blocks last cp */ + unsigned int padding:6; /* padding */ unsigned char *cur_valid_map; /* validity bitmap of blocks */ /* * # of valid blocks and the validity bitmap stored in the the last * checkpoint pack. 
This information is used by the SSR mode. */ - unsigned short ckpt_valid_blocks; - unsigned char *ckpt_valid_map; + unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */ unsigned char *discard_map; - unsigned char type; /* segment type like CURSEG_XXX_TYPE */ unsigned long long mtime; /* modification time of the segment */ }; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 006f87d69921..74cc8520b8b1 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -39,6 +39,30 @@ static struct proc_dir_entry *f2fs_proc_root; static struct kmem_cache *f2fs_inode_cachep; static struct kset *f2fs_kset; +#ifdef CONFIG_F2FS_FAULT_INJECTION +struct f2fs_fault_info f2fs_fault; + +char *fault_name[FAULT_MAX] = { + [FAULT_KMALLOC] = "kmalloc", + [FAULT_PAGE_ALLOC] = "page alloc", + [FAULT_ALLOC_NID] = "alloc nid", + [FAULT_ORPHAN] = "orphan", + [FAULT_BLOCK] = "no more block", + [FAULT_DIR_DEPTH] = "too big dir depth", +}; + +static void f2fs_build_fault_attr(unsigned int rate) +{ + if (rate) { + atomic_set(&f2fs_fault.inject_ops, 0); + f2fs_fault.inject_rate = rate; + f2fs_fault.inject_type = (1 << FAULT_MAX) - 1; + } else { + memset(&f2fs_fault, 0, sizeof(struct f2fs_fault_info)); + } +} +#endif + /* f2fs-wide shrinker description */ static struct shrinker f2fs_shrinker_info = { .scan_objects = f2fs_shrink_scan, @@ -68,6 +92,7 @@ enum { Opt_noextent_cache, Opt_noinline_data, Opt_data_flush, + Opt_fault_injection, Opt_err, }; @@ -93,6 +118,7 @@ static match_table_t f2fs_tokens = { {Opt_noextent_cache, "noextent_cache"}, {Opt_noinline_data, "noinline_data"}, {Opt_data_flush, "data_flush"}, + {Opt_fault_injection, "fault_injection=%u"}, {Opt_err, NULL}, }; @@ -102,6 +128,10 @@ enum { SM_INFO, /* struct f2fs_sm_info */ NM_INFO, /* struct f2fs_nm_info */ F2FS_SBI, /* struct f2fs_sb_info */ +#ifdef CONFIG_F2FS_FAULT_INJECTION + FAULT_INFO_RATE, /* struct f2fs_fault_info */ + FAULT_INFO_TYPE, /* struct f2fs_fault_info */ +#endif }; struct f2fs_attr { @@ -123,6 +153,11 @@ static unsigned char *__struct_ptr(struct f2fs_sb_info *sbi, int struct_type) return (unsigned char *)NM_I(sbi); else if (struct_type == F2FS_SBI) return (unsigned char *)sbi; +#ifdef CONFIG_F2FS_FAULT_INJECTION + else if (struct_type == FAULT_INFO_RATE || + struct_type == FAULT_INFO_TYPE) + return (unsigned char *)&f2fs_fault; +#endif return NULL; } @@ -172,6 +207,10 @@ static ssize_t f2fs_sbi_store(struct f2fs_attr *a, ret = kstrtoul(skip_spaces(buf), 0, &t); if (ret < 0) return ret; +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (a->struct_type == FAULT_INFO_TYPE && t >= (1 << FAULT_MAX)) + return -EINVAL; +#endif *ui = t; return count; } @@ -237,6 +276,10 @@ F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_victim_search, max_victim_search); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, dir_level, dir_level); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, cp_interval, interval_time[CP_TIME]); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, idle_interval, interval_time[REQ_TIME]); +#ifdef CONFIG_F2FS_FAULT_INJECTION +F2FS_RW_ATTR(FAULT_INFO_RATE, f2fs_fault_info, inject_rate, inject_rate); +F2FS_RW_ATTR(FAULT_INFO_TYPE, f2fs_fault_info, inject_type, inject_type); +#endif F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); #define ATTR_LIST(name) (&f2fs_attr_##name.attr) @@ -273,6 +316,22 @@ static struct kobj_type f2fs_ktype = { .release = f2fs_sb_release, }; +#ifdef CONFIG_F2FS_FAULT_INJECTION +/* sysfs for f2fs fault injection */ +static struct kobject f2fs_fault_inject; + +static struct attribute *f2fs_fault_attrs[] = { + ATTR_LIST(inject_rate), + ATTR_LIST(inject_type), 
+ NULL +}; + +static struct kobj_type f2fs_fault_ktype = { + .default_attrs = f2fs_fault_attrs, + .sysfs_ops = &f2fs_attr_ops, +}; +#endif + void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) { struct va_format vaf; @@ -300,6 +359,10 @@ static int parse_options(struct super_block *sb, char *options) char *p, *name; int arg = 0; +#ifdef CONFIG_F2FS_FAULT_INJECTION + f2fs_build_fault_attr(0); +#endif + if (!options) return 0; @@ -433,6 +496,16 @@ static int parse_options(struct super_block *sb, char *options) case Opt_data_flush: set_opt(sbi, DATA_FLUSH); break; + case Opt_fault_injection: + if (args->from && match_int(args, &arg)) + return -EINVAL; +#ifdef CONFIG_F2FS_FAULT_INJECTION + f2fs_build_fault_attr(arg); +#else + f2fs_msg(sb, KERN_INFO, + "FAULT_INJECTION was not selected"); +#endif + break; default: f2fs_msg(sb, KERN_ERR, "Unrecognized mount option \"%s\" or missing value", @@ -453,9 +526,13 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) init_once((void *) fi); + if (percpu_counter_init(&fi->dirty_pages, 0, GFP_NOFS)) { + kmem_cache_free(f2fs_inode_cachep, fi); + return NULL; + } + /* Initialize f2fs-specific inode info */ fi->vfs_inode.i_version = 1; - atomic_set(&fi->dirty_pages, 0); fi->i_current_depth = 1; fi->i_advise = 0; init_rwsem(&fi->i_sem); @@ -530,15 +607,27 @@ static void f2fs_i_callback(struct rcu_head *head) static void f2fs_destroy_inode(struct inode *inode) { + percpu_counter_destroy(&F2FS_I(inode)->dirty_pages); call_rcu(&inode->i_rcu, f2fs_i_callback); } +static void destroy_percpu_info(struct f2fs_sb_info *sbi) +{ + int i; + + for (i = 0; i < NR_COUNT_TYPE; i++) + percpu_counter_destroy(&sbi->nr_pages[i]); + percpu_counter_destroy(&sbi->alloc_valid_block_count); + percpu_counter_destroy(&sbi->total_valid_inode_count); +} + static void f2fs_put_super(struct super_block *sb) { struct f2fs_sb_info *sbi = F2FS_SB(sb); if (sbi->s_proc) { remove_proc_entry("segment_info", sbi->s_proc); + remove_proc_entry("segment_bits", sbi->s_proc); remove_proc_entry(sb->s_id, f2fs_proc_root); } kobject_del(&sbi->s_kobj); @@ -568,15 +657,14 @@ static void f2fs_put_super(struct super_block *sb) * normally superblock is clean, so we need to release this. * In addition, EIO will skip do checkpoint, we need this as well. 
*/ - release_ino_entry(sbi); + release_ino_entry(sbi, true); release_discard_addrs(sbi); f2fs_leave_shrinker(sbi); mutex_unlock(&sbi->umount_mutex); /* our cp_error case, we can wait for any writeback page */ - if (get_pages(sbi, F2FS_WRITEBACK)) - f2fs_flush_merged_bios(sbi); + f2fs_flush_merged_bios(sbi); iput(sbi->node_inode); iput(sbi->meta_inode); @@ -593,6 +681,8 @@ static void f2fs_put_super(struct super_block *sb) if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); kfree(sbi->raw_super); + + destroy_percpu_info(sbi); kfree(sbi); } @@ -745,19 +835,47 @@ static int segment_info_seq_show(struct seq_file *seq, void *offset) return 0; } -static int segment_info_open_fs(struct inode *inode, struct file *file) +static int segment_bits_seq_show(struct seq_file *seq, void *offset) { - return single_open(file, segment_info_seq_show, PDE_DATA(inode)); + struct super_block *sb = seq->private; + struct f2fs_sb_info *sbi = F2FS_SB(sb); + unsigned int total_segs = + le32_to_cpu(sbi->raw_super->segment_count_main); + int i, j; + + seq_puts(seq, "format: segment_type|valid_blocks|bitmaps\n" + "segment_type(0:HD, 1:WD, 2:CD, 3:HN, 4:WN, 5:CN)\n"); + + for (i = 0; i < total_segs; i++) { + struct seg_entry *se = get_seg_entry(sbi, i); + + seq_printf(seq, "%-10d", i); + seq_printf(seq, "%d|%-3u|", se->type, + get_valid_blocks(sbi, i, 1)); + for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++) + seq_printf(seq, "%x ", se->cur_valid_map[j]); + seq_putc(seq, '\n'); + } + return 0; } -static const struct file_operations f2fs_seq_segment_info_fops = { - .owner = THIS_MODULE, - .open = segment_info_open_fs, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, +#define F2FS_PROC_FILE_DEF(_name) \ +static int _name##_open_fs(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, _name##_seq_show, PDE_DATA(inode)); \ +} \ + \ +static const struct file_operations f2fs_seq_##_name##_fops = { \ + .owner = THIS_MODULE, \ + .open = _name##_open_fs, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ }; +F2FS_PROC_FILE_DEF(segment_info); +F2FS_PROC_FILE_DEF(segment_bits); + static void default_options(struct f2fs_sb_info *sbi) { /* init some FS parameters */ @@ -791,13 +909,15 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) org_mount_opt = sbi->mount_opt; active_logs = sbi->active_logs; - if (*flags & MS_RDONLY) { - set_opt(sbi, FASTBOOT); - set_sbi_flag(sbi, SBI_IS_DIRTY); + /* recover superblocks we couldn't write due to previous RO mount */ + if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { + err = f2fs_commit_super(sbi, false); + f2fs_msg(sb, KERN_INFO, + "Try to recover all the superblocks, ret: %d", err); + if (!err) + clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); } - sync_filesystem(sb); - sbi->mount_opt.opt = 0; default_options(sbi); @@ -829,7 +949,6 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) { if (sbi->gc_thread) { stop_gc_thread(sbi); - f2fs_sync_fs(sb, 1); need_restart_gc = true; } } else if (!sbi->gc_thread) { @@ -839,6 +958,16 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) need_stop_gc = true; } + if (*flags & MS_RDONLY) { + writeback_inodes_sb(sb, WB_REASON_SYNC); + sync_inodes_sb(sb); + + set_sbi_flag(sbi, SBI_IS_DIRTY); + set_sbi_flag(sbi, SBI_IS_CLOSE); + f2fs_sync_fs(sb, 1); + clear_sbi_flag(sbi, SBI_IS_CLOSE); + } + /* * We stop issue flush thread if FS is mounted as RO * or if 
flush_merge is not passed in mount option. @@ -852,8 +981,9 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) } skip: /* Update the POSIXACL Flag */ - sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | + sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0); + return 0; restore_gc: if (need_restart_gc) { @@ -893,6 +1023,12 @@ static int f2fs_get_context(struct inode *inode, void *ctx, size_t len) ctx, len, NULL); } +static int f2fs_key_prefix(struct inode *inode, u8 **key) +{ + *key = F2FS_I_SB(inode)->key_prefix; + return F2FS_I_SB(inode)->key_prefix_size; +} + static int f2fs_set_context(struct inode *inode, const void *ctx, size_t len, void *fs_data) { @@ -909,6 +1045,7 @@ static unsigned f2fs_max_namelen(struct inode *inode) static struct fscrypt_operations f2fs_cryptops = { .get_context = f2fs_get_context, + .key_prefix = f2fs_key_prefix, .set_context = f2fs_set_context, .is_encrypted = f2fs_encrypted_inode, .empty_dir = f2fs_empty_dir, @@ -998,11 +1135,12 @@ static int __f2fs_commit_super(struct buffer_head *bh, return __sync_dirty_buffer(bh, WRITE_FLUSH_FUA); } -static inline bool sanity_check_area_boundary(struct super_block *sb, +static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi, struct buffer_head *bh) { struct f2fs_super_block *raw_super = (struct f2fs_super_block *) (bh->b_data + F2FS_SUPER_OFFSET); + struct super_block *sb = sbi->sb; u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr); u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr); @@ -1081,6 +1219,7 @@ static inline bool sanity_check_area_boundary(struct super_block *sb, segment0_blkaddr) >> log_blocks_per_seg); if (f2fs_readonly(sb) || bdev_read_only(sb->s_bdev)) { + set_sbi_flag(sbi, SBI_NEED_SB_WRITE); res = "internally"; } else { err = __f2fs_commit_super(bh, NULL); @@ -1098,11 +1237,12 @@ static inline bool sanity_check_area_boundary(struct super_block *sb, return false; } -static int sanity_check_raw_super(struct super_block *sb, +static int sanity_check_raw_super(struct f2fs_sb_info *sbi, struct buffer_head *bh) { struct f2fs_super_block *raw_super = (struct f2fs_super_block *) (bh->b_data + F2FS_SUPER_OFFSET); + struct super_block *sb = sbi->sb; unsigned int blocksize; if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { @@ -1169,7 +1309,7 @@ static int sanity_check_raw_super(struct super_block *sb, } /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */ - if (sanity_check_area_boundary(sb, bh)) + if (sanity_check_area_boundary(sbi, bh)) return 1; return 0; @@ -1201,7 +1341,6 @@ int sanity_check_ckpt(struct f2fs_sb_info *sbi) static void init_sb_info(struct f2fs_sb_info *sbi) { struct f2fs_super_block *raw_super = sbi->raw_super; - int i; sbi->log_sectors_per_block = le32_to_cpu(raw_super->log_sectors_per_block); @@ -1221,9 +1360,6 @@ static void init_sb_info(struct f2fs_sb_info *sbi) sbi->cur_victim_sec = NULL_SECNO; sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH; - for (i = 0; i < NR_COUNT_TYPE; i++) - atomic_set(&sbi->nr_pages[i], 0); - sbi->dir_level = DEF_DIR_LEVEL; sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL; sbi->interval_time[REQ_TIME] = DEF_IDLE_INTERVAL; @@ -1231,6 +1367,30 @@ static void init_sb_info(struct f2fs_sb_info *sbi) INIT_LIST_HEAD(&sbi->s_list); mutex_init(&sbi->umount_mutex); + +#ifdef CONFIG_F2FS_FS_ENCRYPTION + memcpy(sbi->key_prefix, F2FS_KEY_DESC_PREFIX, + F2FS_KEY_DESC_PREFIX_SIZE); + sbi->key_prefix_size = F2FS_KEY_DESC_PREFIX_SIZE; +#endif 
+} + +static int init_percpu_info(struct f2fs_sb_info *sbi) +{ + int i, err; + + for (i = 0; i < NR_COUNT_TYPE; i++) { + err = percpu_counter_init(&sbi->nr_pages[i], 0, GFP_KERNEL); + if (err) + return err; + } + + err = percpu_counter_init(&sbi->alloc_valid_block_count, 0, GFP_KERNEL); + if (err) + return err; + + return percpu_counter_init(&sbi->total_valid_inode_count, 0, + GFP_KERNEL); } /* @@ -1239,10 +1399,11 @@ static void init_sb_info(struct f2fs_sb_info *sbi) * to get the first valid one. If any one of them is broken, we pass * them recovery flag back to the caller. */ -static int read_raw_super_block(struct super_block *sb, +static int read_raw_super_block(struct f2fs_sb_info *sbi, struct f2fs_super_block **raw_super, int *valid_super_block, int *recovery) { + struct super_block *sb = sbi->sb; int block; struct buffer_head *bh; struct f2fs_super_block *super; @@ -1262,7 +1423,7 @@ static int read_raw_super_block(struct super_block *sb, } /* sanity checking of raw super */ - if (sanity_check_raw_super(sb, bh)) { + if (sanity_check_raw_super(sbi, bh)) { f2fs_msg(sb, KERN_ERR, "Can't find valid F2FS filesystem in %dth superblock", block + 1); @@ -1298,6 +1459,12 @@ int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover) struct buffer_head *bh; int err; + if ((recover && f2fs_readonly(sbi->sb)) || + bdev_read_only(sbi->sb->s_bdev)) { + set_sbi_flag(sbi, SBI_NEED_SB_WRITE); + return -EROFS; + } + /* write back-up superblock first */ bh = sb_getblk(sbi->sb, sbi->valid_super_block ? 0: 1); if (!bh) @@ -1323,7 +1490,7 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent) struct f2fs_sb_info *sbi; struct f2fs_super_block *raw_super; struct inode *root; - long err; + int err; bool retry = true, need_fsck = false; char *options = NULL; int recovery, i, valid_super_block; @@ -1340,6 +1507,8 @@ try_onemore: if (!sbi) return -ENOMEM; + sbi->sb = sb; + /* Load the checksum driver */ sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); if (IS_ERR(sbi->s_chksum_driver)) { @@ -1355,7 +1524,7 @@ try_onemore: goto free_sbi; } - err = read_raw_super_block(sb, &raw_super, &valid_super_block, + err = read_raw_super_block(sbi, &raw_super, &valid_super_block, &recovery); if (err) goto free_sbi; @@ -1390,7 +1559,6 @@ try_onemore: memcpy(sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid)); /* init f2fs-specific super block info */ - sbi->sb = sb; sbi->raw_super = raw_super; sbi->valid_super_block = valid_super_block; mutex_init(&sbi->gc_mutex); @@ -1415,6 +1583,10 @@ try_onemore: init_waitqueue_head(&sbi->cp_wait); init_sb_info(sbi); + err = init_percpu_info(sbi); + if (err) + goto free_options; + /* get an inode for meta space */ sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); if (IS_ERR(sbi->meta_inode)) { @@ -1431,13 +1603,13 @@ try_onemore: sbi->total_valid_node_count = le32_to_cpu(sbi->ckpt->valid_node_count); - sbi->total_valid_inode_count = - le32_to_cpu(sbi->ckpt->valid_inode_count); + percpu_counter_set(&sbi->total_valid_inode_count, + le32_to_cpu(sbi->ckpt->valid_inode_count)); sbi->user_block_count = le64_to_cpu(sbi->ckpt->user_block_count); sbi->total_valid_block_count = le64_to_cpu(sbi->ckpt->valid_block_count); sbi->last_valid_block_count = sbi->total_valid_block_count; - sbi->alloc_valid_block_count = 0; + for (i = 0; i < NR_INODE_TYPE; i++) { INIT_LIST_HEAD(&sbi->inode_list[i]); spin_lock_init(&sbi->inode_lock[i]); @@ -1515,9 +1687,12 @@ try_onemore: if (f2fs_proc_root) sbi->s_proc = proc_mkdir(sb->s_id, f2fs_proc_root); - if (sbi->s_proc) + if 
(sbi->s_proc) { proc_create_data("segment_info", S_IRUGO, sbi->s_proc, &f2fs_seq_segment_info_fops, sb); + proc_create_data("segment_bits", S_IRUGO, sbi->s_proc, + &f2fs_seq_segment_bits_fops, sb); + } sbi->s_kobj.kset = f2fs_kset; init_completion(&sbi->s_kobj_unregister); @@ -1541,14 +1716,24 @@ try_onemore: if (need_fsck) set_sbi_flag(sbi, SBI_NEED_FSCK); - err = recover_fsync_data(sbi); - if (err) { + err = recover_fsync_data(sbi, false); + if (err < 0) { need_fsck = true; f2fs_msg(sb, KERN_ERR, - "Cannot recover all fsync data errno=%ld", err); + "Cannot recover all fsync data errno=%d", err); + goto free_kobj; + } + } else { + err = recover_fsync_data(sbi, true); + + if (!f2fs_readonly(sb) && err > 0) { + err = -EINVAL; + f2fs_msg(sb, KERN_ERR, + "Need to recover fsync data"); goto free_kobj; } } + /* recover_fsync_data() cleared this already */ clear_sbi_flag(sbi, SBI_POR_DOING); @@ -1565,10 +1750,10 @@ try_onemore: kfree(options); /* recover broken superblock */ - if (recovery && !f2fs_readonly(sb) && !bdev_read_only(sb->s_bdev)) { + if (recovery) { err = f2fs_commit_super(sbi, true); f2fs_msg(sb, KERN_INFO, - "Try to recover %dth superblock, ret: %ld", + "Try to recover %dth superblock, ret: %d", sbi->valid_super_block ? 1 : 2, err); } @@ -1583,6 +1768,7 @@ free_kobj: free_proc: if (sbi->s_proc) { remove_proc_entry("segment_info", sbi->s_proc); + remove_proc_entry("segment_bits", sbi->s_proc); remove_proc_entry(sb->s_id, f2fs_proc_root); } f2fs_destroy_stats(sbi); @@ -1603,6 +1789,7 @@ free_meta_inode: make_bad_inode(sbi->meta_inode); iput(sbi->meta_inode); free_options: + destroy_percpu_info(sbi); kfree(options); free_sb_buf: kfree(raw_super); @@ -1688,6 +1875,16 @@ static int __init init_f2fs_fs(void) err = -ENOMEM; goto free_extent_cache; } +#ifdef CONFIG_F2FS_FAULT_INJECTION + f2fs_fault_inject.kset = f2fs_kset; + f2fs_build_fault_attr(0); + err = kobject_init_and_add(&f2fs_fault_inject, &f2fs_fault_ktype, + NULL, "fault_injection"); + if (err) { + f2fs_fault_inject.kset = NULL; + goto free_kset; + } +#endif err = register_shrinker(&f2fs_shrinker_info); if (err) goto free_kset; @@ -1706,6 +1903,10 @@ free_filesystem: free_shrinker: unregister_shrinker(&f2fs_shrinker_info); free_kset: +#ifdef CONFIG_F2FS_FAULT_INJECTION + if (f2fs_fault_inject.kset) + kobject_put(&f2fs_fault_inject); +#endif kset_unregister(f2fs_kset); free_extent_cache: destroy_extent_cache(); @@ -1725,14 +1926,17 @@ static void __exit exit_f2fs_fs(void) { remove_proc_entry("fs/f2fs", NULL); f2fs_destroy_root_stats(); - unregister_shrinker(&f2fs_shrinker_info); unregister_filesystem(&f2fs_fs_type); + unregister_shrinker(&f2fs_shrinker_info); +#ifdef CONFIG_F2FS_FAULT_INJECTION + kobject_put(&f2fs_fault_inject); +#endif + kset_unregister(f2fs_kset); destroy_extent_cache(); destroy_checkpoint_caches(); destroy_segment_manager_caches(); destroy_node_manager_caches(); destroy_inodecache(); - kset_unregister(f2fs_kset); f2fs_destroy_trace_ios(); } diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 06a72dc0191a..00ea56797258 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -26,10 +26,10 @@ #include "xattr.h" static int f2fs_xattr_generic_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, void *buffer, - size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - struct f2fs_sb_info *sbi = F2FS_SB(dentry->d_sb); + struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb); switch (handler->flags) { case F2FS_XATTR_INDEX_USER: @@ -45,7 +45,7 
@@ static int f2fs_xattr_generic_get(const struct xattr_handler *handler, default: return -EINVAL; } - return f2fs_getxattr(d_inode(dentry), handler->flags, name, + return f2fs_getxattr(inode, handler->flags, name, buffer, size, NULL); } @@ -86,11 +86,9 @@ static bool f2fs_xattr_trusted_list(struct dentry *dentry) } static int f2fs_xattr_advise_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, void *buffer, - size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - struct inode *inode = d_inode(dentry); - if (buffer) *((char *)buffer) = F2FS_I(inode)->i_advise; return sizeof(char); @@ -500,7 +498,7 @@ static int __f2fs_setxattr(struct inode *inode, int index, free = free + ENTRY_SIZE(here); if (unlikely(free < newsize)) { - error = -ENOSPC; + error = -E2BIG; goto exit; } } @@ -528,7 +526,6 @@ static int __f2fs_setxattr(struct inode *inode, int index, * Before we come here, old entry is removed. * We just write new entry. */ - memset(last, 0, newsize); last->e_name_index = index; last->e_name_len = len; memcpy(last->e_name, name, len); diff --git a/fs/fat/dir.c b/fs/fat/dir.c index d0b95c95079b..663e428596c6 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -769,7 +769,7 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *file, buf.dirent = dirent; buf.result = 0; - inode_lock(inode); + inode_lock_shared(inode); buf.ctx.pos = file->f_pos; ret = -ENOENT; if (!IS_DEADDIR(inode)) { @@ -777,7 +777,7 @@ static int fat_ioctl_readdir(struct inode *inode, struct file *file, short_only, both ? &buf : NULL); file->f_pos = buf.ctx.pos; } - inode_unlock(inode); + inode_unlock_shared(inode); if (ret >= 0) ret = buf.result; return ret; @@ -861,7 +861,7 @@ static long fat_compat_dir_ioctl(struct file *filp, unsigned cmd, const struct file_operations fat_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = fat_readdir, + .iterate_shared = fat_readdir, .unlocked_ioctl = fat_dir_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = fat_compat_dir_ioctl, diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 226281068a46..3bcf57925dca 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -244,13 +244,13 @@ static int fat_write_end(struct file *file, struct address_space *mapping, return err; } -static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; size_t count = iov_iter_count(iter); + loff_t offset = iocb->ki_pos; ssize_t ret; if (iov_iter_rw(iter) == WRITE) { @@ -272,7 +272,7 @@ static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter, * FAT need to use the DIO_LOCKING for avoiding the race * condition of fat_get_block() and ->truncate(). */ - ret = blockdev_direct_IO(iocb, inode, iter, offset, fat_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, fat_get_block); if (ret < 0 && iov_iter_rw(iter) == WRITE) fat_write_failed(mapping, offset + count); diff --git a/fs/file.c b/fs/file.c index 1fbc5c0555a9..6b1acdfe59da 100644 --- a/fs/file.c +++ b/fs/file.c @@ -784,6 +784,11 @@ unsigned long __fdget_pos(unsigned int fd) return v; } +void __f_unlock_pos(struct file *f) +{ + mutex_unlock(&f->f_pos_lock); +} + /* * We only lock f_pos if we have threads or if the file might be * shared with another process. 
In both cases we'll have an elevated diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c index a49e0cfbb686..6d576b97f2c8 100644 --- a/fs/freevxfs/vxfs_lookup.c +++ b/fs/freevxfs/vxfs_lookup.c @@ -58,7 +58,7 @@ const struct inode_operations vxfs_dir_inode_ops = { const struct file_operations vxfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = vxfs_readdir, + .iterate_shared = vxfs_readdir, }; static inline u_long diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 592cea54cea0..989a2cef6b76 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -931,7 +931,8 @@ void wb_start_writeback(struct bdi_writeback *wb, long nr_pages, * This is WB_SYNC_NONE writeback, so if allocation fails just * wakeup the thread for old dirty data writeback */ - work = kzalloc(sizeof(*work), GFP_ATOMIC); + work = kzalloc(sizeof(*work), + GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN); if (!work) { trace_writeback_nowork(wb); wb_wakeup(wb); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 4b855b65d457..b9419058108f 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1162,7 +1162,6 @@ static int fuse_direntplus_link(struct file *file, struct fuse_direntplus *direntplus, u64 attr_version) { - int err; struct fuse_entry_out *o = &direntplus->entry_out; struct fuse_dirent *dirent = &direntplus->dirent; struct dentry *parent = file->f_path.dentry; @@ -1172,6 +1171,7 @@ static int fuse_direntplus_link(struct file *file, struct inode *dir = d_inode(parent); struct fuse_conn *fc; struct inode *inode; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); if (!o->nodeid) { /* @@ -1204,65 +1204,61 @@ static int fuse_direntplus_link(struct file *file, name.hash = full_name_hash(name.name, name.len); dentry = d_lookup(parent, &name); - if (dentry) { + if (!dentry) { +retry: + dentry = d_alloc_parallel(parent, &name, &wq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + } + if (!d_in_lookup(dentry)) { + struct fuse_inode *fi; inode = d_inode(dentry); - if (!inode) { - d_drop(dentry); - } else if (get_node_id(inode) != o->nodeid || - ((o->attr.mode ^ inode->i_mode) & S_IFMT)) { + if (!inode || + get_node_id(inode) != o->nodeid || + ((o->attr.mode ^ inode->i_mode) & S_IFMT)) { d_invalidate(dentry); - } else if (is_bad_inode(inode)) { - err = -EIO; - goto out; - } else { - struct fuse_inode *fi; - fi = get_fuse_inode(inode); - spin_lock(&fc->lock); - fi->nlookup++; - spin_unlock(&fc->lock); - - fuse_change_attributes(inode, &o->attr, - entry_attr_timeout(o), - attr_version); - - /* - * The other branch to 'found' comes via fuse_iget() - * which bumps nlookup inside - */ - goto found; + dput(dentry); + goto retry; + } + if (is_bad_inode(inode)) { + dput(dentry); + return -EIO; } - dput(dentry); - } - - dentry = d_alloc(parent, &name); - err = -ENOMEM; - if (!dentry) - goto out; - inode = fuse_iget(dir->i_sb, o->nodeid, o->generation, - &o->attr, entry_attr_timeout(o), attr_version); - if (!inode) - goto out; + fi = get_fuse_inode(inode); + spin_lock(&fc->lock); + fi->nlookup++; + spin_unlock(&fc->lock); - alias = d_splice_alias(inode, dentry); - err = PTR_ERR(alias); - if (IS_ERR(alias)) - goto out; + fuse_change_attributes(inode, &o->attr, + entry_attr_timeout(o), + attr_version); + /* + * The other branch comes via fuse_iget() + * which bumps nlookup inside + */ + } else { + inode = fuse_iget(dir->i_sb, o->nodeid, o->generation, + &o->attr, entry_attr_timeout(o), + attr_version); + if (!inode) + inode = ERR_PTR(-ENOMEM); - if (alias) { - dput(dentry); - dentry = alias; + alias = 
d_splice_alias(inode, dentry); + d_lookup_done(dentry); + if (alias) { + dput(dentry); + dentry = alias; + } + if (IS_ERR(dentry)) + return PTR_ERR(dentry); } - -found: if (fc->readdirplus_auto) set_bit(FUSE_I_INIT_RDPLUS, &get_fuse_inode(inode)->state); fuse_change_entry_timeout(dentry, o); - err = 0; -out: dput(dentry); - return err; + return 0; } static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, @@ -1759,10 +1755,9 @@ static int fuse_setxattr(struct dentry *entry, const char *name, return err; } -static ssize_t fuse_getxattr(struct dentry *entry, const char *name, - void *value, size_t size) +static ssize_t fuse_getxattr(struct dentry *entry, struct inode *inode, + const char *name, void *value, size_t size) { - struct inode *inode = d_inode(entry); struct fuse_conn *fc = get_fuse_conn(inode); FUSE_ARGS(args); struct fuse_getxattr_in inarg; @@ -1893,7 +1888,7 @@ static const struct inode_operations fuse_dir_inode_operations = { static const struct file_operations fuse_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = fuse_readdir, + .iterate_shared = fuse_readdir, .open = fuse_dir_open, .release = fuse_dir_release, .fsync = fuse_dir_fsync, diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 719924d6c706..9154f8679024 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1186,7 +1186,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_flags & IOCB_DIRECT) { loff_t pos = iocb->ki_pos; - written = generic_file_direct_write(iocb, from, pos); + written = generic_file_direct_write(iocb, from); if (written < 0 || !iov_iter_count(from)) goto out; @@ -1295,7 +1295,7 @@ static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, *nbytesp = nbytes; - return ret; + return ret < 0 ? 
ret : 0; } static inline int fuse_iter_npages(const struct iov_iter *ii_p) @@ -2837,7 +2837,7 @@ static inline loff_t fuse_round_up(loff_t off) } static ssize_t -fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) +fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { DECLARE_COMPLETION_ONSTACK(wait); ssize_t ret = 0; @@ -2848,6 +2848,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) struct inode *inode; loff_t i_size; size_t count = iov_iter_count(iter); + loff_t offset = iocb->ki_pos; struct fuse_io_priv *io; bool is_sync = is_sync_kiocb(iocb); diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c index 791932617d1a..363ba9e9d8d0 100644 --- a/fs/gfs2/acl.c +++ b/fs/gfs2/acl.c @@ -24,6 +24,7 @@ #include "glock.h" #include "inode.h" #include "meta_io.h" +#include "rgrp.h" #include "trans.h" #include "util.h" @@ -38,7 +39,7 @@ static const char *gfs2_acl_name(int type) return NULL; } -struct posix_acl *gfs2_get_acl(struct inode *inode, int type) +static struct posix_acl *__gfs2_get_acl(struct inode *inode, int type) { struct gfs2_inode *ip = GFS2_I(inode); struct posix_acl *acl; @@ -50,29 +51,41 @@ struct posix_acl *gfs2_get_acl(struct inode *inode, int type) return NULL; name = gfs2_acl_name(type); - if (name == NULL) - return ERR_PTR(-EINVAL); - len = gfs2_xattr_acl_get(ip, name, &data); - if (len < 0) + if (len <= 0) return ERR_PTR(len); - if (len == 0) - return NULL; - acl = posix_acl_from_xattr(&init_user_ns, data, len); kfree(data); return acl; } -int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) +struct posix_acl *gfs2_get_acl(struct inode *inode, int type) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder gh; + bool need_unlock = false; + struct posix_acl *acl; + + if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { + int ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, + LM_FLAG_ANY, &gh); + if (ret) + return ERR_PTR(ret); + need_unlock = true; + } + acl = __gfs2_get_acl(inode, type); + if (need_unlock) + gfs2_glock_dq_uninit(&gh); + return acl; +} + +int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) { int error; int len; char *data; const char *name = gfs2_acl_name(type); - BUG_ON(name == NULL); - if (acl && acl->a_count > GFS2_ACL_MAX_ENTRIES(GFS2_SB(inode))) return -E2BIG; @@ -115,3 +128,26 @@ out: kfree(data); return error; } + +int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder gh; + bool need_unlock = false; + int ret; + + ret = gfs2_rsqa_alloc(ip); + if (ret) + return ret; + + if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); + if (ret) + return ret; + need_unlock = true; + } + ret = __gfs2_set_acl(inode, acl, type); + if (need_unlock) + gfs2_glock_dq_uninit(&gh); + return ret; +} diff --git a/fs/gfs2/acl.h b/fs/gfs2/acl.h index 3af4f407a483..f674fdd22337 100644 --- a/fs/gfs2/acl.h +++ b/fs/gfs2/acl.h @@ -15,6 +15,7 @@ #define GFS2_ACL_MAX_ENTRIES(sdp) ((300 << (sdp)->sd_sb.sb_bsize_shift) >> 12) extern struct posix_acl *gfs2_get_acl(struct inode *inode, int type); +extern int __gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type); extern int gfs2_set_acl(struct inode *inode, struct posix_acl *acl, int type); #endif /* __ACL_DOT_H__ */ diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 1bbbee945f46..37b7bc14c8da 100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -977,7 +977,7 @@ static void gfs2_discard(struct gfs2_sbd *sdp, 
struct buffer_head *bh) if (!list_empty(&bd->bd_list) && !buffer_pinned(bh)) list_del_init(&bd->bd_list); else - gfs2_remove_from_journal(bh, current->journal_info, 0); + gfs2_remove_from_journal(bh, REMOVE_JDATA); } bh->b_bdev = NULL; clear_buffer_mapped(bh); @@ -1042,13 +1042,13 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, loff_t offset) -static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct address_space *mapping = inode->i_mapping; struct gfs2_inode *ip = GFS2_I(inode); + loff_t offset = iocb->ki_pos; struct gfs2_holder gh; int rv; @@ -1063,7 +1063,7 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh); rv = gfs2_glock_nq(&gh); if (rv) - return rv; + goto out_uninit; rv = gfs2_ok_for_dio(ip, offset); if (rv != 1) goto out; /* dio not valid, fall back to buffered i/o */ @@ -1099,9 +1099,10 @@ static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, } rv = __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter, - offset, gfs2_get_block_direct, NULL, NULL, 0); + gfs2_get_block_direct, NULL, NULL, 0); out: gfs2_glock_dq(&gh); +out_uninit: gfs2_holder_uninit(&gh); return rv; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 208efc70ad49..e0f98e483aec 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -160,7 +160,7 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr) gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); error = gfs2_glock_nq(&gh); if (error) - return error; + goto out_uninit; fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags); if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA) @@ -169,6 +169,7 @@ static int gfs2_get_flags(struct file *filp, u32 __user *ptr) error = -EFAULT; gfs2_glock_dq(&gh); +out_uninit: gfs2_holder_uninit(&gh); return error; } @@ -895,7 +896,10 @@ static long __gfs2_fallocate(struct file *file, int mode, loff_t offset, loff_t mark_inode_dirty(inode); } - return generic_write_sync(file, pos, count); + if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) + return vfs_fsync_range(file, pos, pos + count - 1, + (file->f_flags & __O_SYNC) ? 
0 : 1); + return 0; out_trans_fail: gfs2_inplace_release(ip); @@ -950,6 +954,30 @@ out_uninit: return ret; } +static ssize_t gfs2_file_splice_read(struct file *in, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags) +{ + struct inode *inode = in->f_mapping->host; + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder gh; + int ret; + + inode_lock(inode); + + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); + if (ret) { + inode_unlock(inode); + return ret; + } + + gfs2_glock_dq_uninit(&gh); + inode_unlock(inode); + + return generic_file_splice_read(in, ppos, pipe, len, flags); +} + + static ssize_t gfs2_file_splice_write(struct pipe_inode_info *pipe, struct file *out, loff_t *ppos, size_t len, unsigned int flags) @@ -1112,14 +1140,14 @@ const struct file_operations gfs2_file_fops = { .fsync = gfs2_fsync, .lock = gfs2_lock, .flock = gfs2_flock, - .splice_read = generic_file_splice_read, + .splice_read = gfs2_file_splice_read, .splice_write = gfs2_file_splice_write, .setlease = simple_nosetlease, .fallocate = gfs2_fallocate, }; const struct file_operations gfs2_dir_fops = { - .iterate = gfs2_readdir, + .iterate_shared = gfs2_readdir, .unlocked_ioctl = gfs2_ioctl, .open = gfs2_open, .release = gfs2_release, @@ -1140,14 +1168,14 @@ const struct file_operations gfs2_file_fops_nolock = { .open = gfs2_open, .release = gfs2_release, .fsync = gfs2_fsync, - .splice_read = generic_file_splice_read, + .splice_read = gfs2_file_splice_read, .splice_write = gfs2_file_splice_write, .setlease = generic_setlease, .fallocate = gfs2_fallocate, }; const struct file_operations gfs2_dir_fops_nolock = { - .iterate = gfs2_readdir, + .iterate_shared = gfs2_readdir, .unlocked_ioctl = gfs2_ioctl, .open = gfs2_open, .release = gfs2_release, diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 6539131c52a2..706fd9352f36 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -218,7 +218,7 @@ static void gfs2_holder_wake(struct gfs2_holder *gh) * */ -static inline void do_error(struct gfs2_glock *gl, const int ret) +static void do_error(struct gfs2_glock *gl, const int ret) { struct gfs2_holder *gh, *tmp; @@ -475,7 +475,14 @@ __acquires(&gl->gl_lockref.lock) if (sdp->sd_lockstruct.ls_ops->lm_lock) { /* lock_dlm */ ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags); - if (ret) { + if (ret == -EINVAL && gl->gl_target == LM_ST_UNLOCKED && + target == LM_ST_UNLOCKED && + test_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags)) { + finish_xmote(gl, target); + if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) + gfs2_glock_put(gl); + } + else if (ret) { pr_err("lm_lock ret %d\n", ret); GLOCK_BUG_ON(gl, 1); } @@ -1913,7 +1920,7 @@ static int gfs2_glocks_open(struct inode *inode, struct file *file) if (seq->buf) seq->size = GFS2_SEQ_GOODSIZE; gi->gl = NULL; - ret = rhashtable_walk_init(&gl_hash_table, &gi->hti); + ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL); } return ret; } @@ -1941,7 +1948,7 @@ static int gfs2_glstats_open(struct inode *inode, struct file *file) if (seq->buf) seq->size = GFS2_SEQ_GOODSIZE; gi->gl = NULL; - ret = rhashtable_walk_init(&gl_hash_table, &gi->hti); + ret = rhashtable_walk_init(&gl_hash_table, &gi->hti, GFP_KERNEL); } return ret; } diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c index 437fd73e381e..5db59d444838 100644 --- a/fs/gfs2/glops.c +++ b/fs/gfs2/glops.c @@ -286,17 +286,10 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags) static int inode_go_demote_ok(const struct gfs2_glock *gl) { struct gfs2_sbd *sdp = 
gl->gl_name.ln_sbd; - struct gfs2_holder *gh; if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object) return 0; - if (!list_empty(&gl->gl_holders)) { - gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list); - if (gh->gh_list.next != &gl->gl_holders) - return 0; - } - return 1; } diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c index bb30f9a72c65..21dc784f66c2 100644 --- a/fs/gfs2/inode.c +++ b/fs/gfs2/inode.c @@ -93,12 +93,12 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type, int error; inode = iget_locked(sb, (unsigned long)no_addr); - ip = GFS2_I(inode); - ip->i_no_addr = no_addr; - if (!inode) return ERR_PTR(-ENOMEM); + ip = GFS2_I(inode); + ip->i_no_addr = no_addr; + if (inode->i_state & I_NEW) { struct gfs2_sbd *sdp = GFS2_SB(inode); ip->i_no_formal_ino = no_formal_ino; @@ -692,12 +692,12 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, considered free. Any failures need to undo the gfs2 structures. */ if (default_acl) { - error = gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); + error = __gfs2_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); posix_acl_release(default_acl); } if (acl) { if (!error) - error = gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS); + error = __gfs2_set_acl(inode, acl, ACL_TYPE_ACCESS); posix_acl_release(acl); } @@ -1948,67 +1948,6 @@ static int gfs2_getattr(struct vfsmount *mnt, struct dentry *dentry, return 0; } -static int gfs2_setxattr(struct dentry *dentry, const char *name, - const void *data, size_t size, int flags) -{ - struct inode *inode = d_inode(dentry); - struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_holder gh; - int ret; - - gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); - ret = gfs2_glock_nq(&gh); - if (ret == 0) { - ret = gfs2_rsqa_alloc(ip); - if (ret == 0) - ret = generic_setxattr(dentry, name, data, size, flags); - gfs2_glock_dq(&gh); - } - gfs2_holder_uninit(&gh); - return ret; -} - -static ssize_t gfs2_getxattr(struct dentry *dentry, const char *name, - void *data, size_t size) -{ - struct inode *inode = d_inode(dentry); - struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_holder gh; - int ret; - - /* For selinux during lookup */ - if (gfs2_glock_is_locked_by_me(ip->i_gl)) - return generic_getxattr(dentry, name, data, size); - - gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); - ret = gfs2_glock_nq(&gh); - if (ret == 0) { - ret = generic_getxattr(dentry, name, data, size); - gfs2_glock_dq(&gh); - } - gfs2_holder_uninit(&gh); - return ret; -} - -static int gfs2_removexattr(struct dentry *dentry, const char *name) -{ - struct inode *inode = d_inode(dentry); - struct gfs2_inode *ip = GFS2_I(inode); - struct gfs2_holder gh; - int ret; - - gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); - ret = gfs2_glock_nq(&gh); - if (ret == 0) { - ret = gfs2_rsqa_alloc(ip); - if (ret == 0) - ret = generic_removexattr(dentry, name); - gfs2_glock_dq(&gh); - } - gfs2_holder_uninit(&gh); - return ret; -} - static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len) { @@ -2055,10 +1994,10 @@ const struct inode_operations gfs2_file_iops = { .permission = gfs2_permission, .setattr = gfs2_setattr, .getattr = gfs2_getattr, - .setxattr = gfs2_setxattr, - .getxattr = gfs2_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = gfs2_listxattr, - .removexattr = gfs2_removexattr, + .removexattr = generic_removexattr, .fiemap = gfs2_fiemap, .get_acl = gfs2_get_acl, .set_acl = gfs2_set_acl, @@ -2077,10 
+2016,10 @@ const struct inode_operations gfs2_dir_iops = { .permission = gfs2_permission, .setattr = gfs2_setattr, .getattr = gfs2_getattr, - .setxattr = gfs2_setxattr, - .getxattr = gfs2_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = gfs2_listxattr, - .removexattr = gfs2_removexattr, + .removexattr = generic_removexattr, .fiemap = gfs2_fiemap, .get_acl = gfs2_get_acl, .set_acl = gfs2_set_acl, @@ -2093,10 +2032,10 @@ const struct inode_operations gfs2_symlink_iops = { .permission = gfs2_permission, .setattr = gfs2_setattr, .getattr = gfs2_getattr, - .setxattr = gfs2_setxattr, - .getxattr = gfs2_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = gfs2_listxattr, - .removexattr = gfs2_removexattr, + .removexattr = generic_removexattr, .fiemap = gfs2_fiemap, }; diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c index 0448524c11bc..8eaadabbc771 100644 --- a/fs/gfs2/meta_io.c +++ b/fs/gfs2/meta_io.c @@ -325,18 +325,19 @@ int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh) return 0; } -void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int meta) +void gfs2_remove_from_journal(struct buffer_head *bh, int meta) { struct address_space *mapping = bh->b_page->mapping; struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping); struct gfs2_bufdata *bd = bh->b_private; + struct gfs2_trans *tr = current->journal_info; int was_pinned = 0; if (test_clear_buffer_pinned(bh)) { trace_gfs2_pin(bd, 0); atomic_dec(&sdp->sd_log_pinned); list_del_init(&bd->bd_list); - if (meta) + if (meta == REMOVE_META) tr->tr_num_buf_rm++; else tr->tr_num_databuf_rm++; @@ -376,7 +377,7 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen) if (bh) { lock_buffer(bh); gfs2_log_lock(sdp); - gfs2_remove_from_journal(bh, current->journal_info, 1); + gfs2_remove_from_journal(bh, REMOVE_META); gfs2_log_unlock(sdp); unlock_buffer(bh); brelse(bh); diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h index c5086c8af5ed..ffdf6aa3509d 100644 --- a/fs/gfs2/meta_io.h +++ b/fs/gfs2/meta_io.h @@ -57,8 +57,12 @@ extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags, extern int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh); extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create); -extern void gfs2_remove_from_journal(struct buffer_head *bh, - struct gfs2_trans *tr, int meta); +enum { + REMOVE_JDATA = 0, + REMOVE_META = 1, +}; + +extern void gfs2_remove_from_journal(struct buffer_head *bh, int meta); extern void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen); extern int gfs2_meta_indirect_buffer(struct gfs2_inode *ip, int height, u64 num, struct buffer_head **bhp); diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c index 49b0bff18fe3..45463600fb81 100644 --- a/fs/gfs2/ops_fstype.c +++ b/fs/gfs2/ops_fstype.c @@ -824,7 +824,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo) * i_mutex on quota files is special. Since this inode is hidden system * file, we are safe to define locking ourselves. 
*/ - lockdep_set_class(&sdp->sd_quota_inode->i_mutex, + lockdep_set_class(&sdp->sd_quota_inode->i_rwsem, &gfs2_quota_imutex_key); error = gfs2_rindex_update(sdp); @@ -1360,7 +1360,7 @@ static struct dentry *gfs2_mount_meta(struct file_system_type *fs_type, return ERR_PTR(error); } s = sget(&gfs2_fs_type, test_gfs2_super, set_meta_super, flags, - d_inode(path.dentry)->i_sb->s_bdev); + path.dentry->d_sb->s_bdev); path_put(&path); if (IS_ERR(s)) { pr_warn("gfs2 mount does not exist\n"); diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c index 99a0bdac8796..5bd216901e89 100644 --- a/fs/gfs2/rgrp.c +++ b/fs/gfs2/rgrp.c @@ -73,8 +73,7 @@ static const char valid_change[16] = { }; static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, - const struct gfs2_inode *ip, bool nowrap, - const struct gfs2_alloc_parms *ap); + const struct gfs2_inode *ip, bool nowrap); /** @@ -1511,7 +1510,7 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, if (WARN_ON(gfs2_rbm_from_block(&rbm, goal))) return; - ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true, ap); + ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, &extlen, ip, true); if (ret == 0) { rs->rs_rbm = rbm; rs->rs_free = extlen; @@ -1638,7 +1637,6 @@ fail: * @ip: If set, check for reservations * @nowrap: Stop looking at the end of the rgrp, rather than wrapping * around until we've reached the starting point. - * @ap: the allocation parameters * * Side effects: * - If looking for free blocks, we set GBF_FULL on each bitmap which @@ -1650,8 +1648,7 @@ fail: */ static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 *minext, - const struct gfs2_inode *ip, bool nowrap, - const struct gfs2_alloc_parms *ap) + const struct gfs2_inode *ip, bool nowrap) { struct buffer_head *bh; int initial_bii; @@ -1772,7 +1769,7 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip while (1) { down_write(&sdp->sd_log_flush_lock); error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, NULL, - true, NULL); + true); up_write(&sdp->sd_log_flush_lock); if (error == -ENOSPC) break; @@ -2329,12 +2326,11 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, int error; gfs2_set_alloc_start(&rbm, ip, dinode); - error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false, NULL); + error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, ip, false); if (error == -ENOSPC) { gfs2_set_alloc_start(&rbm, ip, dinode); - error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false, - NULL); + error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, NULL, NULL, false); } /* Since all blocks are reserved in advance, this shouldn't happen */ diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index f8a0cd821290..9b2ff353e45f 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -1176,7 +1176,7 @@ static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *s static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf) { - struct super_block *sb = d_inode(dentry)->i_sb; + struct super_block *sb = dentry->d_sb; struct gfs2_sbd *sdp = sb->s_fs_info; struct gfs2_statfs_change_host sc; int error; diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c index cf645835710f..aee4485ad8a9 100644 --- a/fs/gfs2/util.c +++ b/fs/gfs2/util.c @@ -68,6 +68,7 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, const char *fmt, ...) 
fs_err(sdp, "telling LM to unmount\n"); lm->lm_unmount(sdp); } + set_bit(SDF_SKIP_DLM_UNLOCK, &sdp->sd_flags); fs_err(sdp, "withdrawn\n"); dump_stack(); } diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c index e8dfb4740c04..f42ab53bd30d 100644 --- a/fs/gfs2/xattr.c +++ b/fs/gfs2/xattr.c @@ -583,13 +583,11 @@ out: * * Returns: actual size of data on success, -errno on error */ -static int gfs2_xattr_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) +static int __gfs2_xattr_get(struct inode *inode, const char *name, + void *buffer, size_t size, int type) { - struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); + struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_ea_location el; - int type = handler->flags; int error; if (!ip->i_eattr) @@ -611,6 +609,29 @@ static int gfs2_xattr_get(const struct xattr_handler *handler, return error; } +static int gfs2_xattr_get(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) +{ + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder gh; + bool need_unlock = false; + int ret; + + /* During lookup, SELinux calls this function with the glock locked. */ + + if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); + if (ret) + return ret; + need_unlock = true; + } + ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags); + if (need_unlock) + gfs2_glock_dq_uninit(&gh); + return ret; +} + /** * ea_alloc_blk - allocates a new block for extended attributes. * @ip: A pointer to the inode that's getting extended attributes @@ -1233,8 +1254,21 @@ static int gfs2_xattr_set(const struct xattr_handler *handler, struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { - return __gfs2_xattr_set(d_inode(dentry), name, value, - size, flags, handler->flags); + struct inode *inode = d_inode(dentry); + struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder gh; + int ret; + + ret = gfs2_rsqa_alloc(ip); + if (ret) + return ret; + + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); + if (ret) + return ret; + ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags); + gfs2_glock_dq_uninit(&gh); + return ret; } static int ea_dealloc_indirect(struct gfs2_inode *ip) diff --git a/fs/hfs/attr.c b/fs/hfs/attr.c index 8d931b157bbe..064f92f17efc 100644 --- a/fs/hfs/attr.c +++ b/fs/hfs/attr.c @@ -56,10 +56,9 @@ out: return res; } -ssize_t hfs_getxattr(struct dentry *dentry, const char *name, - void *value, size_t size) +ssize_t hfs_getxattr(struct dentry *unused, struct inode *inode, + const char *name, void *value, size_t size) { - struct inode *inode = d_inode(dentry); struct hfs_find_data fd; hfs_cat_rec rec; struct hfs_cat_file *file; diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c index 1eb5d415d434..98cde8ba5dc2 100644 --- a/fs/hfs/catalog.c +++ b/fs/hfs/catalog.c @@ -240,10 +240,13 @@ int hfs_cat_delete(u32 cnid, struct inode *dir, struct qstr *str) } } + /* we only need to take spinlock for exclusion with ->release() */ + spin_lock(&HFS_I(dir)->open_dir_lock); list_for_each_entry(rd, &HFS_I(dir)->open_dir_list, list) { if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0) rd->file->f_pos--; } + spin_unlock(&HFS_I(dir)->open_dir_lock); res = hfs_brec_remove(&fd); if (res) diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index e9f2b855f831..163190ecc0d2 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c @@ -161,8 +161,14 @@ 
static int hfs_readdir(struct file *file, struct dir_context *ctx) } file->private_data = rd; rd->file = file; + spin_lock(&HFS_I(inode)->open_dir_lock); list_add(&rd->list, &HFS_I(inode)->open_dir_list); + spin_unlock(&HFS_I(inode)->open_dir_lock); } + /* + * Can be done after the list insertion; exclusion with + * hfs_delete_cat() is provided by directory lock. + */ memcpy(&rd->key, &fd.key, sizeof(struct hfs_cat_key)); out: hfs_find_exit(&fd); @@ -173,9 +179,9 @@ static int hfs_dir_release(struct inode *inode, struct file *file) { struct hfs_readdir_data *rd = file->private_data; if (rd) { - inode_lock(inode); + spin_lock(&HFS_I(inode)->open_dir_lock); list_del(&rd->list); - inode_unlock(inode); + spin_unlock(&HFS_I(inode)->open_dir_lock); kfree(rd); } return 0; @@ -303,7 +309,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, const struct file_operations hfs_dir_operations = { .read = generic_read_dir, - .iterate = hfs_readdir, + .iterate_shared = hfs_readdir, .llseek = generic_file_llseek, .release = hfs_dir_release, }; diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h index 1f1c7dcbcc2f..fa3eed86837c 100644 --- a/fs/hfs/hfs_fs.h +++ b/fs/hfs/hfs_fs.h @@ -69,6 +69,7 @@ struct hfs_inode_info { struct hfs_cat_key cat_key; struct list_head open_dir_list; + spinlock_t open_dir_lock; struct inode *rsrc_inode; struct mutex extents_lock; @@ -213,8 +214,8 @@ extern void hfs_delete_inode(struct inode *); /* attr.c */ extern int hfs_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); -extern ssize_t hfs_getxattr(struct dentry *dentry, const char *name, - void *value, size_t size); +extern ssize_t hfs_getxattr(struct dentry *dentry, struct inode *inode, + const char *name, void *value, size_t size); extern ssize_t hfs_listxattr(struct dentry *dentry, char *buffer, size_t size); /* mdb.c */ diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index cb1e5faa2fb7..8eed66af5b82 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -124,8 +124,7 @@ static int hfs_releasepage(struct page *page, gfp_t mask) return res ? 
try_to_free_buffers(page) : 0; } -static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -133,7 +132,7 @@ static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(iocb, inode, iter, offset, hfs_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, hfs_get_block); /* * In case of error extending write may have instantiated a few @@ -141,7 +140,7 @@ static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, */ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); - loff_t end = offset + count; + loff_t end = iocb->ki_pos + count; if (end > isize) hfs_write_failed(mapping, end); @@ -187,6 +186,7 @@ struct inode *hfs_new_inode(struct inode *dir, struct qstr *name, umode_t mode) mutex_init(&HFS_I(inode)->extents_lock); INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list); + spin_lock_init(&HFS_I(inode)->open_dir_lock); hfs_cat_build_key(sb, (btree_key *)&HFS_I(inode)->cat_key, dir->i_ino, name); inode->i_ino = HFS_SB(sb)->next_id++; inode->i_mode = mode; @@ -318,6 +318,7 @@ static int hfs_read_inode(struct inode *inode, void *data) HFS_I(inode)->rsrc_inode = NULL; mutex_init(&HFS_I(inode)->extents_lock); INIT_LIST_HEAD(&HFS_I(inode)->open_dir_list); + spin_lock_init(&HFS_I(inode)->open_dir_lock); /* Initialize the inode */ inode->i_uid = hsb->s_uid; diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index 022974ab6e3c..fb707e8f423a 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c @@ -374,12 +374,15 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC); } + /* we only need to take spinlock for exclusion with ->release() */ + spin_lock(&HFSPLUS_I(dir)->open_dir_lock); list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) { struct hfsplus_readdir_data *rd = list_entry(pos, struct hfsplus_readdir_data, list); if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0) rd->file->f_pos--; } + spin_unlock(&HFSPLUS_I(dir)->open_dir_lock); err = hfs_brec_remove(&fd); if (err) diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index a4e867e08947..42e128661dc1 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -271,8 +271,14 @@ next: } file->private_data = rd; rd->file = file; + spin_lock(&HFSPLUS_I(inode)->open_dir_lock); list_add(&rd->list, &HFSPLUS_I(inode)->open_dir_list); + spin_unlock(&HFSPLUS_I(inode)->open_dir_lock); } + /* + * Can be done after the list insertion; exclusion with + * hfsplus_delete_cat() is provided by directory lock. 
+ */ memcpy(&rd->key, fd.key, sizeof(struct hfsplus_cat_key)); out: kfree(strbuf); @@ -284,9 +290,9 @@ static int hfsplus_dir_release(struct inode *inode, struct file *file) { struct hfsplus_readdir_data *rd = file->private_data; if (rd) { - inode_lock(inode); + spin_lock(&HFSPLUS_I(inode)->open_dir_lock); list_del(&rd->list); - inode_unlock(inode); + spin_unlock(&HFSPLUS_I(inode)->open_dir_lock); kfree(rd); } return 0; @@ -569,7 +575,7 @@ const struct inode_operations hfsplus_dir_inode_operations = { const struct file_operations hfsplus_dir_operations = { .fsync = hfsplus_file_fsync, .read = generic_read_dir, - .iterate = hfsplus_readdir, + .iterate_shared = hfsplus_readdir, .unlocked_ioctl = hfsplus_ioctl, .llseek = generic_file_llseek, .release = hfsplus_dir_release, diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index f91a1faf819e..fdc3446d934a 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -244,6 +244,7 @@ struct hfsplus_inode_info { u8 userflags; /* BSD user file flags */ u32 subfolders; /* Subfolder count (HFSX only) */ struct list_head open_dir_list; + spinlock_t open_dir_lock; loff_t phys_size; struct inode vfs_inode; diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index b28f39865c3a..ef9fefe364a6 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -122,8 +122,7 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) return res ? try_to_free_buffers(page) : 0; } -static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -131,7 +130,7 @@ static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter, size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(iocb, inode, iter, offset, hfsplus_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block); /* * In case of error extending write may have instantiated a few @@ -139,7 +138,7 @@ static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter, */ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); - loff_t end = offset + count; + loff_t end = iocb->ki_pos + count; if (end > isize) hfsplus_write_failed(mapping, end); @@ -374,6 +373,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode) hip = HFSPLUS_I(inode); INIT_LIST_HEAD(&hip->open_dir_list); + spin_lock_init(&hip->open_dir_lock); mutex_init(&hip->extents_lock); atomic_set(&hip->opencnt, 0); hip->extent_state = 0; diff --git a/fs/hfsplus/posix_acl.c b/fs/hfsplus/posix_acl.c index afb33eda6d7d..ab7ea2506b4d 100644 --- a/fs/hfsplus/posix_acl.c +++ b/fs/hfsplus/posix_acl.c @@ -48,9 +48,6 @@ struct posix_acl *hfsplus_get_posix_acl(struct inode *inode, int type) hfsplus_destroy_attr_entry((hfsplus_attr_entry *)value); - if (!IS_ERR(acl)) - set_cached_acl(inode, type, acl); - return acl; } diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c index c35911362ff9..755bf30ba1ce 100644 --- a/fs/hfsplus/super.c +++ b/fs/hfsplus/super.c @@ -67,6 +67,7 @@ struct inode *hfsplus_iget(struct super_block *sb, unsigned long ino) return inode; INIT_LIST_HEAD(&HFSPLUS_I(inode)->open_dir_list); + spin_lock_init(&HFSPLUS_I(inode)->open_dir_lock); mutex_init(&HFSPLUS_I(inode)->extents_lock); HFSPLUS_I(inode)->flags = 0; HFSPLUS_I(inode)->extent_state = 0; diff --git a/fs/hfsplus/xattr.c b/fs/hfsplus/xattr.c index 
70e445ff0cff..4f118d282a7a 100644 --- a/fs/hfsplus/xattr.c +++ b/fs/hfsplus/xattr.c @@ -579,7 +579,7 @@ failed_getxattr_init: return res; } -ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, +ssize_t hfsplus_getxattr(struct inode *inode, const char *name, void *value, size_t size, const char *prefix, size_t prefixlen) { @@ -594,7 +594,7 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, strcpy(xattr_name, prefix); strcpy(xattr_name + prefixlen, name); - res = __hfsplus_getxattr(d_inode(dentry), xattr_name, value, size); + res = __hfsplus_getxattr(inode, xattr_name, value, size); kfree(xattr_name); return res; @@ -844,8 +844,8 @@ end_removexattr: } static int hfsplus_osx_getxattr(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { /* * Don't allow retrieving properly prefixed attributes @@ -860,7 +860,7 @@ static int hfsplus_osx_getxattr(const struct xattr_handler *handler, * creates), so we pass the name through unmodified (after * ensuring it doesn't conflict with another namespace). */ - return __hfsplus_getxattr(d_inode(dentry), name, buffer, size); + return __hfsplus_getxattr(inode, name, buffer, size); } static int hfsplus_osx_setxattr(const struct xattr_handler *handler, diff --git a/fs/hfsplus/xattr.h b/fs/hfsplus/xattr.h index f9b0955b3d28..d04ba6f58df2 100644 --- a/fs/hfsplus/xattr.h +++ b/fs/hfsplus/xattr.h @@ -28,7 +28,7 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name, ssize_t __hfsplus_getxattr(struct inode *inode, const char *name, void *value, size_t size); -ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, +ssize_t hfsplus_getxattr(struct inode *inode, const char *name, void *value, size_t size, const char *prefix, size_t prefixlen); diff --git a/fs/hfsplus/xattr_security.c b/fs/hfsplus/xattr_security.c index 72a68a3a0c99..ae2ca8c2e335 100644 --- a/fs/hfsplus/xattr_security.c +++ b/fs/hfsplus/xattr_security.c @@ -14,10 +14,10 @@ #include "acl.h" static int hfsplus_security_getxattr(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return hfsplus_getxattr(dentry, name, buffer, size, + return hfsplus_getxattr(inode, name, buffer, size, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN); } diff --git a/fs/hfsplus/xattr_trusted.c b/fs/hfsplus/xattr_trusted.c index 95a7704c7abb..eae2947060aa 100644 --- a/fs/hfsplus/xattr_trusted.c +++ b/fs/hfsplus/xattr_trusted.c @@ -12,10 +12,10 @@ #include "xattr.h" static int hfsplus_trusted_getxattr(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return hfsplus_getxattr(dentry, name, buffer, size, + return hfsplus_getxattr(inode, name, buffer, size, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN); } diff --git a/fs/hfsplus/xattr_user.c b/fs/hfsplus/xattr_user.c index 6fc269baf959..3c9eec3e4c7b 100644 --- a/fs/hfsplus/xattr_user.c +++ b/fs/hfsplus/xattr_user.c @@ -12,11 +12,11 @@ #include "xattr.h" static int hfsplus_user_getxattr(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t 
size) { - return hfsplus_getxattr(dentry, name, buffer, size, + return hfsplus_getxattr(inode, name, buffer, size, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); } diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 7016653f3e41..5c57654927a6 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c @@ -398,7 +398,7 @@ static const struct file_operations hostfs_file_fops = { static const struct file_operations hostfs_dir_fops = { .llseek = generic_file_llseek, - .iterate = hostfs_readdir, + .iterate_shared = hostfs_readdir, .read = generic_read_dir, .open = hostfs_open, .fsync = hostfs_fsync, diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c index e57a53c13d86..7b9150c2e75c 100644 --- a/fs/hpfs/dir.c +++ b/fs/hpfs/dir.c @@ -44,7 +44,11 @@ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence) else goto fail; if (pos == 12) goto fail; } - hpfs_add_pos(i, &filp->f_pos); + if (unlikely(hpfs_add_pos(i, &filp->f_pos) < 0)) { + hpfs_unlock(s); + inode_unlock(i); + return -ENOMEM; + } ok: filp->f_pos = new_off; hpfs_unlock(s); @@ -141,8 +145,10 @@ static int hpfs_readdir(struct file *file, struct dir_context *ctx) ctx->pos = 1; } if (ctx->pos == 1) { + ret = hpfs_add_pos(inode, &file->f_pos); + if (unlikely(ret < 0)) + goto out; ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; - hpfs_add_pos(inode, &file->f_pos); file->f_version = inode->i_version; } next_pos = ctx->pos; @@ -324,7 +330,7 @@ const struct file_operations hpfs_dir_ops = { .llseek = hpfs_dir_lseek, .read = generic_read_dir, - .iterate = hpfs_readdir, + .iterate_shared = hpfs_readdir, .release = hpfs_dir_release, .fsync = hpfs_file_fsync, .unlocked_ioctl = hpfs_ioctl, diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c index 2923a7bd82ac..86ab7e790b4e 100644 --- a/fs/hpfs/dnode.c +++ b/fs/hpfs/dnode.c @@ -21,7 +21,7 @@ static loff_t get_pos(struct dnode *d, struct hpfs_dirent *fde) return ((loff_t)le32_to_cpu(d->self) << 4) | (loff_t)1; } -void hpfs_add_pos(struct inode *inode, loff_t *pos) +int hpfs_add_pos(struct inode *inode, loff_t *pos) { struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); int i = 0; @@ -29,11 +29,12 @@ void hpfs_add_pos(struct inode *inode, loff_t *pos) if (hpfs_inode->i_rddir_off) for (; hpfs_inode->i_rddir_off[i]; i++) - if (hpfs_inode->i_rddir_off[i] == pos) return; + if (hpfs_inode->i_rddir_off[i] == pos) + return 0; if (!(i&0x0f)) { if (!(ppos = kmalloc((i+0x11) * sizeof(loff_t*), GFP_NOFS))) { pr_err("out of memory for position list\n"); - return; + return -ENOMEM; } if (hpfs_inode->i_rddir_off) { memcpy(ppos, hpfs_inode->i_rddir_off, i * sizeof(loff_t)); @@ -43,6 +44,7 @@ void hpfs_add_pos(struct inode *inode, loff_t *pos) } hpfs_inode->i_rddir_off[i] = pos; hpfs_inode->i_rddir_off[i + 1] = NULL; + return 0; } void hpfs_del_pos(struct inode *inode, loff_t *pos) diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index 975654a63c13..aebb78f9e47f 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h @@ -242,7 +242,7 @@ extern const struct file_operations hpfs_dir_ops; /* dnode.c */ -void hpfs_add_pos(struct inode *, loff_t *); +int hpfs_add_pos(struct inode *, loff_t *); void hpfs_del_pos(struct inode *, loff_t *); struct hpfs_dirent *hpfs_add_de(struct super_block *, struct dnode *, const unsigned char *, unsigned, secno); diff --git a/fs/inode.c b/fs/inode.c index 69b8b526c194..4ccbc21b30ce 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -151,6 +151,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode) inode->i_bdev = 
NULL; inode->i_cdev = NULL; inode->i_link = NULL; + inode->i_dir_seq = 0; inode->i_rdev = 0; inode->dirtied_when = 0; @@ -165,8 +166,8 @@ int inode_init_always(struct super_block *sb, struct inode *inode) spin_lock_init(&inode->i_lock); lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key); - mutex_init(&inode->i_mutex); - lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key); + init_rwsem(&inode->i_rwsem); + lockdep_set_class(&inode->i_rwsem, &sb->s_type->i_mutex_key); atomic_set(&inode->i_dio_count, 0); @@ -238,9 +239,9 @@ void __destroy_inode(struct inode *inode) } #ifdef CONFIG_FS_POSIX_ACL - if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED) + if (inode->i_acl && !is_uncached_acl(inode->i_acl)) posix_acl_release(inode->i_acl); - if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED) + if (inode->i_default_acl && !is_uncached_acl(inode->i_default_acl)) posix_acl_release(inode->i_default_acl); #endif this_cpu_dec(nr_inodes); @@ -924,13 +925,13 @@ void lockdep_annotate_inode_mutex_key(struct inode *inode) struct file_system_type *type = inode->i_sb->s_type; /* Set new key only if filesystem hasn't already changed it */ - if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) { + if (lockdep_match_class(&inode->i_rwsem, &type->i_mutex_key)) { /* * ensure nobody is actually holding i_mutex */ - mutex_destroy(&inode->i_mutex); - mutex_init(&inode->i_mutex); - lockdep_set_class(&inode->i_mutex, + // mutex_destroy(&inode->i_mutex); + init_rwsem(&inode->i_rwsem); + lockdep_set_class(&inode->i_rwsem, &type->i_mutex_dir_key); } } diff --git a/fs/isofs/dir.c b/fs/isofs/dir.c index b943cbd963bb..e7599615e4e0 100644 --- a/fs/isofs/dir.c +++ b/fs/isofs/dir.c @@ -58,7 +58,7 @@ int get_acorn_filename(struct iso_directory_record *de, std = sizeof(struct iso_directory_record) + de->name_len[0]; if (std & 1) std++; - if ((*((unsigned char *) de) - std) != 32) + if (de->length[0] - std != 32) return retnamlen; chr = ((unsigned char *) de) + std; if (strncmp(chr, "ARCHIMEDES", 10)) @@ -269,7 +269,7 @@ const struct file_operations isofs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = isofs_readdir, + .iterate_shared = isofs_readdir, }; /* diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c index 5384ceb35b1c..98b3eb7d8eaf 100644 --- a/fs/isofs/rock.c +++ b/fs/isofs/rock.c @@ -203,6 +203,8 @@ int get_rock_ridge_filename(struct iso_directory_record *de, int retnamlen = 0; int truncate = 0; int ret = 0; + char *p; + int len; if (!ISOFS_SB(inode->i_sb)->s_rock) return 0; @@ -267,12 +269,17 @@ repeat: rr->u.NM.flags); break; } - if ((strlen(retname) + rr->len - 5) >= 254) { + len = rr->len - 5; + if (retnamlen + len >= 254) { truncate = 1; break; } - strncat(retname, rr->u.NM.name, rr->len - 5); - retnamlen += rr->len - 5; + p = memchr(rr->u.NM.name, '\0', len); + if (unlikely(p)) + len = p - rr->u.NM.name; + memcpy(retname + retnamlen, rr->u.NM.name, len); + retnamlen += len; + retname[retnamlen] = '\0'; break; case SIG('R', 'E'): kfree(rs.buffer); diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 08a456b96e4e..805bc6bcd8ab 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -303,7 +303,7 @@ int jbd2_journal_recover(journal_t *journal) * Locate any valid recovery information from the journal and set up the * journal structures in memory to ignore it (presumably because the * caller has evidence that it is out of date). - * This function does'nt appear to be exorted.. + * This function doesn't appear to be exported.. 
* * We perform one pass over the journal to allow us to tell the user how * much recovery information is being erased, and to let us initialise diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index be56c8ca34c2..1749519b362f 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -543,7 +543,7 @@ EXPORT_SYMBOL(jbd2_journal_start_reserved); * * Some transactions, such as large extends and truncates, can be done * atomically all at once or in several stages. The operation requests - * a credit for a number of buffer modications in advance, but can + * a credit for a number of buffer modifications in advance, but can * extend its credit if it needs more. * * jbd2_journal_extend tries to give the running handle more buffer credits. @@ -627,7 +627,7 @@ error_out: * If the jbd2_journal_extend() call above fails to grant new buffer credits * to a running handle, a call to jbd2_journal_restart will commit the * handle's transaction so far and reattach the handle to a new - * transaction capabable of guaranteeing the requested number of + * transaction capable of guaranteeing the requested number of * credits. We preserve reserved handle if there's any attached to the * passed in handle. */ @@ -1586,7 +1586,7 @@ drop: /** * int jbd2_journal_stop() - complete a transaction - * @handle: tranaction to complete. + * @handle: transaction to complete. * * All done for a particular handle. * diff --git a/fs/jffs2/acl.c b/fs/jffs2/acl.c index 2f7a3c090489..bc2693d56298 100644 --- a/fs/jffs2/acl.c +++ b/fs/jffs2/acl.c @@ -203,8 +203,6 @@ struct posix_acl *jffs2_get_acl(struct inode *inode, int type) acl = ERR_PTR(rc); } kfree(value); - if (!IS_ERR(acl)) - set_cached_acl(inode, type, acl); return acl; } diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 30c4c9ebb693..84c4bf3631a2 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -40,7 +40,7 @@ static int jffs2_rename (struct inode *, struct dentry *, const struct file_operations jffs2_dir_operations = { .read = generic_read_dir, - .iterate = jffs2_readdir, + .iterate_shared=jffs2_readdir, .unlocked_ioctl=jffs2_ioctl, .fsync = jffs2_fsync, .llseek = generic_file_llseek, @@ -241,7 +241,7 @@ static int jffs2_unlink(struct inode *dir_i, struct dentry *dentry) static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct dentry *dentry) { - struct jffs2_sb_info *c = JFFS2_SB_INFO(d_inode(old_dentry)->i_sb); + struct jffs2_sb_info *c = JFFS2_SB_INFO(old_dentry->d_sb); struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(old_dentry)); struct jffs2_inode_info *dir_f = JFFS2_INODE_INFO(dir_i); int ret; diff --git a/fs/jffs2/security.c b/fs/jffs2/security.c index 7a28facd7175..3ed9a4b49778 100644 --- a/fs/jffs2/security.c +++ b/fs/jffs2/security.c @@ -49,10 +49,10 @@ int jffs2_init_security(struct inode *inode, struct inode *dir, /* ---- XATTR Handler for "security.*" ----------------- */ static int jffs2_security_getxattr(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_SECURITY, + return do_jffs2_getxattr(inode, JFFS2_XPREFIX_SECURITY, name, buffer, size); } diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 0a9a114bb9d1..5ef21f4c4c77 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -147,7 +147,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child) JFFS2_DEBUG("Parent of directory ino #%u is 
#%u\n", f->inocache->ino, pino); - return d_obtain_alias(jffs2_iget(d_inode(child)->i_sb, pino)); + return d_obtain_alias(jffs2_iget(child->d_sb, pino)); } static const struct export_operations jffs2_export_ops = { diff --git a/fs/jffs2/xattr_trusted.c b/fs/jffs2/xattr_trusted.c index b2555ef07a12..4ebecff1d922 100644 --- a/fs/jffs2/xattr_trusted.c +++ b/fs/jffs2/xattr_trusted.c @@ -17,10 +17,10 @@ #include "nodelist.h" static int jffs2_trusted_getxattr(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_TRUSTED, + return do_jffs2_getxattr(inode, JFFS2_XPREFIX_TRUSTED, name, buffer, size); } diff --git a/fs/jffs2/xattr_user.c b/fs/jffs2/xattr_user.c index 539bd630b5e4..bce249e1b277 100644 --- a/fs/jffs2/xattr_user.c +++ b/fs/jffs2/xattr_user.c @@ -17,10 +17,10 @@ #include "nodelist.h" static int jffs2_user_getxattr(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return do_jffs2_getxattr(d_inode(dentry), JFFS2_XPREFIX_USER, + return do_jffs2_getxattr(inode, JFFS2_XPREFIX_USER, name, buffer, size); } diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c index 49456853e9de..21fa92ba2c19 100644 --- a/fs/jfs/acl.c +++ b/fs/jfs/acl.c @@ -34,10 +34,6 @@ struct posix_acl *jfs_get_acl(struct inode *inode, int type) int size; char *value = NULL; - acl = get_cached_acl(inode, type); - if (acl != ACL_NOT_CACHED) - return acl; - switch(type) { case ACL_TYPE_ACCESS: ea_name = XATTR_NAME_POSIX_ACL_ACCESS; @@ -67,8 +63,6 @@ struct posix_acl *jfs_get_acl(struct inode *inode, int type) acl = posix_acl_from_xattr(&init_user_ns, value, size); } kfree(value); - if (!IS_ERR(acl)) - set_cached_acl(inode, type, acl); return acl; } diff --git a/fs/jfs/file.c b/fs/jfs/file.c index 4ce7735dd042..7f1a585a0a94 100644 --- a/fs/jfs/file.c +++ b/fs/jfs/file.c @@ -140,10 +140,10 @@ int jfs_setattr(struct dentry *dentry, struct iattr *iattr) } const struct inode_operations jfs_file_inode_operations = { - .setxattr = jfs_setxattr, - .getxattr = jfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = jfs_listxattr, - .removexattr = jfs_removexattr, + .removexattr = generic_removexattr, .setattr = jfs_setattr, #ifdef CONFIG_JFS_POSIX_ACL .get_acl = jfs_get_acl, diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 9d9bae63ae2a..ad3e7b1effc4 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -102,8 +102,8 @@ int jfs_commit_inode(struct inode *inode, int wait) * partitions and may think inode is dirty */ if (!special_file(inode->i_mode) && noisy) { - jfs_err("jfs_commit_inode(0x%p) called on " - "read-only volume", inode); + jfs_err("jfs_commit_inode(0x%p) called on read-only volume", + inode); jfs_err("Is remount racy?"); noisy--; } @@ -332,8 +332,7 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block) return generic_block_bmap(mapping, block, jfs_get_block); } -static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -341,7 +340,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, size_t count = 
iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(iocb, inode, iter, offset, jfs_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, jfs_get_block); /* * In case of error extending write may have instantiated a few @@ -349,7 +348,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, */ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); - loff_t end = offset + count; + loff_t end = iocb->ki_pos + count; if (end > isize) jfs_write_failed(mapping, end); diff --git a/fs/jfs/jfs_discard.c b/fs/jfs/jfs_discard.c index dfcd50304559..f76ff0a46444 100644 --- a/fs/jfs/jfs_discard.c +++ b/fs/jfs/jfs_discard.c @@ -49,14 +49,12 @@ void jfs_issue_discard(struct inode *ip, u64 blkno, u64 nblocks) r = sb_issue_discard(sb, blkno, nblocks, GFP_NOFS, 0); if (unlikely(r != 0)) { - jfs_err("JFS: sb_issue_discard" \ - "(%p, %llu, %llu, GFP_NOFS, 0) = %d => failed!\n", + jfs_err("JFS: sb_issue_discard(%p, %llu, %llu, GFP_NOFS, 0) = %d => failed!", sb, (unsigned long long)blkno, (unsigned long long)nblocks, r); } - jfs_info("JFS: sb_issue_discard" \ - "(%p, %llu, %llu, GFP_NOFS, 0) = %d\n", + jfs_info("JFS: sb_issue_discard(%p, %llu, %llu, GFP_NOFS, 0) = %d", sb, (unsigned long long)blkno, (unsigned long long)nblocks, r); diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index d88576e23fe4..de2bcb36e079 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -3072,8 +3072,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) } if (dirtab_slot.flag == DIR_INDEX_FREE) { if (loop_count++ > JFS_IP(ip)->next_index) { - jfs_err("jfs_readdir detected " - "infinite loop!"); + jfs_err("jfs_readdir detected infinite loop!"); ctx->pos = DIREND; return 0; } @@ -3151,8 +3150,7 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) if (!dir_emit(ctx, "..", 2, PARENT(ip), DT_DIR)) return 0; } else { - jfs_err("jfs_readdir called with " - "invalid offset!"); + jfs_err("jfs_readdir called with invalid offset!"); } dtoffset->pn = 1; dtoffset->index = 0; @@ -3165,8 +3163,8 @@ int jfs_readdir(struct file *file, struct dir_context *ctx) } if ((rc = dtReadNext(ip, &ctx->pos, &btstack))) { - jfs_err("jfs_readdir: unexpected rc = %d " - "from dtReadNext", rc); + jfs_err("jfs_readdir: unexpected rc = %d from dtReadNext", + rc); ctx->pos = DIREND; return 0; } diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c index f321986e73d2..6aca224a5d68 100644 --- a/fs/jfs/jfs_imap.c +++ b/fs/jfs/jfs_imap.c @@ -534,8 +534,7 @@ void diWriteSpecial(struct inode *ip, int secondary) /* read the page of fixed disk inode (AIT) in raw mode */ mp = read_metapage(ip, address << sbi->l2nbperpage, PSIZE, 1); if (mp == NULL) { - jfs_err("diWriteSpecial: failed to read aggregate inode " - "extent!"); + jfs_err("diWriteSpecial: failed to read aggregate inode extent!"); return; } diff --git a/fs/jfs/jfs_inode.c b/fs/jfs/jfs_inode.c index cf7936fe2e68..5e33cb9a190d 100644 --- a/fs/jfs/jfs_inode.c +++ b/fs/jfs/jfs_inode.c @@ -151,7 +151,7 @@ struct inode *ialloc(struct inode *parent, umode_t mode) jfs_inode->xtlid = 0; jfs_set_inode_flags(inode); - jfs_info("ialloc returns inode = 0x%p\n", inode); + jfs_info("ialloc returns inode = 0x%p", inode); return inode; diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index a270cb7ff4e0..63759d723920 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1094,7 +1094,7 @@ int lmLogOpen(struct super_block *sb) if (log->bdev->bd_dev == sbi->logdev) { if (memcmp(log->uuid, sbi->loguuid, sizeof(log->uuid))) { - 
jfs_warn("wrong uuid on JFS journal\n"); + jfs_warn("wrong uuid on JFS journal"); mutex_unlock(&jfs_log_mutex); return -EINVAL; } @@ -1333,9 +1333,8 @@ int lmLogInit(struct jfs_log * log) rc = -EINVAL; goto errout20; } - jfs_info("lmLogInit: inline log:0x%p base:0x%Lx " - "size:0x%x", log, - (unsigned long long) log->base, log->size); + jfs_info("lmLogInit: inline log:0x%p base:0x%Lx size:0x%x", + log, (unsigned long long)log->base, log->size); } else { if (memcmp(logsuper->uuid, log->uuid, 16)) { jfs_warn("wrong uuid on JFS log device"); @@ -1343,9 +1342,8 @@ int lmLogInit(struct jfs_log * log) } log->size = le32_to_cpu(logsuper->size); log->l2bsize = le32_to_cpu(logsuper->l2bsize); - jfs_info("lmLogInit: external log:0x%p base:0x%Lx " - "size:0x%x", log, - (unsigned long long) log->base, log->size); + jfs_info("lmLogInit: external log:0x%p base:0x%Lx size:0x%x", + log, (unsigned long long)log->base, log->size); } log->page = le32_to_cpu(logsuper->end) / LOGPSIZE; @@ -2136,7 +2134,7 @@ static void lbmStartIO(struct lbuf * bp) struct bio *bio; struct jfs_log *log = bp->l_log; - jfs_info("lbmStartIO\n"); + jfs_info("lbmStartIO"); bio = bio_alloc(GFP_NOFS, 1); bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c index d595856453b2..eddf2b6eda85 100644 --- a/fs/jfs/jfs_txnmgr.c +++ b/fs/jfs/jfs_txnmgr.c @@ -1764,7 +1764,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, if (lwm == next) goto out; if (lwm > next) { - jfs_err("xtLog: lwm > next\n"); + jfs_err("xtLog: lwm > next"); goto out; } tlck->flag |= tlckUPDATEMAP; @@ -1798,8 +1798,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, xadlock->xdlist = &p->xad[lwm]; tblk->xflag &= ~COMMIT_LAZY; } - jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d " - "count:%d", tlck->ip, mp, tlck, lwm, xadlock->count); + jfs_info("xtLog: alloc ip:0x%p mp:0x%p tlck:0x%p lwm:%d count:%d", + tlck->ip, mp, tlck, lwm, xadlock->count); maplock->index = 1; @@ -2025,8 +2025,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, xadlock->count = next - lwm; xadlock->xdlist = &p->xad[lwm]; - jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d " - "lwm:%d next:%d", + jfs_info("xtLog: alloc ip:0x%p mp:0x%p count:%d lwm:%d next:%d", tlck->ip, mp, xadlock->count, lwm, next); maplock->index++; xadlock++; @@ -2047,8 +2046,8 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, pxdlock->count = 1; pxdlock->pxd = pxd; - jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d " - "hwm:%d", ip, mp, pxdlock->count, hwm); + jfs_info("xtLog: truncate ip:0x%p mp:0x%p count:%d hwm:%d", + ip, mp, pxdlock->count, hwm); maplock->index++; xadlock++; } @@ -2066,8 +2065,7 @@ static void xtLog(struct jfs_log * log, struct tblock * tblk, struct lrd * lrd, xadlock->count = hwm - next + 1; xadlock->xdlist = &p->xad[next]; - jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d " - "next:%d hwm:%d", + jfs_info("xtLog: free ip:0x%p mp:0x%p count:%d next:%d hwm:%d", tlck->ip, mp, xadlock->count, next, hwm); maplock->index++; } @@ -2523,8 +2521,7 @@ void txFreeMap(struct inode *ip, xlen = lengthXAD(xad); dbUpdatePMap(ipbmap, true, xaddr, (s64) xlen, tblk); - jfs_info("freePMap: xaddr:0x%lx " - "xlen:%d", + jfs_info("freePMap: xaddr:0x%lx xlen:%d", (ulong) xaddr, xlen); } } @@ -2814,7 +2811,7 @@ int jfs_lazycommit(void *arg) if (!list_empty(&TxAnchor.unlock_queue)) jfs_err("jfs_lazycommit being killed w/pending 
transactions!"); else - jfs_info("jfs_lazycommit being killed\n"); + jfs_info("jfs_lazycommit being killed"); return 0; } diff --git a/fs/jfs/jfs_xattr.h b/fs/jfs/jfs_xattr.h index e8d717dabca3..561f6af46288 100644 --- a/fs/jfs/jfs_xattr.h +++ b/fs/jfs/jfs_xattr.h @@ -19,6 +19,8 @@ #ifndef H_JFS_XATTR #define H_JFS_XATTR +#include <linux/xattr.h> + /* * jfs_ea_list describe the on-disk format of the extended attributes. * I know the null-terminator is redundant since namelen is stored, but @@ -54,12 +56,8 @@ struct jfs_ea_list { extern int __jfs_setxattr(tid_t, struct inode *, const char *, const void *, size_t, int); -extern int jfs_setxattr(struct dentry *, const char *, const void *, size_t, - int); extern ssize_t __jfs_getxattr(struct inode *, const char *, void *, size_t); -extern ssize_t jfs_getxattr(struct dentry *, const char *, void *, size_t); extern ssize_t jfs_listxattr(struct dentry *, char *, size_t); -extern int jfs_removexattr(struct dentry *, const char *); extern const struct xattr_handler *jfs_xattr_handlers[]; diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 701f89370de7..539deddecbb0 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c @@ -1225,8 +1225,8 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, rc = dtSearch(new_dir, &new_dname, &ino, &btstack, JFS_CREATE); if (rc) { - jfs_err("jfs_rename didn't expect dtSearch to fail " - "w/rc = %d", rc); + jfs_err("jfs_rename didn't expect dtSearch to fail w/rc = %d", + rc); goto out_tx; } @@ -1524,7 +1524,7 @@ struct dentry *jfs_get_parent(struct dentry *dentry) parent_ino = le32_to_cpu(JFS_IP(d_inode(dentry))->i_dtroot.header.idotdot); - return d_obtain_alias(jfs_iget(d_inode(dentry)->i_sb, parent_ino)); + return d_obtain_alias(jfs_iget(dentry->d_sb, parent_ino)); } const struct inode_operations jfs_dir_inode_operations = { @@ -1537,10 +1537,10 @@ const struct inode_operations jfs_dir_inode_operations = { .rmdir = jfs_rmdir, .mknod = jfs_mknod, .rename = jfs_rename, - .setxattr = jfs_setxattr, - .getxattr = jfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = jfs_listxattr, - .removexattr = jfs_removexattr, + .removexattr = generic_removexattr, .setattr = jfs_setattr, #ifdef CONFIG_JFS_POSIX_ACL .get_acl = jfs_get_acl, diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 78d599198bf5..cec8814a3b8b 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -84,7 +84,7 @@ static void jfs_handle_error(struct super_block *sb) panic("JFS (device %s): panic forced after error\n", sb->s_id); else if (sbi->flag & JFS_ERR_REMOUNT_RO) { - jfs_err("ERROR: (device %s): remounting filesystem as read-only\n", + jfs_err("ERROR: (device %s): remounting filesystem as read-only", sb->s_id); sb->s_flags |= MS_RDONLY; } @@ -641,7 +641,7 @@ static int jfs_freeze(struct super_block *sb) } rc = updateSuper(sb, FM_CLEAN); if (rc) { - jfs_err("jfs_freeze: updateSuper failed\n"); + jfs_err("jfs_freeze: updateSuper failed"); /* * Don't fail here. 
Everything succeeded except * marking the superblock clean, so there's really diff --git a/fs/jfs/symlink.c b/fs/jfs/symlink.c index f8db4fde0b0b..c94c7e4a1323 100644 --- a/fs/jfs/symlink.c +++ b/fs/jfs/symlink.c @@ -25,19 +25,19 @@ const struct inode_operations jfs_fast_symlink_inode_operations = { .readlink = generic_readlink, .get_link = simple_get_link, .setattr = jfs_setattr, - .setxattr = jfs_setxattr, - .getxattr = jfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = jfs_listxattr, - .removexattr = jfs_removexattr, + .removexattr = generic_removexattr, }; const struct inode_operations jfs_symlink_inode_operations = { .readlink = generic_readlink, .get_link = page_get_link, .setattr = jfs_setattr, - .setxattr = jfs_setxattr, - .getxattr = jfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = jfs_listxattr, - .removexattr = jfs_removexattr, + .removexattr = generic_removexattr, }; diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c index 48b15a6e5558..beb182b503b3 100644 --- a/fs/jfs/xattr.c +++ b/fs/jfs/xattr.c @@ -86,6 +86,14 @@ struct ea_buffer { #define EA_MALLOC 0x0008 +/* + * Mapping of on-disk attribute names: for on-disk attribute names with an + * unknown prefix (not "system.", "user.", "security.", or "trusted."), the + * prefix "os2." is prepended. On the way back to disk, "os2." prefixes are + * stripped and we make sure that the remaining name does not start with one + * of the know prefixes. + */ + static int is_known_namespace(const char *name) { if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) && @@ -97,29 +105,19 @@ static int is_known_namespace(const char *name) return true; } -/* - * These three routines are used to recognize on-disk extended attributes - * that are in a recognized namespace. If the attribute is not recognized, - * "os2." is prepended to the name - */ -static int is_os2_xattr(struct jfs_ea *ea) -{ - return !is_known_namespace(ea->name); -} - static inline int name_size(struct jfs_ea *ea) { - if (is_os2_xattr(ea)) - return ea->namelen + XATTR_OS2_PREFIX_LEN; - else + if (is_known_namespace(ea->name)) return ea->namelen; + else + return ea->namelen + XATTR_OS2_PREFIX_LEN; } static inline int copy_name(char *buffer, struct jfs_ea *ea) { int len = ea->namelen; - if (is_os2_xattr(ea)) { + if (!is_known_namespace(ea->name)) { memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN); buffer += XATTR_OS2_PREFIX_LEN; len += XATTR_OS2_PREFIX_LEN; @@ -665,35 +663,6 @@ static int ea_put(tid_t tid, struct inode *inode, struct ea_buffer *ea_buf, return 0; } -/* - * Most of the permission checking is done by xattr_permission in the vfs. - * We also need to verify that this is a namespace that we recognize. - */ -static int can_set_xattr(struct inode *inode, const char *name, - const void *value, size_t value_len) -{ - if (!strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN)) { - /* - * This makes sure that we aren't trying to set an - * attribute in a different namespace by prefixing it - * with "os2." - */ - if (is_known_namespace(name + XATTR_OS2_PREFIX_LEN)) - return -EOPNOTSUPP; - return 0; - } - - /* - * Don't allow setting an attribute in an unknown namespace. 
- */ - if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) && - strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) && - strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) - return -EOPNOTSUPP; - - return 0; -} - int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name, const void *value, size_t value_len, int flags) { @@ -704,21 +673,10 @@ int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name, int xattr_size; int new_size; int namelen = strlen(name); - char *os2name = NULL; int found = 0; int rc; int length; - if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) { - os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1, - GFP_KERNEL); - if (!os2name) - return -ENOMEM; - strcpy(os2name, name + XATTR_OS2_PREFIX_LEN); - name = os2name; - namelen -= XATTR_OS2_PREFIX_LEN; - } - down_write(&JFS_IP(inode)->xattr_sem); xattr_size = ea_get(inode, &ea_buf, 0); @@ -841,44 +799,6 @@ int __jfs_setxattr(tid_t tid, struct inode *inode, const char *name, out: up_write(&JFS_IP(inode)->xattr_sem); - kfree(os2name); - - return rc; -} - -int jfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t value_len, int flags) -{ - struct inode *inode = d_inode(dentry); - struct jfs_inode_info *ji = JFS_IP(inode); - int rc; - tid_t tid; - - /* - * If this is a request for a synthetic attribute in the system.* - * namespace use the generic infrastructure to resolve a handler - * for it via sb->s_xattr. - */ - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) - return generic_setxattr(dentry, name, value, value_len, flags); - - if ((rc = can_set_xattr(inode, name, value, value_len))) - return rc; - - if (value == NULL) { /* empty EA, do not remove */ - value = ""; - value_len = 0; - } - - tid = txBegin(inode->i_sb, 0); - mutex_lock(&ji->commit_mutex); - rc = __jfs_setxattr(tid, d_inode(dentry), name, value, value_len, - flags); - if (!rc) - rc = txCommit(tid, 1, &inode, 0); - txEnd(tid); - mutex_unlock(&ji->commit_mutex); - return rc; } @@ -933,37 +853,6 @@ ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data, return size; } -ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data, - size_t buf_size) -{ - int err; - - /* - * If this is a request for a synthetic attribute in the system.* - * namespace use the generic infrastructure to resolve a handler - * for it via sb->s_xattr. - */ - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) - return generic_getxattr(dentry, name, data, buf_size); - - if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) { - /* - * skip past "os2." prefix - */ - name += XATTR_OS2_PREFIX_LEN; - /* - * Don't allow retrieving properly prefixed attributes - * by prepending them with "os2." 
- */ - if (is_known_namespace(name)) - return -EOPNOTSUPP; - } - - err = __jfs_getxattr(d_inode(dentry), name, data, buf_size); - - return err; -} - /* * No special permissions are needed to list attributes except for trusted.* */ @@ -1027,27 +916,16 @@ ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size) return size; } -int jfs_removexattr(struct dentry *dentry, const char *name) +static int __jfs_xattr_set(struct inode *inode, const char *name, + const void *value, size_t size, int flags) { - struct inode *inode = d_inode(dentry); struct jfs_inode_info *ji = JFS_IP(inode); - int rc; tid_t tid; - - /* - * If this is a request for a synthetic attribute in the system.* - * namespace use the generic infrastructure to resolve a handler - * for it via sb->s_xattr. - */ - if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) - return generic_removexattr(dentry, name); - - if ((rc = can_set_xattr(inode, name, NULL, 0))) - return rc; + int rc; tid = txBegin(inode->i_sb, 0); mutex_lock(&ji->commit_mutex); - rc = __jfs_setxattr(tid, d_inode(dentry), name, NULL, 0, XATTR_REPLACE); + rc = __jfs_setxattr(tid, inode, name, value, size, flags); if (!rc) rc = txCommit(tid, 1, &inode, 0); txEnd(tid); @@ -1056,15 +934,77 @@ int jfs_removexattr(struct dentry *dentry, const char *name) return rc; } -/* - * List of handlers for synthetic system.* attributes. All real ondisk - * attributes are handled directly. - */ +static int jfs_xattr_get(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *name, void *value, size_t size) +{ + name = xattr_full_name(handler, name); + return __jfs_getxattr(inode, name, value, size); +} + +static int jfs_xattr_set(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + struct inode *inode = d_inode(dentry); + + name = xattr_full_name(handler, name); + return __jfs_xattr_set(inode, name, value, size, flags); +} + +static int jfs_xattr_get_os2(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *name, void *value, size_t size) +{ + if (is_known_namespace(name)) + return -EOPNOTSUPP; + return __jfs_getxattr(inode, name, value, size); +} + +static int jfs_xattr_set_os2(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + struct inode *inode = d_inode(dentry); + + if (is_known_namespace(name)) + return -EOPNOTSUPP; + return __jfs_xattr_set(inode, name, value, size, flags); +} + +static const struct xattr_handler jfs_user_xattr_handler = { + .prefix = XATTR_USER_PREFIX, + .get = jfs_xattr_get, + .set = jfs_xattr_set, +}; + +static const struct xattr_handler jfs_os2_xattr_handler = { + .prefix = XATTR_OS2_PREFIX, + .get = jfs_xattr_get_os2, + .set = jfs_xattr_set_os2, +}; + +static const struct xattr_handler jfs_security_xattr_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .get = jfs_xattr_get, + .set = jfs_xattr_set, +}; + +static const struct xattr_handler jfs_trusted_xattr_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .get = jfs_xattr_get, + .set = jfs_xattr_set, +}; + const struct xattr_handler *jfs_xattr_handlers[] = { #ifdef CONFIG_JFS_POSIX_ACL &posix_acl_access_xattr_handler, &posix_acl_default_xattr_handler, #endif + &jfs_os2_xattr_handler, + &jfs_user_xattr_handler, + &jfs_security_xattr_handler, + &jfs_trusted_xattr_handler, NULL, }; diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 
03b688d19f69..8a652404eef6 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -153,9 +153,9 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to, p = buf + len + nlen; *p = '\0'; for (kn = kn_to; kn != common; kn = kn->parent) { - nlen = strlen(kn->name); - p -= nlen; - memcpy(p, kn->name, nlen); + size_t tmp = strlen(kn->name); + p -= tmp; + memcpy(p, kn->name, tmp); *(--p) = '/'; } @@ -753,7 +753,8 @@ int kernfs_add_one(struct kernfs_node *kn) ps_iattr = parent->iattr; if (ps_iattr) { struct iattr *ps_iattrs = &ps_iattr->ia_iattr; - ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME; + ktime_get_real_ts(&ps_iattrs->ia_ctime); + ps_iattrs->ia_mtime = ps_iattrs->ia_ctime; } mutex_unlock(&kernfs_mutex); @@ -1279,8 +1280,9 @@ static void __kernfs_remove(struct kernfs_node *kn) /* update timestamps on the parent */ if (ps_iattr) { - ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME; - ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME; + ktime_get_real_ts(&ps_iattr->ia_iattr.ia_ctime); + ps_iattr->ia_iattr.ia_mtime = + ps_iattr->ia_iattr.ia_ctime; } kernfs_put(pos); @@ -1643,22 +1645,9 @@ static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx) return 0; } -static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset, - int whence) -{ - struct inode *inode = file_inode(file); - loff_t ret; - - inode_lock(inode); - ret = generic_file_llseek(file, offset, whence); - inode_unlock(inode); - - return ret; -} - const struct file_operations kernfs_dir_fops = { .read = generic_read_dir, - .iterate = kernfs_fop_readdir, + .iterate_shared = kernfs_fop_readdir, .release = kernfs_dir_fop_release, - .llseek = kernfs_dir_fop_llseek, + .llseek = generic_file_llseek, }; diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 7247252ee9b1..e1574008adc9 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -190,15 +190,16 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of, char *buf; buf = of->prealloc_buf; - if (!buf) + if (buf) + mutex_lock(&of->prealloc_mutex); + else buf = kmalloc(len, GFP_KERNEL); if (!buf) return -ENOMEM; /* * @of->mutex nests outside active ref and is used both to ensure that - * the ops aren't called concurrently for the same open file, and - * to provide exclusive access to ->prealloc_buf (when that exists). + * the ops aren't called concurrently for the same open file. 
*/ mutex_lock(&of->mutex); if (!kernfs_get_active(of->kn)) { @@ -214,21 +215,23 @@ static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of, else len = -EINVAL; + kernfs_put_active(of->kn); + mutex_unlock(&of->mutex); + if (len < 0) - goto out_unlock; + goto out_free; if (copy_to_user(user_buf, buf, len)) { len = -EFAULT; - goto out_unlock; + goto out_free; } *ppos += len; - out_unlock: - kernfs_put_active(of->kn); - mutex_unlock(&of->mutex); out_free: - if (buf != of->prealloc_buf) + if (buf == of->prealloc_buf) + mutex_unlock(&of->prealloc_mutex); + else kfree(buf); return len; } @@ -284,15 +287,22 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, } buf = of->prealloc_buf; - if (!buf) + if (buf) + mutex_lock(&of->prealloc_mutex); + else buf = kmalloc(len + 1, GFP_KERNEL); if (!buf) return -ENOMEM; + if (copy_from_user(buf, user_buf, len)) { + len = -EFAULT; + goto out_free; + } + buf[len] = '\0'; /* guarantee string termination */ + /* * @of->mutex nests outside active ref and is used both to ensure that - * the ops aren't called concurrently for the same open file, and - * to provide exclusive access to ->prealloc_buf (when that exists). + * the ops aren't called concurrently for the same open file. */ mutex_lock(&of->mutex); if (!kernfs_get_active(of->kn)) { @@ -301,26 +311,22 @@ static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf, goto out_free; } - if (copy_from_user(buf, user_buf, len)) { - len = -EFAULT; - goto out_unlock; - } - buf[len] = '\0'; /* guarantee string termination */ - ops = kernfs_ops(of->kn); if (ops->write) len = ops->write(of, buf, len, *ppos); else len = -EINVAL; + kernfs_put_active(of->kn); + mutex_unlock(&of->mutex); + if (len > 0) *ppos += len; -out_unlock: - kernfs_put_active(of->kn); - mutex_unlock(&of->mutex); out_free: - if (buf != of->prealloc_buf) + if (buf == of->prealloc_buf) + mutex_unlock(&of->prealloc_mutex); + else kfree(buf); return len; } @@ -687,6 +693,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file) error = -ENOMEM; if (!of->prealloc_buf) goto err_free; + mutex_init(&of->prealloc_mutex); } /* diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c index 16405ae88d2d..1719649d7ad7 100644 --- a/fs/kernfs/inode.c +++ b/fs/kernfs/inode.c @@ -54,7 +54,10 @@ static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn) iattrs->ia_mode = kn->mode; iattrs->ia_uid = GLOBAL_ROOT_UID; iattrs->ia_gid = GLOBAL_ROOT_GID; - iattrs->ia_atime = iattrs->ia_mtime = iattrs->ia_ctime = CURRENT_TIME; + + ktime_get_real_ts(&iattrs->ia_atime); + iattrs->ia_mtime = iattrs->ia_atime; + iattrs->ia_ctime = iattrs->ia_atime; simple_xattrs_init(&kn->iattr->xattrs); out_unlock: @@ -208,10 +211,10 @@ int kernfs_iop_removexattr(struct dentry *dentry, const char *name) return simple_xattr_set(&attrs->xattrs, name, NULL, 0, XATTR_REPLACE); } -ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf, - size_t size) +ssize_t kernfs_iop_getxattr(struct dentry *unused, struct inode *inode, + const char *name, void *buf, size_t size) { - struct kernfs_node *kn = dentry->d_fsdata; + struct kernfs_node *kn = inode->i_private; struct kernfs_iattrs *attrs; attrs = kernfs_iattrs(kn); @@ -236,16 +239,18 @@ ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size) static inline void set_default_inode_attr(struct inode *inode, umode_t mode) { inode->i_mode = mode; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_atime = 
inode->i_mtime = + inode->i_ctime = current_fs_time(inode->i_sb); } static inline void set_inode_attr(struct inode *inode, struct iattr *iattr) { + struct super_block *sb = inode->i_sb; inode->i_uid = iattr->ia_uid; inode->i_gid = iattr->ia_gid; - inode->i_atime = iattr->ia_atime; - inode->i_mtime = iattr->ia_mtime; - inode->i_ctime = iattr->ia_ctime; + inode->i_atime = timespec_trunc(iattr->ia_atime, sb->s_time_gran); + inode->i_mtime = timespec_trunc(iattr->ia_mtime, sb->s_time_gran); + inode->i_ctime = timespec_trunc(iattr->ia_ctime, sb->s_time_gran); } static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode) diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h index 6762bfbd8207..45c9192c276e 100644 --- a/fs/kernfs/kernfs-internal.h +++ b/fs/kernfs/kernfs-internal.h @@ -84,8 +84,8 @@ int kernfs_iop_getattr(struct vfsmount *mnt, struct dentry *dentry, int kernfs_iop_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); int kernfs_iop_removexattr(struct dentry *dentry, const char *name); -ssize_t kernfs_iop_getxattr(struct dentry *dentry, const char *name, void *buf, - size_t size); +ssize_t kernfs_iop_getxattr(struct dentry *dentry, struct inode *inode, + const char *name, void *buf, size_t size); ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size); /* diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c index f73541fbe7af..63534f5f9073 100644 --- a/fs/kernfs/mount.c +++ b/fs/kernfs/mount.c @@ -15,6 +15,7 @@ #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/namei.h> +#include <linux/seq_file.h> #include "kernfs-internal.h" @@ -40,6 +41,19 @@ static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry) return 0; } +static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry) +{ + struct kernfs_node *node = dentry->d_fsdata; + struct kernfs_root *root = kernfs_root(node); + struct kernfs_syscall_ops *scops = root->syscall_ops; + + if (scops && scops->show_path) + return scops->show_path(sf, node, root); + + seq_dentry(sf, dentry, " \t\n\\"); + return 0; +} + const struct super_operations kernfs_sops = { .statfs = simple_statfs, .drop_inode = generic_delete_inode, @@ -47,6 +61,7 @@ const struct super_operations kernfs_sops = { .remount_fs = kernfs_sop_remount_fs, .show_options = kernfs_sop_show_options, + .show_path = kernfs_sop_show_path, }; /** @@ -120,9 +135,8 @@ struct dentry *kernfs_node_dentry(struct kernfs_node *kn, kntmp = find_next_ancestor(kn, knparent); if (WARN_ON(!kntmp)) return ERR_PTR(-EINVAL); - mutex_lock(&d_inode(dentry)->i_mutex); - dtmp = lookup_one_len(kntmp->name, dentry, strlen(kntmp->name)); - mutex_unlock(&d_inode(dentry)->i_mutex); + dtmp = lookup_one_len_unlocked(kntmp->name, dentry, + strlen(kntmp->name)); dput(dentry); if (IS_ERR(dtmp)) return dtmp; diff --git a/fs/libfs.c b/fs/libfs.c index f3fa82ce9b70..8765ff1adc07 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -89,7 +89,6 @@ EXPORT_SYMBOL(dcache_dir_close); loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) { struct dentry *dentry = file->f_path.dentry; - inode_lock(d_inode(dentry)); switch (whence) { case 1: offset += file->f_pos; @@ -97,7 +96,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) if (offset >= 0) break; default: - inode_unlock(d_inode(dentry)); return -EINVAL; } if (offset != file->f_pos) { @@ -124,7 +122,6 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) 
spin_unlock(&dentry->d_lock); } } - inode_unlock(d_inode(dentry)); return offset; } EXPORT_SYMBOL(dcache_dir_lseek); @@ -190,7 +187,7 @@ const struct file_operations simple_dir_operations = { .release = dcache_dir_close, .llseek = dcache_dir_lseek, .read = generic_read_dir, - .iterate = dcache_readdir, + .iterate_shared = dcache_readdir, .fsync = noop_fsync, }; EXPORT_SYMBOL(simple_dir_operations); @@ -1127,8 +1124,8 @@ static int empty_dir_setxattr(struct dentry *dentry, const char *name, return -EOPNOTSUPP; } -static ssize_t empty_dir_getxattr(struct dentry *dentry, const char *name, - void *value, size_t size) +static ssize_t empty_dir_getxattr(struct dentry *dentry, struct inode *inode, + const char *name, void *value, size_t size) { return -EOPNOTSUPP; } @@ -1169,7 +1166,7 @@ static int empty_dir_readdir(struct file *file, struct dir_context *ctx) static const struct file_operations empty_dir_operations = { .llseek = empty_dir_llseek, .read = generic_read_dir, - .iterate = empty_dir_readdir, + .iterate_shared = empty_dir_readdir, .fsync = noop_fsync, }; diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index ddbed2be5366..2d5336bd4efd 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c @@ -791,7 +791,7 @@ const struct inode_operations logfs_dir_iops = { const struct file_operations logfs_dir_fops = { .fsync = logfs_fsync, .unlocked_ioctl = logfs_ioctl, - .iterate = logfs_readdir, + .iterate_shared = logfs_readdir, .read = generic_read_dir, - .llseek = default_llseek, + .llseek = generic_file_llseek, }; diff --git a/fs/minix/dir.c b/fs/minix/dir.c index 33957c07cd11..31dcd515b9d5 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c @@ -21,7 +21,7 @@ static int minix_readdir(struct file *, struct dir_context *); const struct file_operations minix_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = minix_readdir, + .iterate_shared = minix_readdir, .fsync = generic_file_fsync, }; diff --git a/fs/namei.c b/fs/namei.c index 1d9ca2d5dff6..5375571cf6e1 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -265,7 +265,7 @@ static int check_acl(struct inode *inode, int mask) if (!acl) return -EAGAIN; /* no ->get_acl() calls in RCU mode... 
*/ - if (acl == ACL_NOT_CACHED) + if (is_uncached_acl(acl)) return -ECHILD; return posix_acl_permission(inode, acl, mask & ~MAY_NOT_BLOCK); } @@ -1603,32 +1603,42 @@ static struct dentry *lookup_slow(const struct qstr *name, struct dentry *dir, unsigned int flags) { - struct dentry *dentry; - inode_lock(dir->d_inode); - dentry = d_lookup(dir, name); - if (unlikely(dentry)) { + struct dentry *dentry = ERR_PTR(-ENOENT), *old; + struct inode *inode = dir->d_inode; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + + inode_lock_shared(inode); + /* Don't go there if it's already dead */ + if (unlikely(IS_DEADDIR(inode))) + goto out; +again: + dentry = d_alloc_parallel(dir, name, &wq); + if (IS_ERR(dentry)) + goto out; + if (unlikely(!d_in_lookup(dentry))) { if ((dentry->d_flags & DCACHE_OP_REVALIDATE) && !(flags & LOOKUP_NO_REVAL)) { int error = d_revalidate(dentry, flags); if (unlikely(error <= 0)) { - if (!error) + if (!error) { d_invalidate(dentry); + dput(dentry); + goto again; + } dput(dentry); dentry = ERR_PTR(error); } } - if (dentry) { - inode_unlock(dir->d_inode); - return dentry; + } else { + old = inode->i_op->lookup(inode, dentry, flags); + d_lookup_done(dentry); + if (unlikely(old)) { + dput(dentry); + dentry = old; } } - dentry = d_alloc(dir, name); - if (unlikely(!dentry)) { - inode_unlock(dir->d_inode); - return ERR_PTR(-ENOMEM); - } - dentry = lookup_real(dir->d_inode, dentry, flags); - inode_unlock(dir->d_inode); +out: + inode_unlock_shared(inode); return dentry; } @@ -1794,30 +1804,49 @@ static inline unsigned int fold_hash(unsigned long hash) return hash_64(hash, 32); } +/* + * This is George Marsaglia's XORSHIFT generator. + * It implements a maximum-period LFSR in only a few + * instructions. It also has the property (required + * by hash_name()) that mix_hash(0) = 0. + */ +static inline unsigned long mix_hash(unsigned long hash) +{ + hash ^= hash << 13; + hash ^= hash >> 7; + hash ^= hash << 17; + return hash; +} + #else /* 32-bit case */ #define fold_hash(x) (x) +static inline unsigned long mix_hash(unsigned long hash) +{ + hash ^= hash << 13; + hash ^= hash >> 17; + hash ^= hash << 5; + return hash; +} + #endif unsigned int full_name_hash(const unsigned char *name, unsigned int len) { - unsigned long a, mask; - unsigned long hash = 0; + unsigned long a, hash = 0; for (;;) { a = load_unaligned_zeropad(name); if (len < sizeof(unsigned long)) break; - hash += a; - hash *= 9; + hash = mix_hash(hash + a); name += sizeof(unsigned long); len -= sizeof(unsigned long); if (!len) goto done; } - mask = bytemask_from_count(len); - hash += mask & a; + hash += a & bytemask_from_count(len); done: return fold_hash(hash); } @@ -1835,7 +1864,7 @@ static inline u64 hash_name(const char *name) hash = a = 0; len = -sizeof(unsigned long); do { - hash = (hash + a) * 9; + hash = mix_hash(hash + a); len += sizeof(unsigned long); a = load_unaligned_zeropad(name+len); b = a ^ REPEAT_BYTE('/'); @@ -2267,6 +2296,33 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt, EXPORT_SYMBOL(vfs_path_lookup); /** + * lookup_hash - lookup single pathname component on already hashed name + * @name: name and hash to lookup + * @base: base directory to lookup from + * + * The name must have been verified and hashed (see lookup_one_len()). Using + * this after just full_name_hash() is unsafe. + * + * This function also doesn't check for search permission on base directory. + * + * Use lookup_one_len_unlocked() instead, unless you really know what you are + * doing. 
+ * + * Do not hold i_mutex; this helper takes i_mutex if necessary. + */ +struct dentry *lookup_hash(const struct qstr *name, struct dentry *base) +{ + struct dentry *ret; + + ret = lookup_dcache(name, base, 0); + if (!ret) + ret = lookup_slow(name, base, 0); + + return ret; +} +EXPORT_SYMBOL(lookup_hash); + +/** * lookup_one_len - filesystem helper to lookup single pathname component * @name: pathname component to lookup * @base: base directory to lookup from @@ -2337,7 +2393,6 @@ struct dentry *lookup_one_len_unlocked(const char *name, struct qstr this; unsigned int c; int err; - struct dentry *ret; this.name = name; this.len = len; @@ -2369,10 +2424,7 @@ struct dentry *lookup_one_len_unlocked(const char *name, if (err) return ERR_PTR(err); - ret = lookup_dcache(&this, base, 0); - if (!ret) - ret = lookup_slow(&this, base, 0); - return ret; + return lookup_hash(&this, base); } EXPORT_SYMBOL(lookup_one_len_unlocked); @@ -2655,7 +2707,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2) return NULL; } - mutex_lock(&p1->d_inode->i_sb->s_vfs_rename_mutex); + mutex_lock(&p1->d_sb->s_vfs_rename_mutex); p = d_ancestor(p2, p1); if (p) { @@ -2682,7 +2734,7 @@ void unlock_rename(struct dentry *p1, struct dentry *p2) inode_unlock(p1->d_inode); if (p1 != p2) { inode_unlock(p2->d_inode); - mutex_unlock(&p1->d_inode->i_sb->s_vfs_rename_mutex); + mutex_unlock(&p1->d_sb->s_vfs_rename_mutex); } } EXPORT_SYMBOL(unlock_rename); @@ -2785,7 +2837,7 @@ static inline int open_to_namei_flags(int flag) return flag; } -static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode) +static int may_o_create(const struct path *dir, struct dentry *dentry, umode_t mode) { int error = security_path_mknod(dir, dentry, mode, 0); if (error) @@ -2814,155 +2866,56 @@ static int may_o_create(struct path *dir, struct dentry *dentry, umode_t mode) static int atomic_open(struct nameidata *nd, struct dentry *dentry, struct path *path, struct file *file, const struct open_flags *op, - bool got_write, bool need_lookup, + int open_flag, umode_t mode, int *opened) { + struct dentry *const DENTRY_NOT_SET = (void *) -1UL; struct inode *dir = nd->path.dentry->d_inode; - unsigned open_flag = open_to_namei_flags(op->open_flag); - umode_t mode; int error; - int acc_mode; - int create_error = 0; - struct dentry *const DENTRY_NOT_SET = (void *) -1UL; - bool excl; - - BUG_ON(dentry->d_inode); - /* Don't create child dentry for a dead directory. */ - if (unlikely(IS_DEADDIR(dir))) { - error = -ENOENT; - goto out; - } - - mode = op->mode; - if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) - mode &= ~current_umask(); - - excl = (open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT); - if (excl) + if (!(~open_flag & (O_EXCL | O_CREAT))) /* both O_EXCL and O_CREAT */ open_flag &= ~O_TRUNC; - /* - * Checking write permission is tricky, bacuse we don't know if we are - * going to actually need it: O_CREAT opens should work as long as the - * file exists. But checking existence breaks atomicity. The trick is - * to check access and if not granted clear O_CREAT from the flags. - * - * Another problem is returing the "right" error value (e.g. for an - * O_EXCL open we want to return EEXIST not EROFS). 
- */ - if (((open_flag & (O_CREAT | O_TRUNC)) || - (open_flag & O_ACCMODE) != O_RDONLY) && unlikely(!got_write)) { - if (!(open_flag & O_CREAT)) { - /* - * No O_CREATE -> atomicity not a requirement -> fall - * back to lookup + open - */ - goto no_open; - } else if (open_flag & (O_EXCL | O_TRUNC)) { - /* Fall back and fail with the right error */ - create_error = -EROFS; - goto no_open; - } else { - /* No side effects, safe to clear O_CREAT */ - create_error = -EROFS; - open_flag &= ~O_CREAT; - } - } - - if (open_flag & O_CREAT) { - error = may_o_create(&nd->path, dentry, mode); - if (error) { - create_error = error; - if (open_flag & O_EXCL) - goto no_open; - open_flag &= ~O_CREAT; - } - } - if (nd->flags & LOOKUP_DIRECTORY) open_flag |= O_DIRECTORY; file->f_path.dentry = DENTRY_NOT_SET; file->f_path.mnt = nd->path.mnt; - error = dir->i_op->atomic_open(dir, dentry, file, open_flag, mode, - opened); - if (error < 0) { - if (create_error && error == -ENOENT) - error = create_error; - goto out; - } - - if (error) { /* returned 1, that is */ + error = dir->i_op->atomic_open(dir, dentry, file, + open_to_namei_flags(open_flag), + mode, opened); + d_lookup_done(dentry); + if (!error) { + /* + * We didn't have the inode before the open, so check open + * permission here. + */ + int acc_mode = op->acc_mode; + if (*opened & FILE_CREATED) { + WARN_ON(!(open_flag & O_CREAT)); + fsnotify_create(dir, dentry); + acc_mode = 0; + } + error = may_open(&file->f_path, acc_mode, open_flag); + if (WARN_ON(error > 0)) + error = -EINVAL; + } else if (error > 0) { if (WARN_ON(file->f_path.dentry == DENTRY_NOT_SET)) { error = -EIO; - goto out; - } - if (file->f_path.dentry) { - dput(dentry); - dentry = file->f_path.dentry; - } - if (*opened & FILE_CREATED) - fsnotify_create(dir, dentry); - if (!dentry->d_inode) { - WARN_ON(*opened & FILE_CREATED); - if (create_error) { - error = create_error; - goto out; - } } else { - if (excl && !(*opened & FILE_CREATED)) { - error = -EEXIST; - goto out; + if (file->f_path.dentry) { + dput(dentry); + dentry = file->f_path.dentry; } + if (*opened & FILE_CREATED) + fsnotify_create(dir, dentry); + path->dentry = dentry; + path->mnt = nd->path.mnt; + return 1; } - goto looked_up; } - - /* - * We didn't have the inode before the open, so check open permission - * here. 
- */ - acc_mode = op->acc_mode; - if (*opened & FILE_CREATED) { - WARN_ON(!(open_flag & O_CREAT)); - fsnotify_create(dir, dentry); - acc_mode = 0; - } - error = may_open(&file->f_path, acc_mode, open_flag); - if (error) - fput(file); - -out: dput(dentry); return error; - -no_open: - if (need_lookup) { - dentry = lookup_real(dir, dentry, nd->flags); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); - - if (create_error) { - int open_flag = op->open_flag; - - error = create_error; - if ((open_flag & O_EXCL)) { - if (!dentry->d_inode) - goto out; - } else if (!dentry->d_inode) { - goto out; - } else if ((open_flag & O_TRUNC) && - d_is_reg(dentry)) { - goto out; - } - /* will fail later, go on to get the right error */ - } - } -looked_up: - path->dentry = dentry; - path->mnt = nd->path.mnt; - return 1; } /* @@ -2990,62 +2943,118 @@ static int lookup_open(struct nameidata *nd, struct path *path, { struct dentry *dir = nd->path.dentry; struct inode *dir_inode = dir->d_inode; + int open_flag = op->open_flag; struct dentry *dentry; - int error; - bool need_lookup = false; + int error, create_error = 0; + umode_t mode = op->mode; + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + + if (unlikely(IS_DEADDIR(dir_inode))) + return -ENOENT; *opened &= ~FILE_CREATED; - dentry = lookup_dcache(&nd->last, dir, nd->flags); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); + dentry = d_lookup(dir, &nd->last); + for (;;) { + if (!dentry) { + dentry = d_alloc_parallel(dir, &nd->last, &wq); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + } + if (d_in_lookup(dentry)) + break; - if (!dentry) { - dentry = d_alloc(dir, &nd->last); - if (unlikely(!dentry)) - return -ENOMEM; - need_lookup = true; - } else if (dentry->d_inode) { + if (!(dentry->d_flags & DCACHE_OP_REVALIDATE)) + break; + + error = d_revalidate(dentry, nd->flags); + if (likely(error > 0)) + break; + if (error) + goto out_dput; + d_invalidate(dentry); + dput(dentry); + dentry = NULL; + } + if (dentry->d_inode) { /* Cached positive dentry: will open in f_op->open */ goto out_no_open; } - if ((nd->flags & LOOKUP_OPEN) && dir_inode->i_op->atomic_open) { - return atomic_open(nd, dentry, path, file, op, got_write, - need_lookup, opened); + /* + * Checking write permission is tricky, because we don't know if we are + * going to actually need it: O_CREAT opens should work as long as the + * file exists. But checking existence breaks atomicity. The trick is + * to check access and if not granted clear O_CREAT from the flags. + * + * Another problem is returning the "right" error value (e.g. for an + * O_EXCL open we want to return EEXIST not EROFS).
+ */ + if (open_flag & O_CREAT) { + if (!IS_POSIXACL(dir->d_inode)) + mode &= ~current_umask(); + if (unlikely(!got_write)) { + create_error = -EROFS; + open_flag &= ~O_CREAT; + if (open_flag & (O_EXCL | O_TRUNC)) + goto no_open; + /* No side effects, safe to clear O_CREAT */ + } else { + create_error = may_o_create(&nd->path, dentry, mode); + if (create_error) { + open_flag &= ~O_CREAT; + if (open_flag & O_EXCL) + goto no_open; + } + } + } else if ((open_flag & (O_TRUNC|O_WRONLY|O_RDWR)) && + unlikely(!got_write)) { + /* + * No O_CREAT -> atomicity not a requirement -> fall + * back to lookup + open + */ + goto no_open; } - if (need_lookup) { - BUG_ON(dentry->d_inode); + if (dir_inode->i_op->atomic_open) { + error = atomic_open(nd, dentry, path, file, op, open_flag, + mode, opened); + if (unlikely(error == -ENOENT) && create_error) + error = create_error; + return error; + } - dentry = lookup_real(dir_inode, dentry, nd->flags); - if (IS_ERR(dentry)) - return PTR_ERR(dentry); +no_open: + if (d_in_lookup(dentry)) { + struct dentry *res = dir_inode->i_op->lookup(dir_inode, dentry, + nd->flags); + d_lookup_done(dentry); + if (unlikely(res)) { + if (IS_ERR(res)) { + error = PTR_ERR(res); + goto out_dput; + } + dput(dentry); + dentry = res; + } } /* Negative dentry, just create the file */ - if (!dentry->d_inode && (op->open_flag & O_CREAT)) { - umode_t mode = op->mode; - if (!IS_POSIXACL(dir->d_inode)) - mode &= ~current_umask(); - /* - * This write is needed to ensure that a - * rw->ro transition does not occur between - * the time when the file is created and when - * a permanent write count is taken through - * the 'struct file' in finish_open(). - */ - if (!got_write) { - error = -EROFS; - goto out_dput; - } + if (!dentry->d_inode && (open_flag & O_CREAT)) { *opened |= FILE_CREATED; - error = security_path_mknod(&nd->path, dentry, mode, 0); - if (error) + audit_inode_child(dir_inode, dentry, AUDIT_TYPE_CHILD_CREATE); + if (!dir_inode->i_op->create) { + error = -EACCES; goto out_dput; - error = vfs_create(dir->d_inode, dentry, mode, - nd->flags & LOOKUP_EXCL); + } + error = dir_inode->i_op->create(dir_inode, dentry, mode, + open_flag & O_EXCL); if (error) goto out_dput; + fsnotify_create(dir_inode, dentry); + } + if (unlikely(create_error) && !dentry->d_inode) { + error = create_error; + goto out_dput; + } out_no_open: path->dentry = dentry; @@ -3117,7 +3126,7 @@ static int do_last(struct nameidata *nd, } retry_lookup: - if (op->open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { + if (open_flag & (O_CREAT | O_TRUNC | O_WRONLY | O_RDWR)) { error = mnt_want_write(nd->path.mnt); if (!error) got_write = true; @@ -3127,9 +3136,15 @@ retry_lookup: * dropping this one anyway.
*/ } - inode_lock(dir->d_inode); + if (open_flag & O_CREAT) + inode_lock(dir->d_inode); + else + inode_lock_shared(dir->d_inode); error = lookup_open(nd, &path, file, op, got_write, opened); - inode_unlock(dir->d_inode); + if (open_flag & O_CREAT) + inode_unlock(dir->d_inode); + else + inode_unlock_shared(dir->d_inode); if (error <= 0) { if (error) @@ -3209,10 +3224,6 @@ finish_open: return error; } audit_inode(nd->name, nd->path.dentry, 0); - if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) { - error = -ELOOP; - goto out; - } error = -EISDIR; if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry)) goto out; @@ -3229,11 +3240,9 @@ finish_open: got_write = true; } finish_open_created: - if (likely(!(open_flag & O_PATH))) { - error = may_open(&nd->path, acc_mode, open_flag); - if (error) - goto out; - } + error = may_open(&nd->path, acc_mode, open_flag); + if (error) + goto out; BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ error = vfs_open(&nd->path, file, current_cred()); if (!error) { @@ -3245,18 +3254,13 @@ finish_open_created: } opened: error = open_check_o_direct(file); - if (error) - goto exit_fput; - error = ima_file_check(file, op->acc_mode, *opened); - if (error) - goto exit_fput; - - if (will_truncate) { + if (!error) + error = ima_file_check(file, op->acc_mode, *opened); + if (!error && will_truncate) error = handle_truncate(file); - if (error) - goto exit_fput; - } out: + if (unlikely(error) && (*opened & FILE_OPENED)) + fput(file); if (unlikely(error > 0)) { WARN_ON(1); error = -EINVAL; @@ -3266,10 +3270,6 @@ out: path_put(&save_parent); return error; -exit_fput: - fput(file); - goto out; - stale_open: /* If no saved parent or already retried then can't retry */ if (!save_parent.dentry || retried) @@ -3347,6 +3347,18 @@ out: return error; } +static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file) +{ + struct path path; + int error = path_lookupat(nd, flags, &path); + if (!error) { + audit_inode(nd->name, path.dentry, 0); + error = vfs_open(&path, file, current_cred()); + path_put(&path); + } + return error; +} + static struct file *path_openat(struct nameidata *nd, const struct open_flags *op, unsigned flags) { @@ -3366,6 +3378,13 @@ static struct file *path_openat(struct nameidata *nd, goto out2; } + if (unlikely(file->f_flags & O_PATH)) { + error = do_o_path(nd, flags, file); + if (!error) + opened |= FILE_OPENED; + goto out2; + } + s = path_init(nd, flags); if (IS_ERR(s)) { put_filp(file); @@ -3608,6 +3627,8 @@ retry: switch (mode & S_IFMT) { case 0: case S_IFREG: error = vfs_create(path.dentry->d_inode,dentry,mode,true); + if (!error) + ima_post_path_mknod(dentry); break; case S_IFCHR: case S_IFBLK: error = vfs_mknod(path.dentry->d_inode,dentry,mode, @@ -4213,7 +4234,11 @@ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry, bool new_is_dir = false; unsigned max_links = new_dir->i_sb->s_max_links; - if (source == target) + /* + * Check source == target. + * On overlayfs need to look at underlying inodes. 
+ */ + if (vfs_select_inode(old_dentry, 0) == vfs_select_inode(new_dentry, 0)) return 0; error = may_delete(old_dir, old_dentry, is_dir); diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 33eb81738d03..aaf7bd0cbae2 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -57,7 +57,7 @@ static void nfs_readdir_clear_array(struct page*); const struct file_operations nfs_dir_operations = { .llseek = nfs_llseek_dir, .read = generic_read_dir, - .iterate = nfs_readdir, + .iterate_shared = nfs_readdir, .open = nfs_opendir, .release = nfs_closedir, .fsync = nfs_fsync_dir, @@ -145,6 +145,7 @@ struct nfs_cache_array_entry { }; struct nfs_cache_array { + atomic_t refcount; int size; int eof_index; u64 last_cookie; @@ -200,11 +201,20 @@ void nfs_readdir_clear_array(struct page *page) int i; array = kmap_atomic(page); - for (i = 0; i < array->size; i++) - kfree(array->array[i].string.name); + if (atomic_dec_and_test(&array->refcount)) + for (i = 0; i < array->size; i++) + kfree(array->array[i].string.name); kunmap_atomic(array); } +static bool grab_page(struct page *page) +{ + struct nfs_cache_array *array = kmap_atomic(page); + bool res = atomic_inc_not_zero(&array->refcount); + kunmap_atomic(array); + return res; +} + /* * the caller is responsible for freeing qstr.name * when called by nfs_readdir_add_to_array, the strings will be freed in @@ -470,6 +480,7 @@ static void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) { struct qstr filename = QSTR_INIT(entry->name, entry->len); + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); struct dentry *dentry; struct dentry *alias; struct inode *dir = d_inode(parent); @@ -489,7 +500,13 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) filename.hash = full_name_hash(filename.name, filename.len); dentry = d_lookup(parent, &filename); - if (dentry != NULL) { +again: + if (!dentry) { + dentry = d_alloc_parallel(parent, &filename, &wq); + if (IS_ERR(dentry)) + return; + } + if (!d_in_lookup(dentry)) { /* Is there a mountpoint here? 
If so, just exit */ if (!nfs_fsid_equal(&NFS_SB(dentry->d_sb)->fsid, &entry->fattr->fsid)) @@ -503,26 +520,21 @@ void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) } else { d_invalidate(dentry); dput(dentry); + dentry = NULL; + goto again; } } - dentry = d_alloc(parent, &filename); - if (dentry == NULL) - return; - inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr, entry->label); - if (IS_ERR(inode)) - goto out; - alias = d_splice_alias(inode, dentry); - if (IS_ERR(alias)) - goto out; - else if (alias) { - nfs_set_verifier(alias, nfs_save_change_attribute(dir)); - dput(alias); - } else - nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); - + d_lookup_done(dentry); + if (alias) { + if (IS_ERR(alias)) + goto out; + dput(dentry); + dentry = alias; + } + nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); out: dput(dentry); } @@ -643,6 +655,7 @@ int nfs_readdir_xdr_to_array(nfs_readdir_descriptor_t *desc, struct page *page, goto out_label_free; } memset(array, 0, sizeof(struct nfs_cache_array)); + atomic_set(&array->refcount, 1); array->eof_index = -1; status = nfs_readdir_alloc_pages(pages, array_size); @@ -705,8 +718,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page* page) static void cache_page_release(nfs_readdir_descriptor_t *desc) { - if (!desc->page->mapping) - nfs_readdir_clear_array(desc->page); + nfs_readdir_clear_array(desc->page); put_page(desc->page); desc->page = NULL; } @@ -714,8 +726,16 @@ void cache_page_release(nfs_readdir_descriptor_t *desc) static struct page *get_cache_page(nfs_readdir_descriptor_t *desc) { - return read_cache_page(file_inode(desc->file)->i_mapping, + struct page *page; + + for (;;) { + page = read_cache_page(file_inode(desc->file)->i_mapping, desc->page_index, (filler_t *)nfs_readdir_filler, desc); + if (IS_ERR(page) || grab_page(page)) + break; + put_page(page); + } + return page; } /* @@ -889,7 +909,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) desc->decode = NFS_PROTO(inode)->decode_dirent; desc->plus = nfs_use_readdirplus(inode, ctx) ? 
1 : 0; - nfs_block_sillyrename(dentry); if (ctx->pos == 0 || nfs_dir_mapping_need_revalidate(inode)) res = nfs_revalidate_mapping(inode, file->f_mapping); if (res < 0) @@ -925,7 +944,6 @@ static int nfs_readdir(struct file *file, struct dir_context *ctx) break; } while (!desc->eof); out: - nfs_unblock_sillyrename(dentry); if (res > 0) res = 0; dfprintk(FILE, "NFS: readdir(%pD2) returns %d\n", file, res); @@ -934,13 +952,11 @@ out: static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) { - struct inode *inode = file_inode(filp); struct nfs_open_dir_context *dir_ctx = filp->private_data; dfprintk(FILE, "NFS: llseek dir(%pD2, %lld, %d)\n", filp, offset, whence); - inode_lock(inode); switch (whence) { case 1: offset += filp->f_pos; @@ -948,16 +964,13 @@ static loff_t nfs_llseek_dir(struct file *filp, loff_t offset, int whence) if (offset >= 0) break; default: - offset = -EINVAL; - goto out; + return -EINVAL; } if (offset != filp->f_pos) { filp->f_pos = offset; dir_ctx->dir_cookie = 0; dir_ctx->duped = 0; } -out: - inode_unlock(inode); return offset; } @@ -1383,7 +1396,6 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in parent = dentry->d_parent; /* Protect against concurrent sillydeletes */ trace_nfs_lookup_enter(dir, dentry, flags); - nfs_block_sillyrename(parent); error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label); if (error == -ENOENT) goto no_entry; @@ -1408,7 +1420,6 @@ no_entry: } nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); out_unblock_sillyrename: - nfs_unblock_sillyrename(parent); trace_nfs_lookup_exit(dir, dentry, flags, error); nfs4_label_free(label); out: @@ -1520,9 +1531,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, goto out; trace_nfs_atomic_open_enter(dir, ctx, open_flags); - nfs_block_sillyrename(dentry->d_parent); inode = NFS_PROTO(dir)->open_context(dir, ctx, open_flags, &attr, opened); - nfs_unblock_sillyrename(dentry->d_parent); if (IS_ERR(inode)) { err = PTR_ERR(inode); trace_nfs_atomic_open_exit(dir, ctx, open_flags, err); @@ -1766,7 +1775,7 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) trace_nfs_rmdir_enter(dir, dentry); if (d_really_is_positive(dentry)) { - nfs_wait_on_sillyrename(dentry); + down_write(&NFS_I(d_inode(dentry))->rmdir_sem); error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { @@ -1776,6 +1785,7 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) case -ENOENT: nfs_dentry_handle_enoent(dentry); } + up_write(&NFS_I(d_inode(dentry))->rmdir_sem); } else error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); trace_nfs_rmdir_exit(dir, dentry, error); diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index c93826e4a8c6..741a92c470bb 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -250,7 +250,7 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, * shunt off direct read and write requests before the VFS gets them, * so this method is only ever called for swap. 
*/ -ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) +ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct inode *inode = iocb->ki_filp->f_mapping->host; @@ -261,7 +261,7 @@ ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t pos) VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE); if (iov_iter_rw(iter) == READ) - return nfs_file_direct_read(iocb, iter, pos); + return nfs_file_direct_read(iocb, iter); return nfs_file_direct_write(iocb, iter); } @@ -396,7 +396,7 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write) static void nfs_direct_readpage_release(struct nfs_page *req) { dprintk("NFS: direct read done (%s/%llu %d@%lld)\n", - d_inode(req->wb_context->dentry)->i_sb->s_id, + req->wb_context->dentry->d_sb->s_id, (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)), req->wb_bytes, (long long)req_offset(req)); @@ -545,7 +545,6 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_read - file direct read operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers into which to read data - * @pos: byte offset in file where reading starts * * We use this function for direct reads instead of calling * generic_file_aio_read() in order to avoid gfar's check to see if @@ -561,8 +560,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, * client must read the updated atime from the server back into its * cache. */ -ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, - loff_t pos) +ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -574,7 +572,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count); dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n", - file, count, (long long) pos); + file, count, (long long) iocb->ki_pos); result = 0; if (!count) @@ -594,7 +592,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, dreq->inode = inode; dreq->bytes_left = count; - dreq->io_start = pos; + dreq->io_start = iocb->ki_pos; dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); l_ctx = nfs_get_lock_context(dreq->ctx); if (IS_ERR(l_ctx)) { @@ -606,14 +604,14 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter, dreq->iocb = iocb; NFS_I(inode)->read_io += count; - result = nfs_direct_read_schedule_iovec(dreq, iter, pos); + result = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos); inode_unlock(inode); if (!result) { result = nfs_direct_wait(dreq); if (result > 0) - iocb->ki_pos = pos + result; + iocb->ki_pos += result; } nfs_direct_req_release(dreq); @@ -969,7 +967,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, * nfs_file_direct_write - file direct write operation for NFS files * @iocb: target I/O control block * @iter: vector of user buffers from which to write data - * @pos: byte offset in file where writing starts * * We use this function for direct writes instead of calling * generic_file_aio_write() in order to avoid taking the inode @@ -1057,7 +1054,9 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) if (i_size_read(inode) < iocb->ki_pos) i_size_write(inode, iocb->ki_pos); spin_unlock(&inode->i_lock); - generic_write_sync(file, pos, result); + + /* XXX: should check the 
generic_write_sync retval */ + generic_write_sync(iocb, result); } } nfs_direct_req_release(dreq); diff --git a/fs/nfs/file.c b/fs/nfs/file.c index be01095b97ae..717a8d6af52d 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -164,7 +164,7 @@ nfs_file_read(struct kiocb *iocb, struct iov_iter *to) ssize_t result; if (iocb->ki_flags & IOCB_DIRECT) - return nfs_file_direct_read(iocb, to, iocb->ki_pos); + return nfs_file_direct_read(iocb, to); dprintk("NFS: read(%pD2, %zu@%lu)\n", iocb->ki_filp, diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 738c84a42eb0..52e7d6869e3b 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1958,9 +1958,7 @@ static void init_once(void *foo) nfsi->nrequests = 0; nfsi->commit_info.ncommit = 0; atomic_set(&nfsi->commit_info.rpcs_out, 0); - atomic_set(&nfsi->silly_count, 1); - INIT_HLIST_HEAD(&nfsi->silly_list); - init_waitqueue_head(&nfsi->waitqueue); + init_rwsem(&nfsi->rmdir_sem); nfs4_init_once(nfsi); } diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 17c0fa1eccfa..720d92f5abfb 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -11,6 +11,38 @@ #define NFSDBG_FACILITY NFSDBG_PROC +/* + * nfs3_prepare_get_acl, nfs3_complete_get_acl, nfs3_abort_get_acl: Helpers for + * caching get_acl results in a race-free way. See fs/posix_acl.c:get_acl() + * for explanations. + */ +static void nfs3_prepare_get_acl(struct posix_acl **p) +{ + struct posix_acl *sentinel = uncached_acl_sentinel(current); + + if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED) { + /* Not the first reader or sentinel already in place. */ + } +} + +static void nfs3_complete_get_acl(struct posix_acl **p, struct posix_acl *acl) +{ + struct posix_acl *sentinel = uncached_acl_sentinel(current); + + /* Only cache the ACL if our sentinel is still in place. */ + posix_acl_dup(acl); + if (cmpxchg(p, sentinel, acl) != sentinel) + posix_acl_release(acl); +} + +static void nfs3_abort_get_acl(struct posix_acl **p) +{ + struct posix_acl *sentinel = uncached_acl_sentinel(current); + + /* Remove our sentinel upon failure. 
*/ + cmpxchg(p, sentinel, ACL_NOT_CACHED); +} + struct posix_acl *nfs3_get_acl(struct inode *inode, int type) { struct nfs_server *server = NFS_SERVER(inode); @@ -55,6 +87,11 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) if (res.fattr == NULL) return ERR_PTR(-ENOMEM); + if (args.mask & NFS_ACL) + nfs3_prepare_get_acl(&inode->i_acl); + if (args.mask & NFS_DFACL) + nfs3_prepare_get_acl(&inode->i_default_acl); + status = rpc_call_sync(server->client_acl, &msg, 0); dprintk("NFS reply getacl: %d\n", status); @@ -89,12 +126,12 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) } if (res.mask & NFS_ACL) - set_cached_acl(inode, ACL_TYPE_ACCESS, res.acl_access); + nfs3_complete_get_acl(&inode->i_acl, res.acl_access); else forget_cached_acl(inode, ACL_TYPE_ACCESS); if (res.mask & NFS_DFACL) - set_cached_acl(inode, ACL_TYPE_DEFAULT, res.acl_default); + nfs3_complete_get_acl(&inode->i_default_acl, res.acl_default); else forget_cached_acl(inode, ACL_TYPE_DEFAULT); @@ -108,6 +145,8 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) } getout: + nfs3_abort_get_acl(&inode->i_acl); + nfs3_abort_get_acl(&inode->i_default_acl); posix_acl_release(res.acl_access); posix_acl_release(res.acl_default); nfs_free_fattr(res.fattr); diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 5ba22c6b0ffa..c444285bb1b1 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -201,7 +201,7 @@ int nfs_idmap_init(void) GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, - KEY_ALLOC_NOT_IN_QUOTA, NULL); + KEY_ALLOC_NOT_IN_QUOTA, NULL, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 327b8c34d360..084e8570da18 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -3777,7 +3777,7 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir) static void nfs4_proc_unlink_rpc_prepare(struct rpc_task *task, struct nfs_unlinkdata *data) { - nfs4_setup_sequence(NFS_SERVER(data->dir), + nfs4_setup_sequence(NFS_SB(data->dentry->d_sb), &data->args.seq_args, &data->res.seq_res, task); @@ -6263,10 +6263,10 @@ static int nfs4_xattr_set_nfs4_acl(const struct xattr_handler *handler, } static int nfs4_xattr_get_nfs4_acl(const struct xattr_handler *handler, - struct dentry *dentry, const char *key, - void *buf, size_t buflen) + struct dentry *unused, struct inode *inode, + const char *key, void *buf, size_t buflen) { - return nfs4_proc_get_acl(d_inode(dentry), buf, buflen); + return nfs4_proc_get_acl(inode, buf, buflen); } static bool nfs4_xattr_list_nfs4_acl(struct dentry *dentry) @@ -6288,11 +6288,11 @@ static int nfs4_xattr_set_nfs4_label(const struct xattr_handler *handler, } static int nfs4_xattr_get_nfs4_label(const struct xattr_handler *handler, - struct dentry *dentry, const char *key, - void *buf, size_t buflen) + struct dentry *unused, struct inode *inode, + const char *key, void *buf, size_t buflen) { if (security_ismaclabel(key)) - return nfs4_get_security_label(d_inode(dentry), buf, buflen); + return nfs4_get_security_label(inode, buf, buflen); return -EOPNOTSUPP; } diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 9f80a086b612..0b9e5cc9a747 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -702,7 +702,7 @@ TRACE_EVENT(nfs_sillyrename_unlink, ), TP_fast_assign( - struct inode *dir = data->dir; + struct inode *dir = d_inode(data->dentry->d_parent); size_t len = data->args.name.len; __entry->dev = 
dir->i_sb->s_dev; __entry->dir = NFS_FILEID(dir); diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index fa538b2ba251..1868246f56e6 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -30,45 +30,11 @@ static void nfs_free_unlinkdata(struct nfs_unlinkdata *data) { - iput(data->dir); put_rpccred(data->cred); kfree(data->args.name.name); kfree(data); } -#define NAME_ALLOC_LEN(len) ((len+16) & ~15) -/** - * nfs_copy_dname - copy dentry name to data structure - * @dentry: pointer to dentry - * @data: nfs_unlinkdata - */ -static int nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data) -{ - char *str; - int len = dentry->d_name.len; - - str = kmemdup(dentry->d_name.name, NAME_ALLOC_LEN(len), GFP_KERNEL); - if (!str) - return -ENOMEM; - data->args.name.len = len; - data->args.name.name = str; - return 0; -} - -static void nfs_free_dname(struct nfs_unlinkdata *data) -{ - kfree(data->args.name.name); - data->args.name.name = NULL; - data->args.name.len = 0; -} - -static void nfs_dec_sillycount(struct inode *dir) -{ - struct nfs_inode *nfsi = NFS_I(dir); - if (atomic_dec_return(&nfsi->silly_count) == 1) - wake_up(&nfsi->waitqueue); -} - /** * nfs_async_unlink_done - Sillydelete post-processing * @task: rpc_task of the sillydelete @@ -78,7 +44,7 @@ static void nfs_dec_sillycount(struct inode *dir) static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) { struct nfs_unlinkdata *data = calldata; - struct inode *dir = data->dir; + struct inode *dir = d_inode(data->dentry->d_parent); trace_nfs_sillyrename_unlink(data, task->tk_status); if (!NFS_PROTO(dir)->unlink_done(task, dir)) @@ -95,17 +61,21 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) static void nfs_async_unlink_release(void *calldata) { struct nfs_unlinkdata *data = calldata; - struct super_block *sb = data->dir->i_sb; + struct dentry *dentry = data->dentry; + struct super_block *sb = dentry->d_sb; - nfs_dec_sillycount(data->dir); + up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); + d_lookup_done(dentry); nfs_free_unlinkdata(data); + dput(dentry); nfs_sb_deactive(sb); } static void nfs_unlink_prepare(struct rpc_task *task, void *calldata) { struct nfs_unlinkdata *data = calldata; - NFS_PROTO(data->dir)->unlink_rpc_prepare(task, data); + struct inode *dir = d_inode(data->dentry->d_parent); + NFS_PROTO(dir)->unlink_rpc_prepare(task, data); } static const struct rpc_call_ops nfs_unlink_ops = { @@ -114,7 +84,7 @@ static const struct rpc_call_ops nfs_unlink_ops = { .rpc_call_prepare = nfs_unlink_prepare, }; -static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data) +static void nfs_do_call_unlink(struct nfs_unlinkdata *data) { struct rpc_message msg = { .rpc_argp = &data->args, @@ -129,10 +99,31 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n .flags = RPC_TASK_ASYNC, }; struct rpc_task *task; + struct inode *dir = d_inode(data->dentry->d_parent); + nfs_sb_active(dir->i_sb); + data->args.fh = NFS_FH(dir); + nfs_fattr_init(data->res.dir_attr); + + NFS_PROTO(dir)->unlink_setup(&msg, dir); + + task_setup_data.rpc_client = NFS_CLIENT(dir); + task = rpc_run_task(&task_setup_data); + if (!IS_ERR(task)) + rpc_put_task_async(task); +} + +static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data) +{ + struct inode *dir = d_inode(dentry->d_parent); struct dentry *alias; - alias = d_lookup(parent, &data->args.name); - if (alias != NULL) { + down_read_non_owner(&NFS_I(dir)->rmdir_sem); + 
alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq); + if (IS_ERR(alias)) { + up_read_non_owner(&NFS_I(dir)->rmdir_sem); + return 0; + } + if (!d_in_lookup(alias)) { int ret; void *devname_garbage = NULL; @@ -140,10 +131,8 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n * Hey, we raced with lookup... See if we need to transfer * the sillyrename information to the aliased dentry. */ - nfs_free_dname(data); - ret = nfs_copy_dname(alias, data); spin_lock(&alias->d_lock); - if (ret == 0 && d_really_is_positive(alias) && + if (d_really_is_positive(alias) && !(alias->d_flags & DCACHE_NFSFS_RENAMED)) { devname_garbage = alias->d_fsdata; alias->d_fsdata = data; @@ -152,8 +141,8 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n } else ret = 0; spin_unlock(&alias->d_lock); - nfs_dec_sillycount(dir); dput(alias); + up_read_non_owner(&NFS_I(dir)->rmdir_sem); /* * If we'd displaced old cached devname, free it. At that * point dentry is definitely not a root, so we won't need @@ -162,94 +151,18 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n kfree(devname_garbage); return ret; } - data->dir = igrab(dir); - if (!data->dir) { - nfs_dec_sillycount(dir); - return 0; - } - nfs_sb_active(dir->i_sb); - data->args.fh = NFS_FH(dir); - nfs_fattr_init(data->res.dir_attr); - - NFS_PROTO(dir)->unlink_setup(&msg, dir); - - task_setup_data.rpc_client = NFS_CLIENT(dir); - task = rpc_run_task(&task_setup_data); - if (!IS_ERR(task)) - rpc_put_task_async(task); + data->dentry = alias; + nfs_do_call_unlink(data); return 1; } -static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data) -{ - struct dentry *parent; - struct inode *dir; - int ret = 0; - - - parent = dget_parent(dentry); - if (parent == NULL) - goto out_free; - dir = d_inode(parent); - /* Non-exclusive lock protects against concurrent lookup() calls */ - spin_lock(&dir->i_lock); - if (atomic_inc_not_zero(&NFS_I(dir)->silly_count) == 0) { - /* Deferred delete */ - hlist_add_head(&data->list, &NFS_I(dir)->silly_list); - spin_unlock(&dir->i_lock); - ret = 1; - goto out_dput; - } - spin_unlock(&dir->i_lock); - ret = nfs_do_call_unlink(parent, dir, data); -out_dput: - dput(parent); -out_free: - return ret; -} - -void nfs_wait_on_sillyrename(struct dentry *dentry) -{ - struct nfs_inode *nfsi = NFS_I(d_inode(dentry)); - - wait_event(nfsi->waitqueue, atomic_read(&nfsi->silly_count) <= 1); -} - -void nfs_block_sillyrename(struct dentry *dentry) -{ - struct nfs_inode *nfsi = NFS_I(d_inode(dentry)); - - wait_event(nfsi->waitqueue, atomic_cmpxchg(&nfsi->silly_count, 1, 0) == 1); -} - -void nfs_unblock_sillyrename(struct dentry *dentry) -{ - struct inode *dir = d_inode(dentry); - struct nfs_inode *nfsi = NFS_I(dir); - struct nfs_unlinkdata *data; - - atomic_inc(&nfsi->silly_count); - spin_lock(&dir->i_lock); - while (!hlist_empty(&nfsi->silly_list)) { - if (!atomic_inc_not_zero(&nfsi->silly_count)) - break; - data = hlist_entry(nfsi->silly_list.first, struct nfs_unlinkdata, list); - hlist_del(&data->list); - spin_unlock(&dir->i_lock); - if (nfs_do_call_unlink(dentry, dir, data) == 0) - nfs_free_unlinkdata(data); - spin_lock(&dir->i_lock); - } - spin_unlock(&dir->i_lock); -} - /** * nfs_async_unlink - asynchronous unlinking of a file * @dir: parent directory of dentry * @dentry: dentry to unlink */ static int -nfs_async_unlink(struct inode *dir, struct dentry *dentry) +nfs_async_unlink(struct dentry *dentry, struct qstr *name) { 
struct nfs_unlinkdata *data; int status = -ENOMEM; @@ -258,13 +171,18 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry) data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) goto out; + data->args.name.name = kstrdup(name->name, GFP_KERNEL); + if (!data->args.name.name) + goto out_free; + data->args.name.len = name->len; data->cred = rpc_lookup_cred(); if (IS_ERR(data->cred)) { status = PTR_ERR(data->cred); - goto out_free; + goto out_free_name; } data->res.dir_attr = &data->dir_attr; + init_waitqueue_head(&data->wq); status = -EBUSY; spin_lock(&dentry->d_lock); @@ -284,6 +202,8 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry) out_unlock: spin_unlock(&dentry->d_lock); put_rpccred(data->cred); +out_free_name: + kfree(data->args.name.name); out_free: kfree(data); out: @@ -302,17 +222,15 @@ out: void nfs_complete_unlink(struct dentry *dentry, struct inode *inode) { - struct nfs_unlinkdata *data = NULL; + struct nfs_unlinkdata *data; spin_lock(&dentry->d_lock); - if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { - dentry->d_flags &= ~DCACHE_NFSFS_RENAMED; - data = dentry->d_fsdata; - dentry->d_fsdata = NULL; - } + dentry->d_flags &= ~DCACHE_NFSFS_RENAMED; + data = dentry->d_fsdata; + dentry->d_fsdata = NULL; spin_unlock(&dentry->d_lock); - if (data != NULL && (NFS_STALE(inode) || !nfs_call_unlink(dentry, data))) + if (NFS_STALE(inode) || !nfs_call_unlink(dentry, data)) nfs_free_unlinkdata(data); } @@ -559,18 +477,10 @@ nfs_sillyrename(struct inode *dir, struct dentry *dentry) /* queue unlink first. Can't do this from rpc_release as it * has to allocate memory */ - error = nfs_async_unlink(dir, dentry); + error = nfs_async_unlink(dentry, &sdentry->d_name); if (error) goto out_dput; - /* populate unlinkdata with the right dname */ - error = nfs_copy_dname(sdentry, - (struct nfs_unlinkdata *)dentry->d_fsdata); - if (error) { - nfs_cancel_async_unlink(dentry); - goto out_dput; - } - /* run the rename task, undo unlink if it fails */ task = nfs_async_rename(dir, dir, dentry, sdentry, nfs_complete_sillyrename); diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c index 51c3b06e8036..d818e4ffd79f 100644 --- a/fs/nfsd/nfs3proc.c +++ b/fs/nfsd/nfs3proc.c @@ -552,7 +552,7 @@ nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, * different read/write sizes for file systems known to have * problems with large blocks */ if (nfserr == 0) { - struct super_block *sb = d_inode(argp->fh.fh_dentry)->i_sb; + struct super_block *sb = argp->fh.fh_dentry->d_sb; /* Note that we don't care for remote fs's here */ if (sb->s_magic == MSDOS_SUPER_MAGIC) { @@ -588,7 +588,7 @@ nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, nfserr = fh_verify(rqstp, &argp->fh, 0, NFSD_MAY_NOP); if (nfserr == 0) { - struct super_block *sb = d_inode(argp->fh.fh_dentry)->i_sb; + struct super_block *sb = argp->fh.fh_dentry->d_sb; /* Note that we don't care for remote fs's here */ switch (sb->s_magic) { diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c index 2246454dec76..93d5853f8c99 100644 --- a/fs/nfsd/nfs3xdr.c +++ b/fs/nfsd/nfs3xdr.c @@ -146,7 +146,7 @@ static __be32 *encode_fsid(__be32 *p, struct svc_fh *fhp) default: case FSIDSOURCE_DEV: p = xdr_encode_hyper(p, (u64)huge_encode_dev - (d_inode(fhp->fh_dentry)->i_sb->s_dev)); + (fhp->fh_dentry->d_sb->s_dev)); break; case FSIDSOURCE_FSID: p = xdr_encode_hyper(p, (u64) fhp->fh_export->ex_fsid); diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index c1681ce894c5..a8919444c460 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c 
@@ -426,7 +426,7 @@ static bool is_root_export(struct svc_export *exp) static struct super_block *exp_sb(struct svc_export *exp) { - return d_inode(exp->ex_path.dentry)->i_sb; + return exp->ex_path.dentry->d_sb; } static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index d40010e4f1a9..6fbd81ecb410 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -935,8 +935,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, int stable = *stablep; int use_wgather; loff_t pos = offset; - loff_t end = LLONG_MAX; unsigned int pflags = current->flags; + int flags = 0; if (test_bit(RQ_LOCAL, &rqstp->rq_flags)) /* @@ -955,9 +955,12 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, if (!EX_ISSYNC(exp)) stable = 0; + if (stable && !use_wgather) + flags |= RWF_SYNC; + /* Write the data. */ oldfs = get_fs(); set_fs(KERNEL_DS); - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos, 0); + host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos, flags); set_fs(oldfs); if (host_err < 0) goto out_nfserr; @@ -965,15 +968,8 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file, nfsdstats.io_write += host_err; fsnotify_modify(file); - if (stable) { - if (use_wgather) { - host_err = wait_for_concurrent_writes(file); - } else { - if (*cnt) - end = offset + *cnt - 1; - host_err = vfs_fsync_range(file, offset, end, 0); - } - } + if (stable && use_wgather) + host_err = wait_for_concurrent_writes(file); out_nfserr: dprintk("nfsd: write complete host_err=%d\n", host_err); diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 2ccbf5531554..1a85d94f5b25 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -13,13 +13,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Original code was written by Koji Sato <koji@osrg.net>. - * Two allocators were unified by Ryusuke Konishi <ryusuke@osrg.net>, - * Amagai Yoshiji <amagai@osrg.net>. + * Originally written by Koji Sato. + * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. 
*/ #include <linux/types.h> @@ -58,7 +53,7 @@ nilfs_palloc_groups_count(const struct inode *inode) * @inode: inode of metadata file using this allocator * @entry_size: size of the persistent object */ -int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size) +int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); @@ -73,13 +68,17 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned entry_size) mi->mi_blocks_per_group = DIV_ROUND_UP(nilfs_palloc_entries_per_group(inode), mi->mi_entries_per_block) + 1; - /* Number of blocks in a group including entry blocks and - a bitmap block */ + /* + * Number of blocks in a group including entry blocks + * and a bitmap block + */ mi->mi_blocks_per_desc_block = nilfs_palloc_groups_per_desc_block(inode) * mi->mi_blocks_per_group + 1; - /* Number of blocks per descriptor including the - descriptor block */ + /* + * Number of blocks per descriptor including the + * descriptor block + */ return 0; } @@ -389,7 +388,7 @@ void *nilfs_palloc_block_get_entry(const struct inode *inode, __u64 nr, */ static int nilfs_palloc_find_available_slot(unsigned char *bitmap, unsigned long target, - unsigned bsize, + unsigned int bsize, spinlock_t *lock) { int pos, end = bsize; @@ -624,7 +623,7 @@ void nilfs_palloc_commit_free_entry(struct inode *inode, if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) nilfs_warning(inode->i_sb, __func__, - "entry number %llu already freed: ino=%lu\n", + "entry number %llu already freed: ino=%lu", (unsigned long long)req->pr_entry_nr, (unsigned long)inode->i_ino); else @@ -665,7 +664,7 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) nilfs_warning(inode->i_sb, __func__, - "entry number %llu already freed: ino=%lu\n", + "entry number %llu already freed: ino=%lu", (unsigned long long)req->pr_entry_nr, (unsigned long)inode->i_ino); else @@ -740,8 +739,8 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) unsigned long group, group_offset; __u64 group_min_nr, last_nrs[8]; const unsigned long epg = nilfs_palloc_entries_per_group(inode); - const unsigned epb = NILFS_MDT(inode)->mi_entries_per_block; - unsigned entry_start, end, pos; + const unsigned int epb = NILFS_MDT(inode)->mi_entries_per_block; + unsigned int entry_start, end, pos; spinlock_t *lock; int i, j, k, ret; u32 nfree; @@ -774,7 +773,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) { nilfs_warning(inode->i_sb, __func__, - "entry number %llu already freed: ino=%lu\n", + "entry number %llu already freed: ino=%lu", (unsigned long long)entry_nrs[j], (unsigned long)inode->i_ino); } else { @@ -819,7 +818,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) last_nrs[k]); if (ret && ret != -ENOENT) { nilfs_warning(inode->i_sb, __func__, - "failed to delete block of entry %llu: ino=%lu, err=%d\n", + "failed to delete block of entry %llu: ino=%lu, err=%d", (unsigned long long)last_nrs[k], (unsigned long)inode->i_ino, ret); } @@ -838,7 +837,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) ret = nilfs_palloc_delete_bitmap_block(inode, group); if (ret && ret != -ENOENT) { nilfs_warning(inode->i_sb, __func__, - "failed to delete bitmap block of group %lu: ino=%lu, err=%d\n", + "failed to delete bitmap block of group %lu: ino=%lu, err=%d", group, 
(unsigned long)inode->i_ino, ret); } diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h index 6e6f49aa53df..05149e606a78 100644 --- a/fs/nilfs2/alloc.h +++ b/fs/nilfs2/alloc.h @@ -13,13 +13,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Original code was written by Koji Sato <koji@osrg.net>. - * Two allocators were unified by Ryusuke Konishi <ryusuke@osrg.net>, - * Amagai Yoshiji <amagai@osrg.net>. + * Originally written by Koji Sato. + * Two allocators were unified by Ryusuke Konishi and Amagai Yoshiji. */ #ifndef _NILFS_ALLOC_H @@ -42,7 +37,7 @@ nilfs_palloc_entries_per_group(const struct inode *inode) return 1UL << (inode->i_blkbits + 3 /* log2(8 = CHAR_BITS) */); } -int nilfs_palloc_init_blockgroup(struct inode *, unsigned); +int nilfs_palloc_init_blockgroup(struct inode *, unsigned int); int nilfs_palloc_get_entry_block(struct inode *, __u64, int, struct buffer_head **); void *nilfs_palloc_block_get_entry(const struct inode *, __u64, diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index a9fb3636c142..f2a7877e0c8c 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. */ #include <linux/fs.h> @@ -46,7 +42,7 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, if (err == -EINVAL) { nilfs_error(inode->i_sb, fname, - "broken bmap (inode number=%lu)\n", inode->i_ino); + "broken bmap (inode number=%lu)", inode->i_ino); err = -EIO; } return err; @@ -97,7 +93,7 @@ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, } int nilfs_bmap_lookup_contig(struct nilfs_bmap *bmap, __u64 key, __u64 *ptrp, - unsigned maxblocks) + unsigned int maxblocks) { int ret; diff --git a/fs/nilfs2/bmap.h b/fs/nilfs2/bmap.h index bfa817ce40b3..b6a4c8f93ac8 100644 --- a/fs/nilfs2/bmap.h +++ b/fs/nilfs2/bmap.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. */ #ifndef _NILFS_BMAP_H @@ -61,7 +57,7 @@ struct nilfs_bmap_stats { struct nilfs_bmap_operations { int (*bop_lookup)(const struct nilfs_bmap *, __u64, int, __u64 *); int (*bop_lookup_contig)(const struct nilfs_bmap *, __u64, __u64 *, - unsigned); + unsigned int); int (*bop_insert)(struct nilfs_bmap *, __u64, __u64); int (*bop_delete)(struct nilfs_bmap *, __u64); void (*bop_clear)(struct nilfs_bmap *); @@ -126,10 +122,14 @@ struct nilfs_bmap { /* pointer type */ #define NILFS_BMAP_PTR_P 0 /* physical block number (i.e. 
LBN) */ -#define NILFS_BMAP_PTR_VS 1 /* virtual block number (single - version) */ -#define NILFS_BMAP_PTR_VM 2 /* virtual block number (has multiple - versions) */ +#define NILFS_BMAP_PTR_VS 1 /* + * virtual block number (single + * version) + */ +#define NILFS_BMAP_PTR_VM 2 /* + * virtual block number (has multiple + * versions) + */ #define NILFS_BMAP_PTR_U (-1) /* never perform pointer operations */ #define NILFS_BMAP_USE_VBN(bmap) ((bmap)->b_ptr_type > 0) @@ -154,7 +154,7 @@ struct nilfs_bmap_store { int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *); int nilfs_bmap_read(struct nilfs_bmap *, struct nilfs_inode *); void nilfs_bmap_write(struct nilfs_bmap *, struct nilfs_inode *); -int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned); +int nilfs_bmap_lookup_contig(struct nilfs_bmap *, __u64, __u64 *, unsigned int); int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec); int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key); int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp); diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index e0c9daf9aa22..0576033699bc 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -13,13 +13,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * This file was originally written by Seiji Kihara <kihara@osrg.net> - * and fully revised by Ryusuke Konishi <ryusuke@osrg.net> for - * stabilization and simplification. + * Originally written by Seiji Kihara. + * Fully revised by Ryusuke Konishi for stabilization and simplification. * */ diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index d876b565ce64..2cc1b80e18f7 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h @@ -13,12 +13,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Seiji Kihara <kihara@osrg.net> - * Revised by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Seiji Kihara. + * Revised by Ryusuke Konishi. */ #ifndef _NILFS_BTNODE_H diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 3a3821b00486..eccb1c89ccbb 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. 
*/ #include <linux/slab.h> @@ -689,7 +685,8 @@ static int nilfs_btree_lookup(const struct nilfs_bmap *btree, } static int nilfs_btree_lookup_contig(const struct nilfs_bmap *btree, - __u64 key, __u64 *ptrp, unsigned maxblocks) + __u64 key, __u64 *ptrp, + unsigned int maxblocks) { struct nilfs_btree_path *path; struct nilfs_btree_node *node; @@ -1032,12 +1029,12 @@ static __u64 nilfs_btree_find_target_v(const struct nilfs_bmap *btree, if (ptr != NILFS_BMAP_INVALID_PTR) /* sequential access */ return ptr; - else { - ptr = nilfs_btree_find_near(btree, path); - if (ptr != NILFS_BMAP_INVALID_PTR) - /* near */ - return ptr; - } + + ptr = nilfs_btree_find_near(btree, path); + if (ptr != NILFS_BMAP_INVALID_PTR) + /* near */ + return ptr; + /* block group */ return nilfs_bmap_find_target_in_group(btree); } diff --git a/fs/nilfs2/btree.h b/fs/nilfs2/btree.h index 22c02e35b6ef..df1a25faa83b 100644 --- a/fs/nilfs2/btree.h +++ b/fs/nilfs2/btree.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. */ #ifndef _NILFS_BTREE_H diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index b6596cab9e99..8a3d3b65af3f 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. 
*/ #include <linux/kernel.h> @@ -41,6 +37,7 @@ static unsigned long nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno) { __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; + do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); return (unsigned long)tcno; } @@ -50,6 +47,7 @@ static unsigned long nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno) { __u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1; + return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile)); } @@ -433,7 +431,8 @@ static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile, } static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, - void *buf, unsigned cisz, size_t nci) + void *buf, unsigned int cisz, + size_t nci) { struct nilfs_checkpoint *cp; struct nilfs_cpinfo *ci = buf; @@ -484,7 +483,8 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop, } static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, - void *buf, unsigned cisz, size_t nci) + void *buf, unsigned int cisz, + size_t nci) { struct buffer_head *bh; struct nilfs_cpfile_header *header; @@ -570,7 +570,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, */ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, - void *buf, unsigned cisz, size_t nci) + void *buf, unsigned int cisz, size_t nci) { switch (mode) { case NILFS_CHECKPOINT: @@ -870,8 +870,10 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) void *kaddr; int ret; - /* CP number is invalid if it's zero or larger than the - largest exist one.*/ + /* + * CP number is invalid if it's zero or larger than the + * largest existing one. + */ if (cno == 0 || cno >= nilfs_mdt_cno(cpfile)) return -ENOENT; down_read(&NILFS_MDT(cpfile)->mi_sem); diff --git a/fs/nilfs2/cpfile.h b/fs/nilfs2/cpfile.h index a242b9a314f9..0249744ae234 100644 --- a/fs/nilfs2/cpfile.h +++ b/fs/nilfs2/cpfile.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. */ #ifndef _NILFS_CPFILE_H @@ -37,8 +33,8 @@ int nilfs_cpfile_delete_checkpoint(struct inode *, __u64); int nilfs_cpfile_change_cpmode(struct inode *, __u64, int); int nilfs_cpfile_is_snapshot(struct inode *, __u64); int nilfs_cpfile_get_stat(struct inode *, struct nilfs_cpstat *); -ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, unsigned, - size_t); +ssize_t nilfs_cpfile_get_cpinfo(struct inode *, __u64 *, int, void *, + unsigned int, size_t); int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, struct nilfs_inode *raw_inode, struct inode **inodep); diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index 7dc23f100e57..7367610ea807 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. 
*/ #include <linux/types.h> @@ -428,7 +424,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) return ret; } -ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz, +ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz, size_t nvi) { struct buffer_head *entry_bh; diff --git a/fs/nilfs2/dat.h b/fs/nilfs2/dat.h index cbd8e9732503..abbfdabcabea 100644 --- a/fs/nilfs2/dat.h +++ b/fs/nilfs2/dat.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. */ #ifndef _NILFS_DAT_H @@ -51,7 +47,7 @@ void nilfs_dat_abort_update(struct inode *, struct nilfs_palloc_req *, int nilfs_dat_mark_dirty(struct inode *, __u64); int nilfs_dat_freev(struct inode *, __u64 *, size_t); int nilfs_dat_move(struct inode *, __u64, sector_t); -ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned, size_t); +ssize_t nilfs_dat_get_vinfo(struct inode *, void *, unsigned int, size_t); int nilfs_dat_read(struct super_block *sb, size_t entry_size, struct nilfs_inode *raw_inode, struct inode **inodep); diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index e08f064e4bd7..e506f4f7120a 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Modified for NILFS by Amagai Yoshiji <amagai@osrg.net> + * Modified for NILFS by Amagai Yoshiji. */ /* * linux/fs/ext2/dir.c @@ -50,7 +46,7 @@ * nilfs uses block-sized chunks. Arguably, sector-sized ones would be * more robust, but we have what we have */ -static inline unsigned nilfs_chunk_size(struct inode *inode) +static inline unsigned int nilfs_chunk_size(struct inode *inode) { return inode->i_sb->s_blocksize; } @@ -65,9 +61,9 @@ static inline void nilfs_put_page(struct page *page) * Return the offset into page `page_nr' of the last valid * byte in that page, plus one. 
*/ -static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr) +static unsigned int nilfs_last_byte(struct inode *inode, unsigned long page_nr) { - unsigned last_byte = inode->i_size; + unsigned int last_byte = inode->i_size; last_byte -= page_nr << PAGE_SHIFT; if (last_byte > PAGE_SIZE) @@ -75,20 +71,22 @@ static unsigned nilfs_last_byte(struct inode *inode, unsigned long page_nr) return last_byte; } -static int nilfs_prepare_chunk(struct page *page, unsigned from, unsigned to) +static int nilfs_prepare_chunk(struct page *page, unsigned int from, + unsigned int to) { loff_t pos = page_offset(page) + from; + return __block_write_begin(page, pos, to - from, nilfs_get_block); } static void nilfs_commit_chunk(struct page *page, struct address_space *mapping, - unsigned from, unsigned to) + unsigned int from, unsigned int to) { struct inode *dir = mapping->host; loff_t pos = page_offset(page) + from; - unsigned len = to - from; - unsigned nr_dirty, copied; + unsigned int len = to - from; + unsigned int nr_dirty, copied; int err; nr_dirty = nilfs_page_count_clean_buffers(page, from, to); @@ -102,14 +100,14 @@ static void nilfs_commit_chunk(struct page *page, unlock_page(page); } -static void nilfs_check_page(struct page *page) +static bool nilfs_check_page(struct page *page) { struct inode *dir = page->mapping->host; struct super_block *sb = dir->i_sb; - unsigned chunk_size = nilfs_chunk_size(dir); + unsigned int chunk_size = nilfs_chunk_size(dir); char *kaddr = page_address(page); - unsigned offs, rec_len; - unsigned limit = PAGE_SIZE; + unsigned int offs, rec_len; + unsigned int limit = PAGE_SIZE; struct nilfs_dir_entry *p; char *error; @@ -137,7 +135,7 @@ static void nilfs_check_page(struct page *page) goto Eend; out: SetPageChecked(page); - return; + return true; /* Too bad, we had an error */ @@ -173,8 +171,8 @@ Eend: dir->i_ino, (page->index<<PAGE_SHIFT)+offs, (unsigned long) le64_to_cpu(p->inode)); fail: - SetPageChecked(page); SetPageError(page); + return false; } static struct page *nilfs_get_page(struct inode *dir, unsigned long n) @@ -184,10 +182,10 @@ static struct page *nilfs_get_page(struct inode *dir, unsigned long n) if (!IS_ERR(page)) { kmap(page); - if (!PageChecked(page)) - nilfs_check_page(page); - if (PageError(page)) - goto fail; + if (unlikely(!PageChecked(page))) { + if (PageError(page) || !nilfs_check_page(page)) + goto fail; + } } return page; @@ -259,7 +257,6 @@ static int nilfs_readdir(struct file *file, struct dir_context *ctx) unsigned int offset = pos & ~PAGE_MASK; unsigned long n = pos >> PAGE_SHIFT; unsigned long npages = dir_pages(inode); -/* unsigned chunk_mask = ~(nilfs_chunk_size(inode)-1); */ if (pos > inode->i_size - NILFS_DIR_REC_LEN(1)) return 0; @@ -321,7 +318,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr, { const unsigned char *name = qstr->name; int namelen = qstr->len; - unsigned reclen = NILFS_DIR_REC_LEN(namelen); + unsigned int reclen = NILFS_DIR_REC_LEN(namelen); unsigned long start, n; unsigned long npages = dir_pages(dir); struct page *page = NULL; @@ -340,6 +337,7 @@ nilfs_find_entry(struct inode *dir, const struct qstr *qstr, n = start; do { char *kaddr; + page = nilfs_get_page(dir, n); if (!IS_ERR(page)) { kaddr = page_address(page); @@ -410,8 +408,8 @@ ino_t nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr) void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, struct page *page, struct inode *inode) { - unsigned from = (char *) de - (char *) page_address(page); - unsigned to = 
from + nilfs_rec_len_from_disk(de->rec_len); + unsigned int from = (char *)de - (char *)page_address(page); + unsigned int to = from + nilfs_rec_len_from_disk(de->rec_len); struct address_space *mapping = page->mapping; int err; @@ -433,15 +431,15 @@ int nilfs_add_link(struct dentry *dentry, struct inode *inode) struct inode *dir = d_inode(dentry->d_parent); const unsigned char *name = dentry->d_name.name; int namelen = dentry->d_name.len; - unsigned chunk_size = nilfs_chunk_size(dir); - unsigned reclen = NILFS_DIR_REC_LEN(namelen); + unsigned int chunk_size = nilfs_chunk_size(dir); + unsigned int reclen = NILFS_DIR_REC_LEN(namelen); unsigned short rec_len, name_len; struct page *page = NULL; struct nilfs_dir_entry *de; unsigned long npages = dir_pages(dir); unsigned long n; char *kaddr; - unsigned from, to; + unsigned int from, to; int err; /* @@ -533,13 +531,14 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page) struct address_space *mapping = page->mapping; struct inode *inode = mapping->host; char *kaddr = page_address(page); - unsigned from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1); - unsigned to = ((char *)dir - kaddr) + - nilfs_rec_len_from_disk(dir->rec_len); - struct nilfs_dir_entry *pde = NULL; - struct nilfs_dir_entry *de = (struct nilfs_dir_entry *)(kaddr + from); + unsigned int from, to; + struct nilfs_dir_entry *de, *pde = NULL; int err; + from = ((char *)dir - kaddr) & ~(nilfs_chunk_size(inode) - 1); + to = ((char *)dir - kaddr) + nilfs_rec_len_from_disk(dir->rec_len); + de = (struct nilfs_dir_entry *)(kaddr + from); + while ((char *)de < (char *)dir) { if (de->rec_len == 0) { nilfs_error(inode->i_sb, __func__, @@ -572,7 +571,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent) { struct address_space *mapping = inode->i_mapping; struct page *page = grab_cache_page(mapping, 0); - unsigned chunk_size = nilfs_chunk_size(inode); + unsigned int chunk_size = nilfs_chunk_size(inode); struct nilfs_dir_entry *de; int err; void *kaddr; @@ -630,8 +629,8 @@ int nilfs_empty_dir(struct inode *inode) while ((char *)de <= kaddr) { if (de->rec_len == 0) { nilfs_error(inode->i_sb, __func__, - "zero-length directory entry " - "(kaddr=%p, de=%p)\n", kaddr, de); + "zero-length directory entry (kaddr=%p, de=%p)", + kaddr, de); goto not_empty; } if (de->inode != 0) { @@ -661,7 +660,7 @@ not_empty: const struct file_operations nilfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = nilfs_readdir, + .iterate_shared = nilfs_readdir, .unlocked_ioctl = nilfs_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = nilfs_compat_ioctl, diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index ebf89fd8ac1a..251a44928405 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. 
*/ #include <linux/errno.h> @@ -62,7 +58,7 @@ static int nilfs_direct_lookup(const struct nilfs_bmap *direct, static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct, __u64 key, __u64 *ptrp, - unsigned maxblocks) + unsigned int maxblocks) { struct inode *dat = NULL; __u64 ptr, ptr2; @@ -83,7 +79,8 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *direct, ptr = blocknr; } - maxblocks = min_t(unsigned, maxblocks, NILFS_DIRECT_KEY_MAX - key + 1); + maxblocks = min_t(unsigned int, maxblocks, + NILFS_DIRECT_KEY_MAX - key + 1); for (cnt = 1; cnt < maxblocks && (ptr2 = nilfs_direct_get_ptr(direct, key + cnt)) != NILFS_BMAP_INVALID_PTR; @@ -110,9 +107,9 @@ nilfs_direct_find_target_v(const struct nilfs_bmap *direct, __u64 key) if (ptr != NILFS_BMAP_INVALID_PTR) /* sequential access */ return ptr; - else - /* block group */ - return nilfs_bmap_find_target_in_group(direct); + + /* block group */ + return nilfs_bmap_find_target_in_group(direct); } static int nilfs_direct_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) diff --git a/fs/nilfs2/direct.h b/fs/nilfs2/direct.h index dc643de20a25..3015a6e78724 100644 --- a/fs/nilfs2/direct.h +++ b/fs/nilfs2/direct.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. */ #ifndef _NILFS_DIRECT_H diff --git a/fs/nilfs2/export.h b/fs/nilfs2/export.h index 19ccbf9522ab..00107fdb9343 100644 --- a/fs/nilfs2/export.h +++ b/fs/nilfs2/export.h @@ -20,6 +20,6 @@ struct nilfs_fid { u32 parent_gen; u64 parent_ino; -} __attribute__ ((packed)); +} __packed; #endif diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c index 088ba001c6ef..547381f3ce13 100644 --- a/fs/nilfs2/file.c +++ b/fs/nilfs2/file.c @@ -13,12 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Amagai Yoshiji <amagai@osrg.net>, - * Ryusuke Konishi <ryusuke@osrg.net> + * Written by Amagai Yoshiji and Ryusuke Konishi. */ #include <linux/fs.h> diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 0224b7826ace..693aded72498 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -13,13 +13,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Seiji Kihara <kihara@osrg.net>, Amagai Yoshiji <amagai@osrg.net>, - * and Ryusuke Konishi <ryusuke@osrg.net>. - * Revised by Ryusuke Konishi <ryusuke@osrg.net>. + * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi. + * Revised by Ryusuke Konishi. * */ /* diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index 6548c7851b48..1d2b1805327a 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -13,12 +13,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Amagai Yoshiji <amagai@osrg.net>. - * Revised by Ryusuke Konishi <ryusuke@osrg.net>. + * Written by Amagai Yoshiji. + * Revised by Ryusuke Konishi. * */ @@ -68,8 +64,10 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, struct nilfs_palloc_req req; int ret; - req.pr_entry_nr = 0; /* 0 says find free inode from beginning of - a group. dull code!! */ + req.pr_entry_nr = 0; /* + * 0 says find free inode from beginning + * of a group. dull code!! + */ req.pr_entry_bh = NULL; ret = nilfs_palloc_prepare_alloc_entry(ifile, &req); diff --git a/fs/nilfs2/ifile.h b/fs/nilfs2/ifile.h index 679674d13372..23ad2f091e76 100644 --- a/fs/nilfs2/ifile.h +++ b/fs/nilfs2/ifile.h @@ -13,12 +13,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Amagai Yoshiji <amagai@osrg.net> - * Revised by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Amagai Yoshiji. + * Revised by Ryusuke Konishi. * */ @@ -36,6 +32,7 @@ static inline struct nilfs_inode * nilfs_ifile_map_inode(struct inode *ifile, ino_t ino, struct buffer_head *ibh) { void *kaddr = kmap(ibh->b_page); + return nilfs_palloc_block_get_entry(ifile, ino, ibh, kaddr); } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 534631358b13..a0ebdb17e912 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. * */ @@ -87,7 +83,7 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, struct the_nilfs *nilfs = inode->i_sb->s_fs_info; __u64 blknum = 0; int err = 0, ret; - unsigned maxblocks = bh_result->b_size >> inode->i_blkbits; + unsigned int maxblocks = bh_result->b_size >> inode->i_blkbits; down_read(&NILFS_MDT(nilfs->ns_dat)->mi_sem); ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks); @@ -133,11 +129,14 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, /* Error handling should be detailed */ set_buffer_new(bh_result); set_buffer_delay(bh_result); - map_bh(bh_result, inode->i_sb, 0); /* dbn must be changed - to proper value */ + map_bh(bh_result, inode->i_sb, 0); + /* Disk block number must be changed to proper value */ + } else if (ret == -ENOENT) { - /* not found is not error (e.g. hole); must return without - the mapped state flag. */ + /* + * not found is not error (e.g. hole); must return without + * the mapped state flag. 
+ */ ; } else { err = ret; @@ -167,7 +166,7 @@ static int nilfs_readpage(struct file *file, struct page *page) * @nr_pages - number of pages to be read */ static int nilfs_readpages(struct file *file, struct address_space *mapping, - struct list_head *pages, unsigned nr_pages) + struct list_head *pages, unsigned int nr_pages) { return mpage_readpages(mapping, pages, nr_pages, nilfs_get_block); } @@ -226,7 +225,7 @@ static int nilfs_set_page_dirty(struct page *page) int ret = __set_page_dirty_nobuffers(page); if (page_has_buffers(page)) { - unsigned nr_dirty = 0; + unsigned int nr_dirty = 0; struct buffer_head *bh, *head; /* @@ -249,7 +248,7 @@ static int nilfs_set_page_dirty(struct page *page) if (nr_dirty) nilfs_set_file_dirty(inode, nr_dirty); } else if (ret) { - unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); + unsigned int nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); nilfs_set_file_dirty(inode, nr_dirty); } @@ -291,8 +290,8 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct inode *inode = mapping->host; - unsigned start = pos & (PAGE_SIZE - 1); - unsigned nr_dirty; + unsigned int start = pos & (PAGE_SIZE - 1); + unsigned int nr_dirty; int err; nr_dirty = nilfs_page_count_clean_buffers(page, start, @@ -305,7 +304,7 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, } static ssize_t -nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) +nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct inode *inode = file_inode(iocb->ki_filp); @@ -313,7 +312,7 @@ nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) return 0; /* Needs synchronization with the cleaner */ - return blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block); + return blockdev_direct_IO(iocb, inode, iter, nilfs_get_block); } const struct address_space_operations nilfs_aops = { @@ -399,23 +398,26 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) err = nilfs_init_acl(inode, dir); if (unlikely(err)) - goto failed_after_creation; /* never occur. When supporting - nilfs_init_acl(), proper cancellation of - above jobs should be considered */ + /* + * Never occur. When supporting nilfs_init_acl(), + * proper cancellation of above jobs should be considered. + */ + goto failed_after_creation; return inode; failed_after_creation: clear_nlink(inode); unlock_new_inode(inode); - iput(inode); /* raw_inode will be deleted through - nilfs_evict_inode() */ + iput(inode); /* + * raw_inode will be deleted through + * nilfs_evict_inode(). + */ goto failed; failed_ifile_create_inode: make_bad_inode(inode); - iput(inode); /* if i_nlink == 1, generic_forget_inode() will be - called */ + iput(inode); failed: return ERR_PTR(err); } @@ -666,8 +668,10 @@ void nilfs_write_inode_common(struct inode *inode, else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) raw_inode->i_device_code = cpu_to_le64(huge_encode_dev(inode->i_rdev)); - /* When extending inode, nilfs->ns_inode_size should be checked - for substitutions of appended fields */ + /* + * When extending inode, nilfs->ns_inode_size should be checked + * for substitutions of appended fields. 
+ */ } void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags) @@ -685,9 +689,12 @@ void nilfs_update_inode(struct inode *inode, struct buffer_head *ibh, int flags) set_bit(NILFS_I_INODE_SYNC, &ii->i_state); nilfs_write_inode_common(inode, raw_inode, 0); - /* XXX: call with has_bmap = 0 is a workaround to avoid - deadlock of bmap. This delays update of i_bmap to just - before writing */ + /* + * XXX: call with has_bmap = 0 is a workaround to avoid + * deadlock of bmap. This delays update of i_bmap to just + * before writing. + */ + nilfs_ifile_unmap_inode(ifile, ino, ibh); } @@ -752,14 +759,15 @@ void nilfs_truncate(struct inode *inode) nilfs_mark_inode_dirty(inode); nilfs_set_file_dirty(inode, 0); nilfs_transaction_commit(sb); - /* May construct a logical segment and may fail in sync mode. - But truncate has no return value. */ + /* + * May construct a logical segment and may fail in sync mode. + * But truncate has no return value. + */ } static void nilfs_clear_inode(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); - struct nilfs_mdt_info *mdi = NILFS_MDT(inode); /* * Free resources allocated in nilfs_read_inode(), here. @@ -768,8 +776,8 @@ static void nilfs_clear_inode(struct inode *inode) brelse(ii->i_bh); ii->i_bh = NULL; - if (mdi && mdi->mi_palloc_cache) - nilfs_palloc_destroy_cache(inode); + if (nilfs_is_metadata_file_inode(inode)) + nilfs_mdt_clear(inode); if (test_bit(NILFS_I_BMAP, &ii->i_state)) nilfs_bmap_clear(ii->i_bmap); @@ -811,8 +819,10 @@ void nilfs_evict_inode(struct inode *inode) if (IS_SYNC(inode)) nilfs_set_transaction_flag(NILFS_TI_SYNC); nilfs_transaction_commit(sb); - /* May construct a logical segment and may fail in sync mode. - But delete_inode has no return value. */ + /* + * May construct a logical segment and may fail in sync mode. + * But delete_inode has no return value. + */ } int nilfs_setattr(struct dentry *dentry, struct iattr *iattr) @@ -856,6 +866,7 @@ out_err: int nilfs_permission(struct inode *inode, int mask) { struct nilfs_root *root = NILFS_I(inode)->i_root; + if ((mask & MAY_WRITE) && root && root->cno != NILFS_CPTREE_CURRENT_CNO) return -EROFS; /* snapshot is not writable */ @@ -906,7 +917,7 @@ int nilfs_inode_dirty(struct inode *inode) return ret; } -int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty) +int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty) { struct nilfs_inode_info *ii = NILFS_I(inode); struct the_nilfs *nilfs = inode->i_sb->s_fs_info; @@ -919,17 +930,23 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty) spin_lock(&nilfs->ns_inode_lock); if (!test_bit(NILFS_I_QUEUED, &ii->i_state) && !test_bit(NILFS_I_BUSY, &ii->i_state)) { - /* Because this routine may race with nilfs_dispose_list(), - we have to check NILFS_I_QUEUED here, too. */ + /* + * Because this routine may race with nilfs_dispose_list(), + * we have to check NILFS_I_QUEUED here, too. + */ if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) { - /* This will happen when somebody is freeing - this inode. */ + /* + * This will happen when somebody is freeing + * this inode. + */ nilfs_warning(inode->i_sb, __func__, - "cannot get inode (ino=%lu)\n", + "cannot get inode (ino=%lu)", inode->i_ino); spin_unlock(&nilfs->ns_inode_lock); - return -EINVAL; /* NILFS_I_DIRTY may remain for - freeing inode */ + return -EINVAL; /* + * NILFS_I_DIRTY may remain for + * freeing inode. 
+ */ } list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files); set_bit(NILFS_I_QUEUED, &ii->i_state); @@ -946,7 +963,7 @@ int __nilfs_mark_inode_dirty(struct inode *inode, int flags) err = nilfs_load_inode_block(inode, &ibh); if (unlikely(err)) { nilfs_warning(inode->i_sb, __func__, - "failed to reget inode block.\n"); + "failed to reget inode block."); return err; } nilfs_update_inode(inode, ibh, flags); @@ -973,7 +990,7 @@ void nilfs_dirty_inode(struct inode *inode, int flags) if (is_bad_inode(inode)) { nilfs_warning(inode->i_sb, __func__, - "tried to mark bad_inode dirty. ignored.\n"); + "tried to mark bad_inode dirty. ignored."); dump_stack(); return; } diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index e8fe24882b5b..358b57e2cdf9 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. */ #include <linux/fs.h> @@ -783,6 +779,7 @@ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, size_t nmembs = argv->v_nmembs; struct nilfs_bmap *bmap = NILFS_I(nilfs->ns_dat)->i_bmap; struct nilfs_bdesc *bdescs = buf; + struct buffer_head *bh; int ret, i; for (i = 0; i < nmembs; i++) { @@ -800,12 +797,16 @@ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, /* skip dead block */ continue; if (bdescs[i].bd_level == 0) { - ret = nilfs_mdt_mark_block_dirty(nilfs->ns_dat, - bdescs[i].bd_offset); - if (ret < 0) { + ret = nilfs_mdt_get_block(nilfs->ns_dat, + bdescs[i].bd_offset, + false, NULL, &bh); + if (unlikely(ret)) { WARN_ON(ret == -ENOENT); return ret; } + mark_buffer_dirty(bh); + nilfs_mdt_mark_dirty(nilfs->ns_dat); + put_bh(bh); } else { ret = nilfs_bmap_mark(bmap, bdescs[i].bd_offset, bdescs[i].bd_level); diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index f6982b9153d5..3417d859a03c 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. */ #include <linux/buffer_head.h> @@ -32,6 +28,7 @@ #include "segment.h" #include "page.h" #include "mdt.h" +#include "alloc.h" /* nilfs_palloc_destroy_cache() */ #include <trace/events/nilfs2.h> @@ -393,34 +390,6 @@ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) return ret; } -/** - * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty. - * @inode: inode of the meta data file - * @block: block offset - * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient memory available. 
- * - * %-EIO - I/O error - * - * %-ENOENT - the specified block does not exist (hole block) - */ -int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block) -{ - struct buffer_head *bh; - int err; - - err = nilfs_mdt_read_block(inode, block, 0, &bh); - if (unlikely(err)) - return err; - mark_buffer_dirty(bh); - nilfs_mdt_mark_dirty(inode); - brelse(bh); - return 0; -} - int nilfs_mdt_fetch_dirty(struct inode *inode) { struct nilfs_inode_info *ii = NILFS_I(inode); @@ -497,8 +466,32 @@ int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz) return 0; } -void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size, - unsigned header_size) +/** + * nilfs_mdt_clear - do cleanup for the metadata file + * @inode: inode of the metadata file + */ +void nilfs_mdt_clear(struct inode *inode) +{ + struct nilfs_mdt_info *mdi = NILFS_MDT(inode); + + if (mdi->mi_palloc_cache) + nilfs_palloc_destroy_cache(inode); +} + +/** + * nilfs_mdt_destroy - release resources used by the metadata file + * @inode: inode of the metadata file + */ +void nilfs_mdt_destroy(struct inode *inode) +{ + struct nilfs_mdt_info *mdi = NILFS_MDT(inode); + + kfree(mdi->mi_bgl); /* kfree(NULL) is safe */ + kfree(mdi); +} + +void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size, + unsigned int header_size) { struct nilfs_mdt_info *mi = NILFS_MDT(inode); diff --git a/fs/nilfs2/mdt.h b/fs/nilfs2/mdt.h index 03246cac3338..3f67f3932097 100644 --- a/fs/nilfs2/mdt.h +++ b/fs/nilfs2/mdt.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. 
*/ #ifndef _NILFS_MDT_H @@ -57,8 +53,8 @@ struct nilfs_shadow_map { struct nilfs_mdt_info { struct rw_semaphore mi_sem; struct blockgroup_lock *mi_bgl; - unsigned mi_entry_size; - unsigned mi_first_entry_offset; + unsigned int mi_entry_size; + unsigned int mi_first_entry_offset; unsigned long mi_entries_per_block; struct nilfs_palloc_cache *mi_palloc_cache; struct nilfs_shadow_map *mi_shadow; @@ -71,6 +67,11 @@ static inline struct nilfs_mdt_info *NILFS_MDT(const struct inode *inode) return inode->i_private; } +static inline int nilfs_is_metadata_file_inode(const struct inode *inode) +{ + return inode->i_private != NULL; +} + /* Default GFP flags using highmem */ #define NILFS_MDT_GFP (__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM) @@ -83,11 +84,13 @@ int nilfs_mdt_find_block(struct inode *inode, unsigned long start, struct buffer_head **out_bh); int nilfs_mdt_delete_block(struct inode *, unsigned long); int nilfs_mdt_forget_block(struct inode *, unsigned long); -int nilfs_mdt_mark_block_dirty(struct inode *, unsigned long); int nilfs_mdt_fetch_dirty(struct inode *); int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz); -void nilfs_mdt_set_entry_size(struct inode *, unsigned, unsigned); +void nilfs_mdt_clear(struct inode *inode); +void nilfs_mdt_destroy(struct inode *inode); + +void nilfs_mdt_set_entry_size(struct inode *, unsigned int, unsigned int); int nilfs_mdt_setup_shadow_map(struct inode *inode, struct nilfs_shadow_map *shadow); diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 151bc19d47c0..1ec8ae5995a5 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -13,12 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Modified for NILFS by Amagai Yoshiji <amagai@osrg.net>, - * Ryusuke Konishi <ryusuke@osrg.net> + * Modified for NILFS by Amagai Yoshiji and Ryusuke Konishi. */ /* * linux/fs/ext2/namei.c @@ -49,6 +44,7 @@ static inline int nilfs_add_nondir(struct dentry *dentry, struct inode *inode) { int err = nilfs_add_link(dentry, inode); + if (!err) { d_instantiate(dentry, inode); unlock_new_inode(inode); @@ -143,7 +139,7 @@ static int nilfs_symlink(struct inode *dir, struct dentry *dentry, { struct nilfs_transaction_info ti; struct super_block *sb = dir->i_sb; - unsigned l = strlen(symname)+1; + unsigned int l = strlen(symname) + 1; struct inode *inode; int err; @@ -288,7 +284,7 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry) if (!inode->i_nlink) { nilfs_warning(inode->i_sb, __func__, - "deleting nonexistent file (%lu), %d\n", + "deleting nonexistent file (%lu), %d", inode->i_ino, inode->i_nlink); set_nlink(inode, 1); } @@ -457,7 +453,7 @@ static struct dentry *nilfs_get_parent(struct dentry *child) root = NILFS_I(d_inode(child))->i_root; - inode = nilfs_iget(d_inode(child)->i_sb, root, ino); + inode = nilfs_iget(child->d_sb, root, ino); if (IS_ERR(inode)) return ERR_CAST(inode); diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 385704027575..b1d48bc0532d 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -13,12 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net> - * Ryusuke Konishi <ryusuke@osrg.net> + * Written by Koji Sato and Ryusuke Konishi. */ #ifndef _NILFS_H @@ -69,8 +64,10 @@ struct nilfs_inode_info { */ struct rw_semaphore xattr_sem; #endif - struct buffer_head *i_bh; /* i_bh contains a new or dirty - disk inode */ + struct buffer_head *i_bh; /* + * i_bh contains a new or dirty + * disk inode. + */ struct nilfs_root *i_root; struct inode vfs_inode; }; @@ -100,8 +97,10 @@ enum { NILFS_I_NEW = 0, /* Inode is newly created */ NILFS_I_DIRTY, /* The file is dirty */ NILFS_I_QUEUED, /* inode is in dirty_files list */ - NILFS_I_BUSY, /* inode is grabbed by a segment - constructor */ + NILFS_I_BUSY, /* + * Inode is grabbed by a segment + * constructor + */ NILFS_I_COLLECTED, /* All dirty blocks are collected */ NILFS_I_UPDATED, /* The file has been written back */ NILFS_I_INODE_SYNC, /* dsync is not allowed for inode */ @@ -145,8 +144,10 @@ enum { struct nilfs_transaction_info { u32 ti_magic; void *ti_save; - /* This should never used. If this happens, - one of other filesystems has a bug. */ + /* + * This should never be used. If it happens, + * one of other filesystems has a bug. + */ unsigned short ti_flags; unsigned short ti_count; }; @@ -156,8 +157,10 @@ struct nilfs_transaction_info { /* ti_flags */ #define NILFS_TI_DYNAMIC_ALLOC 0x0001 /* Allocated from slab */ -#define NILFS_TI_SYNC 0x0002 /* Force to construct segment at the - end of transaction. */ +#define NILFS_TI_SYNC 0x0002 /* + * Force to construct segment at the + * end of transaction. + */ #define NILFS_TI_GC 0x0004 /* GC context */ #define NILFS_TI_COMMIT 0x0008 /* Change happened or not */ #define NILFS_TI_WRITER 0x0010 /* Constructor context */ @@ -279,7 +282,7 @@ extern void nilfs_write_failed(struct address_space *mapping, loff_t to); int nilfs_permission(struct inode *inode, int mask); int nilfs_load_inode_block(struct inode *inode, struct buffer_head **pbh); extern int nilfs_inode_dirty(struct inode *); -int nilfs_set_file_dirty(struct inode *inode, unsigned nr_dirty); +int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty); extern int __nilfs_mark_inode_dirty(struct inode *, int); extern void nilfs_dirty_inode(struct inode *, int flags); int nilfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 489391561cda..d97ba5f11b77 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -13,12 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net>, - * Seiji Kihara <kihara@osrg.net>. + * Written by Ryusuke Konishi and Seiji Kihara. 
*/ #include <linux/pagemap.h> @@ -440,12 +435,12 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) __nilfs_clear_page_dirty(page); } -unsigned nilfs_page_count_clean_buffers(struct page *page, - unsigned from, unsigned to) +unsigned int nilfs_page_count_clean_buffers(struct page *page, + unsigned int from, unsigned int to) { - unsigned block_start, block_end; + unsigned int block_start, block_end; struct buffer_head *bh, *head; - unsigned nc = 0; + unsigned int nc = 0; for (bh = head = page_buffers(page), block_start = 0; bh != head || !block_start; diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index a43b8287d012..f3687c958fa8 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h @@ -13,12 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net>, - * Seiji Kihara <kihara@osrg.net>. + * Written by Ryusuke Konishi and Seiji Kihara. */ #ifndef _NILFS_PAGE_H @@ -58,7 +53,8 @@ void nilfs_copy_back_pages(struct address_space *, struct address_space *); void nilfs_clear_dirty_page(struct page *, bool); void nilfs_clear_dirty_pages(struct address_space *, bool); void nilfs_mapping_init(struct address_space *mapping, struct inode *inode); -unsigned nilfs_page_count_clean_buffers(struct page *, unsigned, unsigned); +unsigned int nilfs_page_count_clean_buffers(struct page *, unsigned int, + unsigned int); unsigned long nilfs_find_uncommitted_extent(struct inode *inode, sector_t start_blk, sector_t *blkoff); diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 5afa77fadc11..d893dc912b62 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. 
*/ #include <linux/buffer_head.h> @@ -47,8 +43,10 @@ enum { /* work structure for recovery */ struct nilfs_recovery_block { - ino_t ino; /* Inode number of the file that this block - belongs to */ + ino_t ino; /* + * Inode number of the file that this block + * belongs to + */ sector_t blocknr; /* block number */ __u64 vblocknr; /* virtual block number */ unsigned long blkoff; /* File offset of the data block (per block) */ @@ -156,7 +154,7 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, sr = (struct nilfs_super_root *)bh_sr->b_data; if (check) { - unsigned bytes = le16_to_cpu(sr->sr_bytes); + unsigned int bytes = le16_to_cpu(sr->sr_bytes); if (bytes == 0 || bytes > nilfs->ns_blocksize) { ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT; @@ -508,7 +506,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, { struct inode *inode; struct nilfs_recovery_block *rb, *n; - unsigned blocksize = nilfs->ns_blocksize; + unsigned int blocksize = nilfs->ns_blocksize; struct page *page; loff_t pos; int err = 0, err2 = 0; @@ -526,6 +524,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, 0, &page, nilfs_get_block); if (unlikely(err)) { loff_t isize = inode->i_size; + if (pos + blocksize > isize) nilfs_write_failed(inode->i_mapping, pos + blocksize); @@ -872,9 +871,11 @@ int nilfs_search_super_root(struct the_nilfs *nilfs, flags = le16_to_cpu(sum->ss_flags); if (!(flags & NILFS_SS_SR) && !scan_newer) { - /* This will never happen because a superblock - (last_segment) always points to a pseg - having a super root. */ + /* + * This will never happen because a superblock + * (last_segment) always points to a pseg with + * a super root. + */ ret = NILFS_SEG_FAIL_CONSISTENCY; goto failed; } diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index f63620ce3892..bf36df10540b 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. * */ @@ -133,7 +129,7 @@ int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf, return 0; } -int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned flags, +int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags, time_t ctime, __u64 cno) { int err; @@ -240,7 +236,7 @@ nilfs_segbuf_fill_in_super_root_crc(struct nilfs_segment_buffer *segbuf, { struct nilfs_super_root *raw_sr; struct the_nilfs *nilfs = segbuf->sb_super->s_fs_info; - unsigned srsize; + unsigned int srsize; u32 crc; raw_sr = (struct nilfs_super_root *)segbuf->sb_super_root->b_data; diff --git a/fs/nilfs2/segbuf.h b/fs/nilfs2/segbuf.h index b04f08cc2397..7bbccc099709 100644 --- a/fs/nilfs2/segbuf.h +++ b/fs/nilfs2/segbuf.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. 
* */ #ifndef _NILFS_SEGBUF_H @@ -82,7 +78,7 @@ struct nilfs_segment_buffer { __u64 sb_nextnum; sector_t sb_fseg_start, sb_fseg_end; sector_t sb_pseg_start; - unsigned sb_rest_blocks; + unsigned int sb_rest_blocks; /* Buffers */ struct list_head sb_segsum_buffers; @@ -124,7 +120,8 @@ void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf, struct nilfs_segment_buffer *prev); void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64, struct the_nilfs *); -int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned, time_t, __u64); +int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned int, time_t, + __u64); int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *); int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *, struct buffer_head **); diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 4317f72568e6..e78b68a81aec 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. * */ @@ -49,18 +45,26 @@ */ #define SC_N_INODEVEC 16 /* Size of locally allocated inode vector */ -#define SC_MAX_SEGDELTA 64 /* Upper limit of the number of segments - appended in collection retry loop */ +#define SC_MAX_SEGDELTA 64 /* + * Upper limit of the number of segments + * appended in collection retry loop + */ /* Construction mode */ enum { SC_LSEG_SR = 1, /* Make a logical segment having a super root */ - SC_LSEG_DSYNC, /* Flush data blocks of a given file and make - a logical segment without a super root */ - SC_FLUSH_FILE, /* Flush data files, leads to segment writes without - creating a checkpoint */ - SC_FLUSH_DAT, /* Flush DAT file. This also creates segments without - a checkpoint */ + SC_LSEG_DSYNC, /* + * Flush data blocks of a given file and make + * a logical segment without a super root. + */ + SC_FLUSH_FILE, /* + * Flush data files, leads to segment writes without + * creating a checkpoint. + */ + SC_FLUSH_DAT, /* + * Flush DAT file. This also creates segments + * without a checkpoint. + */ }; /* Stage numbers of dirty block collection */ @@ -154,17 +158,15 @@ static int nilfs_prepare_segment_lock(struct nilfs_transaction_info *ti) if (cur_ti) { if (cur_ti->ti_magic == NILFS_TI_MAGIC) return ++cur_ti->ti_count; - else { - /* - * If journal_info field is occupied by other FS, - * it is saved and will be restored on - * nilfs_transaction_commit(). - */ - printk(KERN_WARNING - "NILFS warning: journal info from a different " - "FS\n"); - save = current->journal_info; - } + + /* + * If journal_info field is occupied by other FS, + * it is saved and will be restored on + * nilfs_transaction_commit(). 
+ */ + printk(KERN_WARNING + "NILFS warning: journal info from a different FS\n"); + save = current->journal_info; } if (!ti) { ti = kmem_cache_alloc(nilfs_transaction_cachep, GFP_NOFS); @@ -397,10 +399,10 @@ static void nilfs_transaction_unlock(struct super_block *sb) static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, struct nilfs_segsum_pointer *ssp, - unsigned bytes) + unsigned int bytes) { struct nilfs_segment_buffer *segbuf = sci->sc_curseg; - unsigned blocksize = sci->sc_super->s_blocksize; + unsigned int blocksize = sci->sc_super->s_blocksize; void *p; if (unlikely(ssp->offset + bytes > blocksize)) { @@ -422,8 +424,8 @@ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) { struct nilfs_segment_buffer *segbuf = sci->sc_curseg; struct buffer_head *sumbh; - unsigned sumbytes; - unsigned flags = 0; + unsigned int sumbytes; + unsigned int flags = 0; int err; if (nilfs_doing_gc()) @@ -444,8 +446,10 @@ static int nilfs_segctor_feed_segment(struct nilfs_sc_info *sci) { sci->sc_nblk_this_inc += sci->sc_curseg->sb_sum.nblocks; if (NILFS_SEGBUF_IS_LAST(sci->sc_curseg, &sci->sc_segbufs)) - return -E2BIG; /* The current segment is filled up - (internal code) */ + return -E2BIG; /* + * The current segment is filled up + * (internal code) + */ sci->sc_curseg = NILFS_NEXT_SEGBUF(sci->sc_curseg); return nilfs_segctor_reset_segment_buffer(sci); } @@ -472,9 +476,9 @@ static int nilfs_segctor_add_super_root(struct nilfs_sc_info *sci) */ static int nilfs_segctor_segsum_block_required( struct nilfs_sc_info *sci, const struct nilfs_segsum_pointer *ssp, - unsigned binfo_size) + unsigned int binfo_size) { - unsigned blocksize = sci->sc_super->s_blocksize; + unsigned int blocksize = sci->sc_super->s_blocksize; /* Size of finfo and binfo is enough small against blocksize */ return ssp->offset + binfo_size + @@ -533,7 +537,7 @@ static void nilfs_segctor_end_finfo(struct nilfs_sc_info *sci, static int nilfs_segctor_add_file_block(struct nilfs_sc_info *sci, struct buffer_head *bh, struct inode *inode, - unsigned binfo_size) + unsigned int binfo_size) { struct nilfs_segment_buffer *segbuf; int required, err = 0; @@ -617,7 +621,7 @@ static void nilfs_write_file_node_binfo(struct nilfs_sc_info *sci, *vblocknr = binfo->bi_v.bi_vblocknr; } -static struct nilfs_sc_operations nilfs_sc_file_ops = { +static const struct nilfs_sc_operations nilfs_sc_file_ops = { .collect_data = nilfs_collect_file_data, .collect_node = nilfs_collect_file_node, .collect_bmap = nilfs_collect_file_bmap, @@ -666,7 +670,7 @@ static void nilfs_write_dat_node_binfo(struct nilfs_sc_info *sci, *binfo_dat = binfo->bi_dat; } -static struct nilfs_sc_operations nilfs_sc_dat_ops = { +static const struct nilfs_sc_operations nilfs_sc_dat_ops = { .collect_data = nilfs_collect_dat_data, .collect_node = nilfs_collect_file_node, .collect_bmap = nilfs_collect_dat_bmap, @@ -674,7 +678,7 @@ static struct nilfs_sc_operations nilfs_sc_dat_ops = { .write_node_binfo = nilfs_write_dat_node_binfo, }; -static struct nilfs_sc_operations nilfs_sc_dsync_ops = { +static const struct nilfs_sc_operations nilfs_sc_dsync_ops = { .collect_data = nilfs_collect_file_data, .collect_node = NULL, .collect_bmap = NULL, @@ -777,7 +781,7 @@ static void nilfs_dispose_list(struct the_nilfs *nilfs, { struct nilfs_inode_info *ii, *n; struct nilfs_inode_info *ivec[SC_N_INODEVEC], **pii; - unsigned nv = 0; + unsigned int nv = 0; while (!list_empty(head)) { spin_lock(&nilfs->ns_inode_lock); @@ -875,9 +879,11 @@ static int 
nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci) err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 1, &raw_cp, &bh_cp); if (likely(!err)) { - /* The following code is duplicated with cpfile. But, it is - needed to collect the checkpoint even if it was not newly - created */ + /* + * The following code is duplicated with cpfile. But, it is + * needed to collect the checkpoint even if it was not newly + * created. + */ mark_buffer_dirty(bh_cp); nilfs_mdt_mark_dirty(nilfs->ns_cpfile); nilfs_cpfile_put_checkpoint( @@ -958,7 +964,7 @@ static void nilfs_segctor_fill_in_super_root(struct nilfs_sc_info *sci, { struct buffer_head *bh_sr; struct nilfs_super_root *raw_sr; - unsigned isz, srsz; + unsigned int isz, srsz; bh_sr = NILFS_LAST_SEGBUF(&sci->sc_segbufs)->sb_super_root; raw_sr = (struct nilfs_super_root *)bh_sr->b_data; @@ -1043,7 +1049,7 @@ static size_t nilfs_segctor_buffer_rest(struct nilfs_sc_info *sci) static int nilfs_segctor_scan_file(struct nilfs_sc_info *sci, struct inode *inode, - struct nilfs_sc_operations *sc_ops) + const struct nilfs_sc_operations *sc_ops) { LIST_HEAD(data_buffers); LIST_HEAD(node_buffers); @@ -1406,8 +1412,10 @@ static void nilfs_free_incomplete_logs(struct list_head *logs, if (atomic_read(&segbuf->sb_err)) { /* Case 1: The first segment failed */ if (segbuf->sb_pseg_start != segbuf->sb_fseg_start) - /* Case 1a: Partial segment appended into an existing - segment */ + /* + * Case 1a: Partial segment appended into an existing + * segment + */ nilfs_terminate_segment(nilfs, segbuf->sb_fseg_start, segbuf->sb_fseg_end); else /* Case 1b: New full segment */ @@ -1550,7 +1558,7 @@ nilfs_segctor_update_payload_blocknr(struct nilfs_sc_info *sci, sector_t blocknr; unsigned long nfinfo = segbuf->sb_sum.nfinfo; unsigned long nblocks = 0, ndatablk = 0; - struct nilfs_sc_operations *sc_op = NULL; + const struct nilfs_sc_operations *sc_op = NULL; struct nilfs_segsum_pointer ssp; struct nilfs_finfo *finfo = NULL; union nilfs_binfo binfo; @@ -1631,8 +1639,10 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode) static void nilfs_begin_page_io(struct page *page) { if (!page || PageWriteback(page)) - /* For split b-tree node pages, this function may be called - twice. We ignore the 2nd or later calls by this check. */ + /* + * For split b-tree node pages, this function may be called + * twice. We ignore the 2nd or later calls by this check. + */ return; lock_page(page); @@ -1942,7 +1952,7 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, ifile, ii->vfs_inode.i_ino, &ibh); if (unlikely(err)) { nilfs_warning(sci->sc_super, __func__, - "failed to get inode block.\n"); + "failed to get inode block."); return err; } mark_buffer_dirty(ibh); @@ -2395,6 +2405,7 @@ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) static void nilfs_construction_timeout(unsigned long data) { struct task_struct *p = (struct task_struct *)data; + wake_up_process(p); } @@ -2555,10 +2566,10 @@ static int nilfs_segctor_thread(void *arg) if (timeout || sci->sc_seq_request != sci->sc_seq_done) mode = SC_LSEG_SR; - else if (!sci->sc_flush_request) - break; - else + else if (sci->sc_flush_request) mode = nilfs_segctor_flush_mode(sci); + else + break; spin_unlock(&sci->sc_state_lock); nilfs_segctor_thread_construct(sci, mode); @@ -2684,8 +2695,10 @@ static void nilfs_segctor_write_out(struct nilfs_sc_info *sci) { int ret, retrycount = NILFS_SC_CLEANUP_RETRY; - /* The segctord thread was stopped and its timer was removed. 
- But some tasks remain. */ + /* + * The segctord thread was stopped and its timer was removed. + * But some tasks remain. + */ do { struct nilfs_transaction_info ti; @@ -2727,13 +2740,13 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) if (!list_empty(&sci->sc_dirty_files)) { nilfs_warning(sci->sc_super, __func__, - "dirty file(s) after the final construction\n"); + "dirty file(s) after the final construction"); nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); } if (!list_empty(&sci->sc_iput_queue)) { nilfs_warning(sci->sc_super, __func__, - "iput queue is not empty\n"); + "iput queue is not empty"); nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); } @@ -2810,7 +2823,7 @@ void nilfs_detach_log_writer(struct super_block *sb) if (!list_empty(&nilfs->ns_dirty_files)) { list_splice_init(&nilfs->ns_dirty_files, &garbage_list); nilfs_warning(sb, __func__, - "Hit dirty file after stopped log writer\n"); + "Hit dirty file after stopped log writer"); } spin_unlock(&nilfs->ns_inode_lock); up_write(&nilfs->ns_segctor_sem); diff --git a/fs/nilfs2/segment.h b/fs/nilfs2/segment.h index 0408b9b2814b..6565c10b7b76 100644 --- a/fs/nilfs2/segment.h +++ b/fs/nilfs2/segment.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. * */ #ifndef _NILFS_SEGMENT_H @@ -75,7 +71,7 @@ struct nilfs_recovery_info { */ struct nilfs_cstage { int scnt; - unsigned flags; + unsigned int flags; struct nilfs_inode_info *dirty_file_ptr; struct nilfs_inode_info *gc_inode_ptr; }; @@ -84,7 +80,7 @@ struct nilfs_segment_buffer; struct nilfs_segsum_pointer { struct buffer_head *bh; - unsigned offset; /* offset in bytes */ + unsigned int offset; /* offset in bytes */ }; /** @@ -193,11 +189,15 @@ enum { NILFS_SC_DIRTY, /* One or more dirty meta-data blocks exist */ NILFS_SC_UNCLOSED, /* Logical segment is not closed */ NILFS_SC_SUPER_ROOT, /* The latest segment has a super root */ - NILFS_SC_PRIOR_FLUSH, /* Requesting immediate flush without making a - checkpoint */ - NILFS_SC_HAVE_DELTA, /* Next checkpoint will have update of files - other than DAT, cpfile, sufile, or files - moved by GC */ + NILFS_SC_PRIOR_FLUSH, /* + * Requesting immediate flush without making a + * checkpoint + */ + NILFS_SC_HAVE_DELTA, /* + * Next checkpoint will have update of files + * other than DAT, cpfile, sufile, or files + * moved by GC. + */ }; /* sc_state */ @@ -207,17 +207,23 @@ enum { /* * Constant parameters */ -#define NILFS_SC_CLEANUP_RETRY 3 /* Retry count of construction when - destroying segctord */ +#define NILFS_SC_CLEANUP_RETRY 3 /* + * Retry count of construction when + * destroying segctord + */ /* * Default values of timeout, in seconds. */ -#define NILFS_SC_DEFAULT_TIMEOUT 5 /* Timeout value of dirty blocks. - It triggers construction of a - logical segment with a super root */ -#define NILFS_SC_DEFAULT_SR_FREQ 30 /* Maximum frequency of super root - creation */ +#define NILFS_SC_DEFAULT_TIMEOUT 5 /* + * Timeout value of dirty blocks. + * It triggers construction of a + * logical segment with a super root. 
+ */ +#define NILFS_SC_DEFAULT_SR_FREQ 30 /* + * Maximum frequency of super root + * creation + */ /* * The default threshold amount of data, in block counts. diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 52821ffc11f4..1963595a1580 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -13,12 +13,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. - * Revised by Ryusuke Konishi <ryusuke@osrg.net>. + * Written by Koji Sato. + * Revised by Ryusuke Konishi. */ #include <linux/kernel.h> @@ -61,6 +57,7 @@ static unsigned long nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum) { __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset; + do_div(t, nilfs_sufile_segment_usages_per_block(sufile)); return (unsigned long)t; } @@ -69,6 +66,7 @@ static unsigned long nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum) { __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset; + return do_div(t, nilfs_sufile_segment_usages_per_block(sufile)); } @@ -819,7 +817,7 @@ out: * %-ENOMEM - Insufficient amount of memory available. */ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, - unsigned sisz, size_t nsi) + unsigned int sisz, size_t nsi) { struct buffer_head *su_bh; struct nilfs_segment_usage *su; @@ -897,7 +895,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, * %-EINVAL - Invalid values in input (segment number, flags or nblocks) */ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, - unsigned supsz, size_t nsup) + unsigned int supsz, size_t nsup) { struct the_nilfs *nilfs = sufile->i_sb->s_fs_info; struct buffer_head *header_bh, *bh; diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index b8afd72f2379..46e89872294c 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Koji Sato <koji@osrg.net>. + * Written by Koji Sato. */ #ifndef _NILFS_SUFILE_H @@ -42,9 +38,9 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum); int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, unsigned long nblocks, time_t modtime); int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *); -ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned, +ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned int, size_t); -ssize_t nilfs_sufile_set_suinfo(struct inode *, void *, unsigned , size_t); +ssize_t nilfs_sufile_set_suinfo(struct inode *, void *, unsigned int, size_t); int nilfs_sufile_updatev(struct inode *, __u64 *, size_t, int, size_t *, void (*dofunc)(struct inode *, __u64, diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 7f5d3d9f1c37..666107a18a22 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. */ /* * linux/fs/ext2/super.c @@ -173,12 +169,10 @@ struct inode *nilfs_alloc_inode(struct super_block *sb) static void nilfs_i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); - struct nilfs_mdt_info *mdi = NILFS_MDT(inode); - if (mdi) { - kfree(mdi->mi_bgl); /* kfree(NULL) is safe */ - kfree(mdi); - } + if (nilfs_is_metadata_file_inode(inode)) + nilfs_mdt_destroy(inode); + kmem_cache_free(nilfs_inode_cachep, NILFS_I(inode)); } @@ -279,7 +273,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb, } } else if (sbp[1] && sbp[1]->s_magic != cpu_to_le16(NILFS_SUPER_MAGIC)) { - memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); + memcpy(sbp[1], sbp[0], nilfs->ns_sbsize); } if (flip && sbp[1]) @@ -749,6 +743,7 @@ static int parse_options(char *options, struct super_block *sb, int is_remount) while ((p = strsep(&options, ",")) != NULL) { int token; + if (!*p) continue; @@ -891,7 +886,7 @@ int nilfs_store_magic_and_option(struct super_block *sb, nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval); nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max); - return !parse_options(data, sb, 0) ? -EINVAL : 0 ; + return !parse_options(data, sb, 0) ? -EINVAL : 0; } int nilfs_check_feature_compatibility(struct super_block *sb, @@ -1316,7 +1311,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, } if (!s->s_root) { - s_new = true; + s_new = true; /* New superblock instance created */ s->s_mode = mode; diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index bbb0dcc35905..8ffa42b704d8 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c @@ -68,7 +68,7 @@ static ssize_t nilfs_##name##_attr_store(struct kobject *kobj, \ static const struct sysfs_ops nilfs_##name##_attr_ops = { \ .show = nilfs_##name##_attr_show, \ .store = nilfs_##name##_attr_store, \ -}; +} #define NILFS_DEV_INT_GROUP_TYPE(name, parent_name) \ static void nilfs_##name##_attr_release(struct kobject *kobj) \ @@ -84,7 +84,7 @@ static struct kobj_type nilfs_##name##_ktype = { \ .default_attrs = nilfs_##name##_attrs, \ .sysfs_ops = &nilfs_##name##_attr_ops, \ .release = nilfs_##name##_attr_release, \ -}; +} #define NILFS_DEV_INT_GROUP_FNS(name, parent_name) \ static int nilfs_sysfs_create_##name##_group(struct the_nilfs *nilfs) \ @@ -756,7 +756,7 @@ nilfs_superblock_sb_write_count_show(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, char *buf) { - unsigned sbwcount; + unsigned int sbwcount; down_read(&nilfs->ns_sem); sbwcount = nilfs->ns_sbwcount; @@ -770,7 +770,7 @@ nilfs_superblock_sb_update_frequency_show(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, char *buf) { - unsigned sb_update_freq; + unsigned int sb_update_freq; down_read(&nilfs->ns_sem); sb_update_freq = nilfs->ns_sb_update_freq; @@ -784,7 +784,7 @@ nilfs_superblock_sb_update_frequency_store(struct nilfs_superblock_attr *attr, struct the_nilfs *nilfs, const char *buf, size_t count) { - unsigned val; + unsigned int val; int err; err = kstrtouint(skip_spaces(buf), 0, &val); diff --git a/fs/nilfs2/sysfs.h b/fs/nilfs2/sysfs.h index 677e3a1a8370..648cedf9c06e 100644 --- a/fs/nilfs2/sysfs.h +++ b/fs/nilfs2/sysfs.h @@ -66,7 +66,7 @@ struct nilfs_##name##_attr { \ char *); \ ssize_t (*store)(struct 
kobject *, struct attribute *, \ const char *, size_t); \ -}; +} NILFS_COMMON_ATTR_STRUCT(feature); @@ -77,7 +77,7 @@ struct nilfs_##name##_attr { \ char *); \ ssize_t (*store)(struct nilfs_##name##_attr *, struct the_nilfs *, \ const char *, size_t); \ -}; +} NILFS_DEV_ATTR_STRUCT(dev); NILFS_DEV_ATTR_STRUCT(segments); @@ -93,7 +93,7 @@ struct nilfs_##name##_attr { \ char *); \ ssize_t (*store)(struct nilfs_##name##_attr *, struct nilfs_root *, \ const char *, size_t); \ -}; +} NILFS_CP_ATTR_STRUCT(snapshot); diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 69bd801afb53..809bd2de7ad0 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. * */ @@ -112,8 +108,8 @@ static int nilfs_load_super_root(struct the_nilfs *nilfs, struct nilfs_super_root *raw_sr; struct nilfs_super_block **sbp = nilfs->ns_sbp; struct nilfs_inode *rawi; - unsigned dat_entry_size, segment_usage_size, checkpoint_size; - unsigned inode_size; + unsigned int dat_entry_size, segment_usage_size, checkpoint_size; + unsigned int inode_size; int err; err = nilfs_read_super_root_block(nilfs, sr_block, &bh_sr, 1); @@ -621,8 +617,10 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) err = nilfs_load_super_block(nilfs, sb, blocksize, &sbp); if (err) goto out; - /* not failed_sbh; sbh is released automatically - when reloading fails. */ + /* + * Not to failed_sbh; sbh is released automatically + * when reloading fails. + */ } nilfs->ns_blocksize_bits = sb->s_blocksize_bits; nilfs->ns_blocksize = blocksize; diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 23778d385836..79369fd6b13b 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -13,11 +13,7 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA - * - * Written by Ryusuke Konishi <ryusuke@osrg.net> + * Written by Ryusuke Konishi. * */ @@ -118,10 +114,10 @@ struct the_nilfs { struct buffer_head *ns_sbh[2]; struct nilfs_super_block *ns_sbp[2]; time_t ns_sbwtime; - unsigned ns_sbwcount; - unsigned ns_sbsize; - unsigned ns_mount_state; - unsigned ns_sb_update_freq; + unsigned int ns_sbwcount; + unsigned int ns_sbsize; + unsigned int ns_mount_state; + unsigned int ns_sb_update_freq; /* * Following fields are dedicated to a writable FS-instance. 
@@ -226,15 +222,14 @@ THE_NILFS_FNS(SB_DIRTY, sb_dirty) * Mount option operations */ #define nilfs_clear_opt(nilfs, opt) \ - do { (nilfs)->ns_mount_opt &= ~NILFS_MOUNT_##opt; } while (0) + ((nilfs)->ns_mount_opt &= ~NILFS_MOUNT_##opt) #define nilfs_set_opt(nilfs, opt) \ - do { (nilfs)->ns_mount_opt |= NILFS_MOUNT_##opt; } while (0) + ((nilfs)->ns_mount_opt |= NILFS_MOUNT_##opt) #define nilfs_test_opt(nilfs, opt) ((nilfs)->ns_mount_opt & NILFS_MOUNT_##opt) #define nilfs_write_opt(nilfs, mask, opt) \ - do { (nilfs)->ns_mount_opt = \ + ((nilfs)->ns_mount_opt = \ (((nilfs)->ns_mount_opt & ~NILFS_MOUNT_##mask) | \ - NILFS_MOUNT_##opt); \ - } while (0) + NILFS_MOUNT_##opt)) \ /** * struct nilfs_root - nilfs root object @@ -273,6 +268,7 @@ struct nilfs_root { static inline int nilfs_sb_need_update(struct the_nilfs *nilfs) { u64 t = get_seconds(); + return t < nilfs->ns_sbwtime || t > nilfs->ns_sbwtime + nilfs->ns_sb_update_freq; } @@ -280,6 +276,7 @@ static inline int nilfs_sb_need_update(struct the_nilfs *nilfs) static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs) { int flip_bits = nilfs->ns_sbwcount & 0x0FL; + return (flip_bits != 0x08 && flip_bits != 0x0F); } @@ -308,7 +305,7 @@ static inline void nilfs_get_root(struct nilfs_root *root) static inline int nilfs_valid_fs(struct the_nilfs *nilfs) { - unsigned valid_fs; + unsigned int valid_fs; down_read(&nilfs->ns_sem); valid_fs = (nilfs->ns_mount_state & NILFS_VALID_FS); diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h index b44c68a857e7..0a3bc2cf192c 100644 --- a/fs/notify/fsnotify.h +++ b/fs/notify/fsnotify.h @@ -56,6 +56,13 @@ static inline void fsnotify_clear_marks_by_mount(struct vfsmount *mnt) fsnotify_destroy_marks(&real_mount(mnt)->mnt_fsnotify_marks, &mnt->mnt_root->d_lock); } +/* prepare for freeing all marks associated with given group */ +extern void fsnotify_detach_group_marks(struct fsnotify_group *group); +/* + * wait for fsnotify_mark_srcu period to end and free all marks in destroy_list + */ +extern void fsnotify_mark_destroy_list(void); + /* * update the dentry->d_flags of all of inode's children to indicate if inode cares * about events that happen to its children. diff --git a/fs/notify/group.c b/fs/notify/group.c index d16b62cb2854..3e2dd85be5dd 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -47,12 +47,21 @@ static void fsnotify_final_destroy_group(struct fsnotify_group *group) */ void fsnotify_destroy_group(struct fsnotify_group *group) { - /* clear all inode marks for this group */ - fsnotify_clear_marks_by_group(group); + /* clear all inode marks for this group, attach them to destroy_list */ + fsnotify_detach_group_marks(group); - synchronize_srcu(&fsnotify_mark_srcu); + /* + * Wait for fsnotify_mark_srcu period to end and free all marks in + * destroy_list + */ + fsnotify_mark_destroy_list(); - /* clear the notification queue of all events */ + /* + * Since we have waited for fsnotify_mark_srcu in + * fsnotify_mark_destroy_list() there can be no outstanding event + * notification against this group. So clearing the notification queue + * of all events is reliable now. 
+ */ fsnotify_flush_notify(group); /* diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 7115c5d7d373..d3fea0bd89e2 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -97,8 +97,8 @@ struct srcu_struct fsnotify_mark_srcu; static DEFINE_SPINLOCK(destroy_lock); static LIST_HEAD(destroy_list); -static void fsnotify_mark_destroy(struct work_struct *work); -static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy); +static void fsnotify_mark_destroy_workfn(struct work_struct *work); +static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy_workfn); void fsnotify_get_mark(struct fsnotify_mark *mark) { @@ -173,11 +173,15 @@ void fsnotify_detach_mark(struct fsnotify_mark *mark) } /* - * Free fsnotify mark. The freeing is actually happening from a kthread which - * first waits for srcu period end. Caller must have a reference to the mark - * or be protected by fsnotify_mark_srcu. + * Prepare mark for freeing and add it to the list of marks prepared for + * freeing. The actual freeing must happen after SRCU period ends and the + * caller is responsible for this. + * + * The function returns true if the mark was added to the list of marks for + * freeing. The function returns false if someone else has already called + * __fsnotify_free_mark() for the mark. */ -void fsnotify_free_mark(struct fsnotify_mark *mark) +static bool __fsnotify_free_mark(struct fsnotify_mark *mark) { struct fsnotify_group *group = mark->group; @@ -185,17 +189,11 @@ void fsnotify_free_mark(struct fsnotify_mark *mark) /* something else already called this function on this mark */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) { spin_unlock(&mark->lock); - return; + return false; } mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE; spin_unlock(&mark->lock); - spin_lock(&destroy_lock); - list_add(&mark->g_list, &destroy_list); - spin_unlock(&destroy_lock); - queue_delayed_work(system_unbound_wq, &reaper_work, - FSNOTIFY_REAPER_DELAY); - /* * Some groups like to know that marks are being freed. This is a * callback to the group function to let it know that this mark @@ -203,6 +201,25 @@ void fsnotify_free_mark(struct fsnotify_mark *mark) */ if (group->ops->freeing_mark) group->ops->freeing_mark(mark, group); + + spin_lock(&destroy_lock); + list_add(&mark->g_list, &destroy_list); + spin_unlock(&destroy_lock); + + return true; +} + +/* + * Free fsnotify mark. The freeing is actually happening from a workqueue which + * first waits for srcu period end. Caller must have a reference to the mark + * or be protected by fsnotify_mark_srcu. + */ +void fsnotify_free_mark(struct fsnotify_mark *mark) +{ + if (__fsnotify_free_mark(mark)) { + queue_delayed_work(system_unbound_wq, &reaper_work, + FSNOTIFY_REAPER_DELAY); + } } void fsnotify_destroy_mark(struct fsnotify_mark *mark, @@ -468,11 +485,29 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, } /* - * Given a group, destroy all of the marks associated with that group. + * Given a group, prepare for freeing all the marks associated with that group. + * The marks are attached to the list of marks prepared for destruction, the + * caller is responsible for freeing marks in that list after SRCU period has + * ended. 
*/ -void fsnotify_clear_marks_by_group(struct fsnotify_group *group) +void fsnotify_detach_group_marks(struct fsnotify_group *group) { - fsnotify_clear_marks_by_group_flags(group, (unsigned int)-1); + struct fsnotify_mark *mark; + + while (1) { + mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); + if (list_empty(&group->marks_list)) { + mutex_unlock(&group->mark_mutex); + break; + } + mark = list_first_entry(&group->marks_list, + struct fsnotify_mark, g_list); + fsnotify_get_mark(mark); + fsnotify_detach_mark(mark); + mutex_unlock(&group->mark_mutex); + __fsnotify_free_mark(mark); + fsnotify_put_mark(mark); + } } void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *old) @@ -499,7 +534,11 @@ void fsnotify_init_mark(struct fsnotify_mark *mark, mark->free_mark = free_mark; } -static void fsnotify_mark_destroy(struct work_struct *work) +/* + * Destroy all marks in destroy_list, waits for SRCU period to finish before + * actually freeing marks. + */ +void fsnotify_mark_destroy_list(void) { struct fsnotify_mark *mark, *next; struct list_head private_destroy_list; @@ -516,3 +555,8 @@ static void fsnotify_mark_destroy(struct work_struct *work) fsnotify_put_mark(mark); } } + +static void fsnotify_mark_destroy_workfn(struct work_struct *work) +{ + fsnotify_mark_destroy_list(); +} diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 91117ada8528..5622ed5a201e 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -1952,12 +1952,9 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) written = ntfs_perform_write(file, from, iocb->ki_pos); current->backing_dev_info = NULL; inode_unlock(vi); - if (likely(written > 0)) { - err = generic_write_sync(file, iocb->ki_pos, written); - if (err < 0) - written = 0; - } iocb->ki_pos += written; + if (likely(written > 0)) + written = generic_write_sync(iocb, written); return written ? written : err; } diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index 0cdf497c91ef..2162434728c0 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -322,3 +322,90 @@ struct posix_acl *ocfs2_iop_get_acl(struct inode *inode, int type) brelse(di_bh); return acl; } + +int ocfs2_acl_chmod(struct inode *inode, struct buffer_head *bh) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct posix_acl *acl; + int ret; + + if (S_ISLNK(inode->i_mode)) + return -EOPNOTSUPP; + + if (!(osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL)) + return 0; + + acl = ocfs2_get_acl_nolock(inode, ACL_TYPE_ACCESS, bh); + if (IS_ERR(acl) || !acl) + return PTR_ERR(acl); + ret = __posix_acl_chmod(&acl, GFP_KERNEL, inode->i_mode); + if (ret) + return ret; + ret = ocfs2_set_acl(NULL, inode, NULL, ACL_TYPE_ACCESS, + acl, NULL, NULL); + posix_acl_release(acl); + return ret; +} + +/* + * Initialize the ACLs of a new inode. If parent directory has default ACL, + * then clone to new inode. Called from ocfs2_mknod. 
+ */ +int ocfs2_init_acl(handle_t *handle, + struct inode *inode, + struct inode *dir, + struct buffer_head *di_bh, + struct buffer_head *dir_bh, + struct ocfs2_alloc_context *meta_ac, + struct ocfs2_alloc_context *data_ac) +{ + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); + struct posix_acl *acl = NULL; + int ret = 0, ret2; + umode_t mode; + + if (!S_ISLNK(inode->i_mode)) { + if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) { + acl = ocfs2_get_acl_nolock(dir, ACL_TYPE_DEFAULT, + dir_bh); + if (IS_ERR(acl)) + return PTR_ERR(acl); + } + if (!acl) { + mode = inode->i_mode & ~current_umask(); + ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode); + if (ret) { + mlog_errno(ret); + goto cleanup; + } + } + } + if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) { + if (S_ISDIR(inode->i_mode)) { + ret = ocfs2_set_acl(handle, inode, di_bh, + ACL_TYPE_DEFAULT, acl, + meta_ac, data_ac); + if (ret) + goto cleanup; + } + mode = inode->i_mode; + ret = __posix_acl_create(&acl, GFP_NOFS, &mode); + if (ret < 0) + return ret; + + ret2 = ocfs2_acl_set_mode(inode, di_bh, handle, mode); + if (ret2) { + mlog_errno(ret2); + ret = ret2; + goto cleanup; + } + if (ret > 0) { + ret = ocfs2_set_acl(handle, inode, + di_bh, ACL_TYPE_ACCESS, + acl, meta_ac, data_ac); + } + } +cleanup: + posix_acl_release(acl); + return ret; +} diff --git a/fs/ocfs2/acl.h b/fs/ocfs2/acl.h index 3fce68d08625..2783a75b3999 100644 --- a/fs/ocfs2/acl.h +++ b/fs/ocfs2/acl.h @@ -35,5 +35,10 @@ int ocfs2_set_acl(handle_t *handle, struct posix_acl *acl, struct ocfs2_alloc_context *meta_ac, struct ocfs2_alloc_context *data_ac); +extern int ocfs2_acl_chmod(struct inode *, struct buffer_head *); +extern int ocfs2_init_acl(handle_t *, struct inode *, struct inode *, + struct buffer_head *, struct buffer_head *, + struct ocfs2_alloc_context *, + struct ocfs2_alloc_context *); #endif /* OCFS2_ACL_H */ diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index e361d1a0ca09..460c0cedab3a 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -5351,7 +5351,7 @@ static int ocfs2_truncate_rec(handle_t *handle, { int ret; u32 left_cpos, rec_range, trunc_range; - int wants_rotate = 0, is_rightmost_tree_rec = 0; + int is_rightmost_tree_rec = 0; struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci); struct ocfs2_path *left_path = NULL; struct ocfs2_extent_list *el = path_leaf_el(path); @@ -5457,7 +5457,6 @@ static int ocfs2_truncate_rec(handle_t *handle, memset(rec, 0, sizeof(*rec)); ocfs2_cleanup_merge(el, index); - wants_rotate = 1; next_free = le16_to_cpu(el->l_next_free_rec); if (is_rightmost_tree_rec && next_free > 1) { diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index ad1577348a92..c034edf3ef38 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -2311,7 +2311,7 @@ static void ocfs2_dio_end_io_write(struct inode *inode, /* ocfs2_file_write_iter will get i_mutex, so we need not lock if we * are in that context. 
*/ if (dwc->dw_writer_pid != task_pid_nr(current)) { - mutex_lock(&inode->i_mutex); + inode_lock(inode); locked = 1; } @@ -2390,7 +2390,7 @@ out: ocfs2_free_alloc_context(meta_ac); ocfs2_run_deallocs(osb, &dealloc); if (locked) - mutex_unlock(&inode->i_mutex); + inode_unlock(inode); ocfs2_dio_free_write_ctx(inode, dwc); } @@ -2423,13 +2423,11 @@ static int ocfs2_dio_end_io(struct kiocb *iocb, return 0; } -static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file)->i_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - loff_t end = offset + iter->count; get_block_t *get_block; /* @@ -2440,7 +2438,8 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, return 0; /* Fallback to buffered I/O if we do not support append dio. */ - if (end > i_size_read(inode) && !ocfs2_supports_append_dio(osb)) + if (iocb->ki_pos + iter->count > i_size_read(inode) && + !ocfs2_supports_append_dio(osb)) return 0; if (iov_iter_rw(iter) == READ) @@ -2449,7 +2448,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter, get_block = ocfs2_dio_get_block; return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, - iter, offset, get_block, + iter, get_block, ocfs2_dio_end_io, NULL, 0); } diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 1934abb6b680..a8d15beee5cb 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1456,7 +1456,6 @@ static void o2hb_region_release(struct config_item *item) static int o2hb_read_block_input(struct o2hb_region *reg, const char *page, - size_t count, unsigned long *ret_bytes, unsigned int *ret_bits) { @@ -1499,8 +1498,8 @@ static ssize_t o2hb_region_block_bytes_store(struct config_item *item, if (reg->hr_bdev) return -EINVAL; - status = o2hb_read_block_input(reg, page, count, - &block_bytes, &block_bits); + status = o2hb_read_block_input(reg, page, &block_bytes, + &block_bits); if (status) return status; diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 2d0acd6678fe..4238eb28889f 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -600,10 +600,11 @@ static void o2net_set_nn_state(struct o2net_node *nn, static void o2net_data_ready(struct sock *sk) { void (*ready)(struct sock *sk); + struct o2net_sock_container *sc; - read_lock(&sk->sk_callback_lock); - if (sk->sk_user_data) { - struct o2net_sock_container *sc = sk->sk_user_data; + read_lock_bh(&sk->sk_callback_lock); + sc = sk->sk_user_data; + if (sc) { sclog(sc, "data_ready hit\n"); o2net_set_data_ready_time(sc); o2net_sc_queue_work(sc, &sc->sc_rx_work); @@ -611,7 +612,7 @@ static void o2net_data_ready(struct sock *sk) } else { ready = sk->sk_data_ready; } - read_unlock(&sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); ready(sk); } @@ -622,7 +623,7 @@ static void o2net_state_change(struct sock *sk) void (*state_change)(struct sock *sk); struct o2net_sock_container *sc; - read_lock(&sk->sk_callback_lock); + read_lock_bh(&sk->sk_callback_lock); sc = sk->sk_user_data; if (sc == NULL) { state_change = sk->sk_state_change; @@ -649,7 +650,7 @@ static void o2net_state_change(struct sock *sk) break; } out: - read_unlock(&sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); state_change(sk); } @@ -2012,7 +2013,7 @@ static void o2net_listen_data_ready(struct sock *sk) { void (*ready)(struct sock 
*sk); - read_lock(&sk->sk_callback_lock); + read_lock_bh(&sk->sk_callback_lock); ready = sk->sk_user_data; if (ready == NULL) { /* check for teardown race */ ready = sk->sk_data_ready; @@ -2039,7 +2040,7 @@ static void o2net_listen_data_ready(struct sock *sk) } out: - read_unlock(&sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); if (ready != NULL) ready(sk); } diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 9aed6e202201..13719d3f35f8 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c @@ -2455,6 +2455,8 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&dlm->spinlock); + ret = 0; + done: dlm_put(dlm); return ret; diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 474e57f834e6..1eaa9100c889 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -54,6 +54,7 @@ #include "uptodate.h" #include "quota.h" #include "refcounttree.h" +#include "acl.h" #include "buffer_head_io.h" @@ -3623,6 +3624,8 @@ static int ocfs2_data_convert_worker(struct ocfs2_lock_res *lockres, filemap_fdatawait(mapping); } + forget_all_cached_acls(inode); + out: return UNBLOCK_CONTINUE; } diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 5308841756be..4e7b0dc22450 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -1268,20 +1268,20 @@ bail_unlock_rw: if (size_change) ocfs2_rw_unlock(inode, 1); bail: - brelse(bh); /* Release quota pointers in case we acquired them */ for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++) dqput(transfer_to[qtype]); if (!status && attr->ia_valid & ATTR_MODE) { - status = posix_acl_chmod(inode, inode->i_mode); + status = ocfs2_acl_chmod(inode, bh); if (status < 0) mlog_errno(status); } if (inode_locked) ocfs2_inode_unlock(inode, 1); + brelse(bh); return status; } @@ -1290,7 +1290,7 @@ int ocfs2_getattr(struct vfsmount *mnt, struct kstat *stat) { struct inode *inode = d_inode(dentry); - struct super_block *sb = d_inode(dentry)->i_sb; + struct super_block *sb = dentry->d_sb; struct ocfs2_super *osb = sb->s_fs_info; int err; diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 12f4a9e9800f..0748777f2e2a 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -262,7 +262,7 @@ static int ocfs2_init_locked_inode(struct inode *inode, void *opaque) inode->i_ino = args->fi_ino; OCFS2_I(inode)->ip_blkno = args->fi_blkno; if (args->fi_sysfile_type != 0) - lockdep_set_class(&inode->i_mutex, + lockdep_set_class(&inode->i_rwsem, &ocfs2_sysfile_lock_key[args->fi_sysfile_type]); if (args->fi_sysfile_type == USER_QUOTA_SYSTEM_INODE || args->fi_sysfile_type == GROUP_QUOTA_SYSTEM_INODE || diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 6b3e87189a64..a8f1225e6d9b 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -259,7 +259,6 @@ static int ocfs2_mknod(struct inode *dir, struct ocfs2_dir_lookup_result lookup = { NULL, }; sigset_t oldset; int did_block_signals = 0; - struct posix_acl *default_acl = NULL, *acl = NULL; struct ocfs2_dentry_lock *dl = NULL; trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name, @@ -367,12 +366,6 @@ static int ocfs2_mknod(struct inode *dir, goto leave; } - status = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl); - if (status) { - mlog_errno(status); - goto leave; - } - handle = ocfs2_start_trans(osb, ocfs2_mknod_credits(osb->sb, S_ISDIR(mode), xattr_credits)); @@ -421,16 +414,8 @@ static int ocfs2_mknod(struct inode *dir, inc_nlink(dir); } - if (default_acl) { - status = ocfs2_set_acl(handle, inode, new_fe_bh, - ACL_TYPE_DEFAULT, 
default_acl, - meta_ac, data_ac); - } - if (!status && acl) { - status = ocfs2_set_acl(handle, inode, new_fe_bh, - ACL_TYPE_ACCESS, acl, - meta_ac, data_ac); - } + status = ocfs2_init_acl(handle, inode, dir, new_fe_bh, parent_fe_bh, + meta_ac, data_ac); if (status < 0) { mlog_errno(status); @@ -472,10 +457,6 @@ static int ocfs2_mknod(struct inode *dir, d_instantiate(dentry, inode); status = 0; leave: - if (default_acl) - posix_acl_release(default_acl); - if (acl) - posix_acl_release(acl); if (status < 0 && did_quota_inode) dquot_free_inode(inode); if (handle) diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index 540ab5b75dbb..44d178b8d1aa 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -580,7 +580,7 @@ struct ocfs2_extended_slot { /*00*/ __u8 es_valid; __u8 es_reserved1[3]; __le32 es_node_num; -/*10*/ +/*08*/ }; /* diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 744d5d90c363..92bbe93bfe10 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -4248,20 +4248,12 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, struct inode *inode = d_inode(old_dentry); struct buffer_head *old_bh = NULL; struct inode *new_orphan_inode = NULL; - struct posix_acl *default_acl, *acl; - umode_t mode; if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb))) return -EOPNOTSUPP; - mode = inode->i_mode; - error = posix_acl_create(dir, &mode, &default_acl, &acl); - if (error) { - mlog_errno(error); - return error; - } - error = ocfs2_create_inode_in_orphan(dir, mode, + error = ocfs2_create_inode_in_orphan(dir, inode->i_mode, &new_orphan_inode); if (error) { mlog_errno(error); @@ -4300,16 +4292,11 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir, /* If the security isn't preserved, we need to re-initialize them. 
*/ if (!preserve) { error = ocfs2_init_security_and_acl(dir, new_orphan_inode, - &new_dentry->d_name, - default_acl, acl); + &new_dentry->d_name); if (error) mlog_errno(error); } out: - if (default_acl) - posix_acl_release(default_acl); - if (acl) - posix_acl_release(acl); if (!error) { error = ocfs2_mv_orphaned_inode_to_new(dir, new_orphan_inode, new_dentry); diff --git a/fs/ocfs2/slot_map.c b/fs/ocfs2/slot_map.c index 1e09592148ad..d7407994f308 100644 --- a/fs/ocfs2/slot_map.c +++ b/fs/ocfs2/slot_map.c @@ -535,12 +535,8 @@ void ocfs2_put_slot(struct ocfs2_super *osb) spin_unlock(&osb->osb_lock); status = ocfs2_update_disk_slot(osb, si, slot_num); - if (status < 0) { + if (status < 0) mlog_errno(status); - goto bail; - } -bail: ocfs2_free_slot_info(osb); } - diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 7d3d979f57d9..ad16995c9e7a 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -7216,12 +7216,10 @@ out: */ int ocfs2_init_security_and_acl(struct inode *dir, struct inode *inode, - const struct qstr *qstr, - struct posix_acl *default_acl, - struct posix_acl *acl) + const struct qstr *qstr) { - struct buffer_head *dir_bh = NULL; int ret = 0; + struct buffer_head *dir_bh = NULL; ret = ocfs2_init_security_get(inode, dir, qstr, NULL); if (ret) { @@ -7234,11 +7232,9 @@ int ocfs2_init_security_and_acl(struct inode *dir, mlog_errno(ret); goto leave; } - - if (!ret && default_acl) - ret = ocfs2_iop_set_acl(inode, default_acl, ACL_TYPE_DEFAULT); - if (!ret && acl) - ret = ocfs2_iop_set_acl(inode, acl, ACL_TYPE_ACCESS); + ret = ocfs2_init_acl(NULL, inode, dir, NULL, dir_bh, NULL, NULL); + if (ret) + mlog_errno(ret); ocfs2_inode_unlock(dir, 0); brelse(dir_bh); @@ -7250,10 +7246,10 @@ leave: * 'security' attributes support */ static int ocfs2_xattr_security_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_SECURITY, + return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_SECURITY, name, buffer, size); } @@ -7321,10 +7317,10 @@ const struct xattr_handler ocfs2_xattr_security_handler = { * 'trusted' attributes support */ static int ocfs2_xattr_trusted_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *buffer, size_t size) { - return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_TRUSTED, + return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_TRUSTED, name, buffer, size); } @@ -7346,14 +7342,14 @@ const struct xattr_handler ocfs2_xattr_trusted_handler = { * 'user' attributes support */ static int ocfs2_xattr_user_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *buffer, size_t size) + struct dentry *unusde, struct inode *inode, + const char *name, void *buffer, size_t size) { - struct ocfs2_super *osb = OCFS2_SB(dentry->d_sb); + struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (osb->s_mount_opt & OCFS2_MOUNT_NOUSERXATTR) return -EOPNOTSUPP; - return ocfs2_xattr_get(d_inode(dentry), OCFS2_XATTR_INDEX_USER, name, + return ocfs2_xattr_get(inode, OCFS2_XATTR_INDEX_USER, name, buffer, size); } diff --git a/fs/ocfs2/xattr.h b/fs/ocfs2/xattr.h index f10d5b93c366..1633cc15ea1f 100644 --- a/fs/ocfs2/xattr.h +++ b/fs/ocfs2/xattr.h @@ -94,7 +94,5 @@ int ocfs2_reflink_xattrs(struct inode *old_inode, bool 
preserve_security); int ocfs2_init_security_and_acl(struct inode *dir, struct inode *inode, - const struct qstr *qstr, - struct posix_acl *default_acl, - struct posix_acl *acl); + const struct qstr *qstr); #endif /* OCFS2_XATTR_H */ diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c index f833bf8d5792..c8cbf3b60645 100644 --- a/fs/omfs/dir.c +++ b/fs/omfs/dir.c @@ -452,6 +452,6 @@ const struct inode_operations omfs_dir_inops = { const struct file_operations omfs_dir_operations = { .read = generic_read_dir, - .iterate = omfs_readdir, + .iterate_shared = omfs_readdir, .llseek = generic_file_llseek, }; diff --git a/fs/open.c b/fs/open.c index 17cb6b1dab75..93ae3cdee4ab 100644 --- a/fs/open.c +++ b/fs/open.c @@ -65,7 +65,7 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, return ret; } -long vfs_truncate(struct path *path, loff_t length) +long vfs_truncate(const struct path *path, loff_t length) { struct inode *inode; long error; @@ -499,7 +499,7 @@ out: return error; } -static int chmod_common(struct path *path, umode_t mode) +static int chmod_common(const struct path *path, umode_t mode) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; @@ -564,7 +564,7 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode) return sys_fchmodat(AT_FDCWD, filename, mode); } -static int chown_common(struct path *path, uid_t user, gid_t group) +static int chown_common(const struct path *path, uid_t user, gid_t group) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; @@ -713,7 +713,7 @@ static int do_dentry_open(struct file *f, } /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */ - if (S_ISREG(inode->i_mode)) + if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)) f->f_mode |= FMODE_ATOMIC_POS; f->f_op = fops_get(inode->i_fop); @@ -840,16 +840,12 @@ EXPORT_SYMBOL(file_path); int vfs_open(const struct path *path, struct file *file, const struct cred *cred) { - struct dentry *dentry = path->dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = vfs_select_inode(path->dentry, file->f_flags); - file->f_path = *path; - if (dentry->d_flags & DCACHE_OP_SELECT_INODE) { - inode = dentry->d_op->d_select_inode(dentry, file->f_flags); - if (IS_ERR(inode)) - return PTR_ERR(inode); - } + if (IS_ERR(inode)) + return PTR_ERR(inode); + file->f_path = *path; return do_dentry_open(file, inode, NULL, cred); } diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c index b61b883c8ff8..c7a86993d97e 100644 --- a/fs/openpromfs/inode.c +++ b/fs/openpromfs/inode.c @@ -166,7 +166,7 @@ static int openpromfs_readdir(struct file *, struct dir_context *); static const struct file_operations openprom_operations = { .read = generic_read_dir, - .iterate = openpromfs_readdir, + .iterate_shared = openpromfs_readdir, .llseek = generic_file_llseek, }; diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c index ae92795ed965..491e82c6f705 100644 --- a/fs/orangefs/file.c +++ b/fs/orangefs/file.c @@ -445,7 +445,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite gossip_debug(GOSSIP_FILE_DEBUG, "orangefs_file_write_iter\n"); - mutex_lock(&file->f_mapping->host->i_mutex); + inode_lock(file->f_mapping->host); /* Make sure generic_write_checks sees an up to date inode size. 
*/ if (file->f_flags & O_APPEND) { @@ -492,7 +492,7 @@ static ssize_t orangefs_file_write_iter(struct kiocb *iocb, struct iov_iter *ite out: - mutex_unlock(&file->f_mapping->host->i_mutex); + inode_unlock(file->f_mapping->host); return rc; } diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h index a9925e296ceb..2281882f718e 100644 --- a/fs/orangefs/orangefs-kernel.h +++ b/fs/orangefs/orangefs-kernel.h @@ -612,11 +612,11 @@ do { \ static inline void orangefs_i_size_write(struct inode *inode, loff_t i_size) { #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) - mutex_lock(&inode->i_mutex); + inode_lock(inode); #endif i_size_write(inode, i_size); #if BITS_PER_LONG == 32 && defined(CONFIG_SMP) - mutex_unlock(&inode->i_mutex); + inode_unlock(inode); #endif } diff --git a/fs/orangefs/xattr.c b/fs/orangefs/xattr.c index 63a6280d8c3a..99c19545752c 100644 --- a/fs/orangefs/xattr.c +++ b/fs/orangefs/xattr.c @@ -463,12 +463,13 @@ static int orangefs_xattr_set_default(const struct xattr_handler *handler, } static int orangefs_xattr_get_default(const struct xattr_handler *handler, - struct dentry *dentry, + struct dentry *unused, + struct inode *inode, const char *name, void *buffer, size_t size) { - return orangefs_inode_getxattr(dentry->d_inode, + return orangefs_inode_getxattr(inode, ORANGEFS_XATTR_NAME_DEFAULT_PREFIX, name, buffer, @@ -492,12 +493,13 @@ static int orangefs_xattr_set_trusted(const struct xattr_handler *handler, } static int orangefs_xattr_get_trusted(const struct xattr_handler *handler, - struct dentry *dentry, + struct dentry *unused, + struct inode *inode, const char *name, void *buffer, size_t size) { - return orangefs_inode_getxattr(dentry->d_inode, + return orangefs_inode_getxattr(inode, ORANGEFS_XATTR_NAME_TRUSTED_PREFIX, name, buffer, diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index a4ff5d0d7db9..c7b31a03dc9c 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -246,8 +246,8 @@ static bool ovl_need_xattr_filter(struct dentry *dentry, return false; } -ssize_t ovl_getxattr(struct dentry *dentry, const char *name, - void *value, size_t size) +ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, + const char *name, void *value, size_t size) { struct path realpath; enum ovl_path_type type = ovl_path_real(dentry, &realpath); diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 6a7090f4a441..99ec4b035237 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -173,8 +173,8 @@ int ovl_setattr(struct dentry *dentry, struct iattr *attr); int ovl_permission(struct inode *inode, int mask); int ovl_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags); -ssize_t ovl_getxattr(struct dentry *dentry, const char *name, - void *value, size_t size); +ssize_t ovl_getxattr(struct dentry *dentry, struct inode *inode, + const char *name, void *value, size_t size); ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); int ovl_removexattr(struct dentry *dentry, const char *name); struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags); diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 6ec1e43a9a54..da186ee4f846 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -218,7 +218,9 @@ static int ovl_check_whiteouts(struct dentry *dir, struct ovl_readdir_data *rdd) cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); old_cred = override_creds(override_cred); - err = 
mutex_lock_killable(&dir->d_inode->i_mutex); + err = down_write_killable(&dir->d_inode->i_rwsem); if (!err) { while (rdd->first_maybe_whiteout) { p = rdd->first_maybe_whiteout; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 5d972e6cd3fe..ed53ae0fe868 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -274,7 +274,7 @@ static bool ovl_is_opaquedir(struct dentry *dentry) if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr) return false; - res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1); + res = inode->i_op->getxattr(dentry, inode, OVL_XATTR_OPAQUE, &val, 1); if (res == 1 && val == 'y') return true; @@ -411,9 +411,7 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir, { struct dentry *dentry; - inode_lock(dir->d_inode); - dentry = lookup_one_len(name->name, dir, name->len); - inode_unlock(dir->d_inode); + dentry = lookup_hash(name, dir); if (IS_ERR(dentry)) { if (PTR_ERR(dentry) == -ENOENT) diff --git a/fs/pnode.c b/fs/pnode.c index c524fdddc7fb..99899705b105 100644 --- a/fs/pnode.c +++ b/fs/pnode.c @@ -198,7 +198,7 @@ static struct mount *next_group(struct mount *m, struct mount *origin) /* all accesses are serialized by namespace_sem */ static struct user_namespace *user_ns; -static struct mount *last_dest, *last_source, *dest_master; +static struct mount *last_dest, *first_source, *last_source, *dest_master; static struct mountpoint *mp; static struct hlist_head *list; @@ -221,20 +221,22 @@ static int propagate_one(struct mount *m) type = CL_MAKE_SHARED; } else { struct mount *n, *p; + bool done; for (n = m; ; n = p) { p = n->mnt_master; - if (p == dest_master || IS_MNT_MARKED(p)) { - while (last_dest->mnt_master != p) { - last_source = last_source->mnt_master; - last_dest = last_source->mnt_parent; - } - if (!peers(n, last_dest)) { - last_source = last_source->mnt_master; - last_dest = last_source->mnt_parent; - } + if (p == dest_master || IS_MNT_MARKED(p)) break; - } } + do { + struct mount *parent = last_source->mnt_parent; + if (last_source == first_source) + break; + done = parent->mnt_master == p; + if (done && peers(n, parent)) + break; + last_source = last_source->mnt_master; + } while (!done); + type = CL_SLAVE; /* beginning of peer group among the slaves?
*/ if (IS_MNT_SHARED(m)) @@ -286,6 +288,7 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp, */ user_ns = current->nsproxy->mnt_ns->user_ns; last_dest = dest_mnt; + first_source = source_mnt; last_source = source_mnt; mp = dest_mp; list = tree_list; diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 711dd5170376..2c60f17e7d92 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -21,7 +21,7 @@ #include <linux/export.h> #include <linux/user_namespace.h> -struct posix_acl **acl_by_type(struct inode *inode, int type) +static struct posix_acl **acl_by_type(struct inode *inode, int type) { switch (type) { case ACL_TYPE_ACCESS: @@ -32,19 +32,22 @@ struct posix_acl **acl_by_type(struct inode *inode, int type) BUG(); } } -EXPORT_SYMBOL(acl_by_type); struct posix_acl *get_cached_acl(struct inode *inode, int type) { struct posix_acl **p = acl_by_type(inode, type); - struct posix_acl *acl = ACCESS_ONCE(*p); - if (acl) { - spin_lock(&inode->i_lock); - acl = *p; - if (acl != ACL_NOT_CACHED) - acl = posix_acl_dup(acl); - spin_unlock(&inode->i_lock); + struct posix_acl *acl; + + for (;;) { + rcu_read_lock(); + acl = rcu_dereference(*p); + if (!acl || is_uncached_acl(acl) || + atomic_inc_not_zero(&acl->a_refcount)) + break; + rcu_read_unlock(); + cpu_relax(); } + rcu_read_unlock(); return acl; } EXPORT_SYMBOL(get_cached_acl); @@ -59,58 +62,72 @@ void set_cached_acl(struct inode *inode, int type, struct posix_acl *acl) { struct posix_acl **p = acl_by_type(inode, type); struct posix_acl *old; - spin_lock(&inode->i_lock); - old = *p; - rcu_assign_pointer(*p, posix_acl_dup(acl)); - spin_unlock(&inode->i_lock); - if (old != ACL_NOT_CACHED) + + old = xchg(p, posix_acl_dup(acl)); + if (!is_uncached_acl(old)) posix_acl_release(old); } EXPORT_SYMBOL(set_cached_acl); -void forget_cached_acl(struct inode *inode, int type) +static void __forget_cached_acl(struct posix_acl **p) { - struct posix_acl **p = acl_by_type(inode, type); struct posix_acl *old; - spin_lock(&inode->i_lock); - old = *p; - *p = ACL_NOT_CACHED; - spin_unlock(&inode->i_lock); - if (old != ACL_NOT_CACHED) + + old = xchg(p, ACL_NOT_CACHED); + if (!is_uncached_acl(old)) posix_acl_release(old); } + +void forget_cached_acl(struct inode *inode, int type) +{ + __forget_cached_acl(acl_by_type(inode, type)); +} EXPORT_SYMBOL(forget_cached_acl); void forget_all_cached_acls(struct inode *inode) { - struct posix_acl *old_access, *old_default; - spin_lock(&inode->i_lock); - old_access = inode->i_acl; - old_default = inode->i_default_acl; - inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED; - spin_unlock(&inode->i_lock); - if (old_access != ACL_NOT_CACHED) - posix_acl_release(old_access); - if (old_default != ACL_NOT_CACHED) - posix_acl_release(old_default); + __forget_cached_acl(&inode->i_acl); + __forget_cached_acl(&inode->i_default_acl); } EXPORT_SYMBOL(forget_all_cached_acls); struct posix_acl *get_acl(struct inode *inode, int type) { + void *sentinel; + struct posix_acl **p; struct posix_acl *acl; + /* + * The sentinel is used to detect when another operation like + * set_cached_acl() or forget_cached_acl() races with get_acl(). + * It is guaranteed that is_uncached_acl(sentinel) is true. + */ + acl = get_cached_acl(inode, type); - if (acl != ACL_NOT_CACHED) + if (!is_uncached_acl(acl)) return acl; if (!IS_POSIXACL(inode)) return NULL; + sentinel = uncached_acl_sentinel(current); + p = acl_by_type(inode, type); + /* - * A filesystem can force a ACL callback by just never filling the - * ACL cache. 
But normally you'd fill the cache either at inode - * instantiation time, or on the first ->get_acl call. + * If the ACL isn't being read yet, set our sentinel. Otherwise, the + * current value of the ACL will not be ACL_NOT_CACHED and so our own + * sentinel will not be set; another task will update the cache. We + * could wait for that other task to complete its job, but it's easier + * to just call ->get_acl to fetch the ACL ourself. (This is going to + * be an unlikely race.) + */ + if (cmpxchg(p, ACL_NOT_CACHED, sentinel) != ACL_NOT_CACHED) + /* fall through */ ; + + /* + * Normally, the ACL returned by ->get_acl will be cached. + * A filesystem can prevent that by calling + * forget_cached_acl(inode, type) in ->get_acl. * * If the filesystem doesn't have a get_acl() function at all, we'll * just create the negative cache entry. @@ -119,7 +136,24 @@ struct posix_acl *get_acl(struct inode *inode, int type) set_cached_acl(inode, type, NULL); return NULL; } - return inode->i_op->get_acl(inode, type); + acl = inode->i_op->get_acl(inode, type); + + if (IS_ERR(acl)) { + /* + * Remove our sentinel so that we don't block future attempts + * to cache the ACL. + */ + cmpxchg(p, sentinel, ACL_NOT_CACHED); + return acl; + } + + /* + * Cache the result, but only if our sentinel is still in place. + */ + posix_acl_dup(acl); + if (unlikely(cmpxchg(p, sentinel, acl) != sentinel)) + posix_acl_release(acl); + return acl; } EXPORT_SYMBOL(get_acl); @@ -763,18 +797,18 @@ EXPORT_SYMBOL (posix_acl_to_xattr); static int posix_acl_xattr_get(const struct xattr_handler *handler, - struct dentry *dentry, const char *name, - void *value, size_t size) + struct dentry *unused, struct inode *inode, + const char *name, void *value, size_t size) { struct posix_acl *acl; int error; - if (!IS_POSIXACL(d_backing_inode(dentry))) + if (!IS_POSIXACL(inode)) return -EOPNOTSUPP; - if (d_is_symlink(dentry)) + if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; - acl = get_acl(d_backing_inode(dentry), handler->flags); + acl = get_acl(inode, handler->flags); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) diff --git a/fs/proc/array.c b/fs/proc/array.c index b6c00ce0e29e..88c7de12197b 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -83,6 +83,7 @@ #include <linux/tracehook.h> #include <linux/string_helpers.h> #include <linux/user_namespace.h> +#include <linux/fs_struct.h> #include <asm/pgtable.h> #include <asm/processor.h> @@ -139,12 +140,25 @@ static inline const char *get_task_state(struct task_struct *tsk) return task_state_array[fls(state)]; } +static inline int get_task_umask(struct task_struct *tsk) +{ + struct fs_struct *fs; + int umask = -ENOENT; + + task_lock(tsk); + fs = tsk->fs; + if (fs) + umask = fs->umask; + task_unlock(tsk); + return umask; +} + static inline void task_state(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *p) { struct user_namespace *user_ns = seq_user_ns(m); struct group_info *group_info; - int g; + int g, umask; struct task_struct *tracer; const struct cred *cred; pid_t ppid, tpid = 0, tgid, ngid; @@ -162,6 +176,10 @@ static inline void task_state(struct seq_file *m, struct pid_namespace *ns, ngid = task_numa_group_id(p); cred = get_task_cred(p); + umask = get_task_umask(p); + if (umask >= 0) + seq_printf(m, "Umask:\t%#04o\n", umask); + task_lock(p); if (p->files) max_fds = files_fdtable(p->files)->max_fds; diff --git a/fs/proc/base.c b/fs/proc/base.c index b1755b23893e..a11eb7196ec8 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -434,7 +434,7 @@ 
static int proc_pid_wchan(struct seq_file *m, struct pid_namespace *ns, && !lookup_symbol_name(wchan, symname)) seq_printf(m, "%s", symname); else - seq_puts(m, "0\n"); + seq_putc(m, '0'); return 0; } @@ -955,7 +955,8 @@ static ssize_t environ_read(struct file *file, char __user *buf, struct mm_struct *mm = file->private_data; unsigned long env_start, env_end; - if (!mm) + /* Ensure the process spawned far enough to have an environment. */ + if (!mm || !mm->env_end) return 0; page = (char *)__get_free_page(GFP_TEMPORARY); @@ -1819,12 +1820,17 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, child = d_hash_and_lookup(dir, &qname); if (!child) { - child = d_alloc(dir, &qname); - if (!child) - goto end_instantiate; - if (instantiate(d_inode(dir), child, task, ptr) < 0) { - dput(child); + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + child = d_alloc_parallel(dir, &qname, &wq); + if (IS_ERR(child)) goto end_instantiate; + if (d_in_lookup(child)) { + int err = instantiate(d_inode(dir), child, task, ptr); + d_lookup_done(child); + if (err < 0) { + dput(child); + goto end_instantiate; + } } } inode = d_inode(child); @@ -2154,8 +2160,8 @@ out: static const struct file_operations proc_map_files_operations = { .read = generic_read_dir, - .iterate = proc_map_files_readdir, - .llseek = default_llseek, + .iterate_shared = proc_map_files_readdir, + .llseek = generic_file_llseek, }; #ifdef CONFIG_CHECKPOINT_RESTORE @@ -2502,8 +2508,8 @@ static int proc_attr_dir_readdir(struct file *file, struct dir_context *ctx) static const struct file_operations proc_attr_dir_operations = { .read = generic_read_dir, - .iterate = proc_attr_dir_readdir, - .llseek = default_llseek, + .iterate_shared = proc_attr_dir_readdir, + .llseek = generic_file_llseek, }; static struct dentry *proc_attr_dir_lookup(struct inode *dir, @@ -2910,8 +2916,8 @@ static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) static const struct file_operations proc_tgid_base_operations = { .read = generic_read_dir, - .iterate = proc_tgid_base_readdir, - .llseek = default_llseek, + .iterate_shared = proc_tgid_base_readdir, + .llseek = generic_file_llseek, }; static struct dentry *proc_tgid_base_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) @@ -3157,6 +3163,44 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx) } /* + * proc_tid_comm_permission is a special permission function exclusively + * used for the node /proc/<pid>/task/<tid>/comm. + * It bypasses generic permission checks in the case where a task of the same + * task group attempts to access the node. + * The rationale behind this is that glibc and bionic access this node for + * cross thread naming (pthread_set/getname_np(!self)). However, if + * PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0, + * which locks out the cross thread naming implementation. + * This function makes sure that the node is always accessible for members of + * same thread group. + */ +static int proc_tid_comm_permission(struct inode *inode, int mask) +{ + bool is_same_tgroup; + struct task_struct *task; + + task = get_proc_task(inode); + if (!task) + return -ESRCH; + is_same_tgroup = same_thread_group(current, task); + put_task_struct(task); + + if (likely(is_same_tgroup && !(mask & MAY_EXEC))) { + /* This file (/proc/<pid>/task/<tid>/comm) can always be + * read or written by the members of the corresponding + * thread group. 
+ */ + return 0; + } + + return generic_permission(inode, mask); +} + +static const struct inode_operations proc_tid_comm_inode_operations = { + .permission = proc_tid_comm_permission, +}; + +/* * Tasks */ static const struct pid_entry tid_base_stuff[] = { @@ -3174,7 +3218,9 @@ static const struct pid_entry tid_base_stuff[] = { #ifdef CONFIG_SCHED_DEBUG REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), #endif - REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), + NOD("comm", S_IFREG|S_IRUGO|S_IWUSR, + &proc_tid_comm_inode_operations, + &proc_pid_set_comm_operations, {}), #ifdef CONFIG_HAVE_ARCH_TRACEHOOK ONE("syscall", S_IRUSR, proc_pid_syscall), #endif @@ -3258,8 +3304,8 @@ static struct dentry *proc_tid_base_lookup(struct inode *dir, struct dentry *den static const struct file_operations proc_tid_base_operations = { .read = generic_read_dir, - .iterate = proc_tid_base_readdir, - .llseek = default_llseek, + .iterate_shared = proc_tid_base_readdir, + .llseek = generic_file_llseek, }; static const struct inode_operations proc_tid_base_inode_operations = { @@ -3469,6 +3515,6 @@ static const struct inode_operations proc_task_inode_operations = { static const struct file_operations proc_task_operations = { .read = generic_read_dir, - .iterate = proc_task_readdir, - .llseek = default_llseek, + .iterate_shared = proc_task_readdir, + .llseek = generic_file_llseek, }; diff --git a/fs/proc/fd.c b/fs/proc/fd.c index 56afa5ef08f2..01df23cc81f6 100644 --- a/fs/proc/fd.c +++ b/fs/proc/fd.c @@ -276,8 +276,8 @@ static int proc_readfd(struct file *file, struct dir_context *ctx) const struct file_operations proc_fd_operations = { .read = generic_read_dir, - .iterate = proc_readfd, - .llseek = default_llseek, + .iterate_shared = proc_readfd, + .llseek = generic_file_llseek, }; static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry, @@ -361,6 +361,6 @@ const struct inode_operations proc_fdinfo_inode_operations = { const struct file_operations proc_fdinfo_operations = { .read = generic_read_dir, - .iterate = proc_readfdinfo, - .llseek = default_llseek, + .iterate_shared = proc_readfdinfo, + .llseek = generic_file_llseek, }; diff --git a/fs/proc/generic.c b/fs/proc/generic.c index ff3ffc76a937..c633476616e0 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -318,7 +318,7 @@ int proc_readdir(struct file *file, struct dir_context *ctx) static const struct file_operations proc_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = proc_readdir, + .iterate_shared = proc_readdir, }; /* diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c index 72cb26f85d58..51b8b0a8ad91 100644 --- a/fs/proc/namespaces.c +++ b/fs/proc/namespaces.c @@ -139,7 +139,8 @@ out: const struct file_operations proc_ns_dir_operations = { .read = generic_read_dir, - .iterate = proc_ns_dir_readdir, + .iterate_shared = proc_ns_dir_readdir, + .llseek = generic_file_llseek, }; static struct dentry *proc_ns_dir_lookup(struct inode *dir, diff --git a/fs/proc/page.c b/fs/proc/page.c index 712f1b9992cc..3ecd445e830d 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -142,7 +142,7 @@ u64 stable_page_flags(struct page *page) /* - * Caveats on high order pages: page->_count will only be set + * Caveats on high order pages: page->_refcount will only be set * -1 on the head page; SLUB/SLQB do the same for PG_slab; * SLOB won't set PG_slab at all on compound pages. 
*/ diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index 350984a19c83..c8bbc68cdb05 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -179,7 +179,7 @@ static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx) const struct file_operations proc_net_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = proc_tgid_net_readdir, + .iterate_shared = proc_tgid_net_readdir, }; static __net_init int proc_net_ns_init(struct net *net) diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index fe5b6e6c4671..5e57c3e46e1d 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -627,18 +627,19 @@ static bool proc_sys_fill_cache(struct file *file, child = d_lookup(dir, &qname); if (!child) { - child = d_alloc(dir, &qname); - if (child) { + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + child = d_alloc_parallel(dir, &qname, &wq); + if (IS_ERR(child)) + return false; + if (d_in_lookup(child)) { inode = proc_sys_make_inode(dir->d_sb, head, table); if (!inode) { + d_lookup_done(child); dput(child); return false; - } else { - d_set_d_op(child, &proc_sys_dentry_operations); - d_add(child, inode); } - } else { - return false; + d_set_d_op(child, &proc_sys_dentry_operations); + d_add(child, inode); } } inode = d_inode(child); @@ -789,7 +790,7 @@ static const struct file_operations proc_sys_file_operations = { static const struct file_operations proc_sys_dir_file_operations = { .read = generic_read_dir, - .iterate = proc_sys_readdir, + .iterate_shared = proc_sys_readdir, .llseek = generic_file_llseek, }; diff --git a/fs/proc/root.c b/fs/proc/root.c index 361ab4ee42fc..55bc7d6c8aac 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c @@ -226,8 +226,8 @@ static int proc_root_readdir(struct file *file, struct dir_context *ctx) */ static const struct file_operations proc_root_operations = { .read = generic_read_dir, - .iterate = proc_root_readdir, - .llseek = default_llseek, + .iterate_shared = proc_root_readdir, + .llseek = generic_file_llseek, }; /* diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 229cb546bee0..4648c7f63ae2 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1027,11 +1027,15 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, }; if (type == CLEAR_REFS_MM_HIWATER_RSS) { + if (down_write_killable(&mm->mmap_sem)) { + count = -EINTR; + goto out_mm; + } + /* * Writing 5 to /proc/pid/clear_refs resets the peak * resident set size to this mm's current rss value. 
*/ - down_write(&mm->mmap_sem); reset_mm_hiwater_rss(mm); up_write(&mm->mmap_sem); goto out_mm; @@ -1043,7 +1047,10 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, if (!(vma->vm_flags & VM_SOFTDIRTY)) continue; up_read(&mm->mmap_sem); - down_write(&mm->mmap_sem); + if (down_write_killable(&mm->mmap_sem)) { + count = -EINTR; + goto out_mm; + } for (vma = mm->mmap; vma; vma = vma->vm_next) { vma->vm_flags &= ~VM_SOFTDIRTY; vma_set_page_prot(vma); @@ -1518,6 +1525,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, return page; } +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static struct page *can_gather_numa_stats_pmd(pmd_t pmd, + struct vm_area_struct *vma, + unsigned long addr) +{ + struct page *page; + int nid; + + if (!pmd_present(pmd)) + return NULL; + + page = vm_normal_page_pmd(vma, addr, pmd); + if (!page) + return NULL; + + if (PageReserved(page)) + return NULL; + + nid = page_to_nid(page); + if (!node_isset(nid, node_states[N_MEMORY])) + return NULL; + + return page; +} +#endif + static int gather_pte_stats(pmd_t *pmd, unsigned long addr, unsigned long end, struct mm_walk *walk) { @@ -1527,14 +1560,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, pte_t *orig_pte; pte_t *pte; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE ptl = pmd_trans_huge_lock(pmd, vma); if (ptl) { - pte_t huge_pte = *(pte_t *)pmd; struct page *page; - page = can_gather_numa_stats(huge_pte, vma, addr); + page = can_gather_numa_stats_pmd(*pmd, vma, addr); if (page) - gather_stats(page, md, pte_dirty(huge_pte), + gather_stats(page, md, pmd_dirty(*pmd), HPAGE_PMD_SIZE/PAGE_SIZE); spin_unlock(ptl); return 0; @@ -1542,6 +1575,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, if (pmd_trans_unstable(pmd)) return 0; +#endif orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); do { struct page *page = can_gather_numa_stats(*pte, vma, addr); diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 8afe10cf7df8..8ab782d8b33d 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c @@ -1071,7 +1071,7 @@ static int __init parse_crash_elf32_headers(void) /* Do some basic Verification. 
*/ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 || (ehdr.e_type != ET_CORE) || - !elf_check_arch(&ehdr) || + !vmcore_elf32_check_arch(&ehdr) || ehdr.e_ident[EI_CLASS] != ELFCLASS32|| ehdr.e_ident[EI_VERSION] != EV_CURRENT || ehdr.e_version != EV_CURRENT || diff --git a/fs/qnx4/dir.c b/fs/qnx4/dir.c index b218f965817b..781056a0480f 100644 --- a/fs/qnx4/dir.c +++ b/fs/qnx4/dir.c @@ -71,7 +71,7 @@ const struct file_operations qnx4_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = qnx4_readdir, + .iterate_shared = qnx4_readdir, .fsync = generic_file_fsync, }; diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c index 144ceda4948e..27637e0bdc9f 100644 --- a/fs/qnx6/dir.c +++ b/fs/qnx6/dir.c @@ -272,7 +272,7 @@ found: const struct file_operations qnx6_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = qnx6_readdir, + .iterate_shared = qnx6_readdir, .fsync = generic_file_fsync, }; diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c index d07a2f91d858..8b252673d454 100644 --- a/fs/quota/netlink.c +++ b/fs/quota/netlink.c @@ -47,7 +47,7 @@ void quota_send_warning(struct kqid qid, dev_t dev, void *msg_head; int ret; int msg_size = 4 * nla_total_size(sizeof(u32)) + - 2 * nla_total_size(sizeof(u64)); + 2 * nla_total_size_64bit(sizeof(u64)); /* We have to allocate using GFP_NOFS as we are called from a * filesystem performing write and thus further recursion into @@ -68,8 +68,9 @@ void quota_send_warning(struct kqid qid, dev_t dev, ret = nla_put_u32(skb, QUOTA_NL_A_QTYPE, qid.type); if (ret) goto attr_err_out; - ret = nla_put_u64(skb, QUOTA_NL_A_EXCESS_ID, - from_kqid_munged(&init_user_ns, qid)); + ret = nla_put_u64_64bit(skb, QUOTA_NL_A_EXCESS_ID, + from_kqid_munged(&init_user_ns, qid), + QUOTA_NL_A_PAD); if (ret) goto attr_err_out; ret = nla_put_u32(skb, QUOTA_NL_A_WARNING, warntype); @@ -81,8 +82,9 @@ void quota_send_warning(struct kqid qid, dev_t dev, ret = nla_put_u32(skb, QUOTA_NL_A_DEV_MINOR, MINOR(dev)); if (ret) goto attr_err_out; - ret = nla_put_u64(skb, QUOTA_NL_A_CAUSED_ID, - from_kuid_munged(&init_user_ns, current_uid())); + ret = nla_put_u64_64bit(skb, QUOTA_NL_A_CAUSED_ID, + from_kuid_munged(&init_user_ns, current_uid()), + QUOTA_NL_A_PAD); if (ret) goto attr_err_out; genlmsg_end(skb, msg_head); diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c index a586467f6ff6..be3ddd189cd4 100644 --- a/fs/ramfs/file-nommu.c +++ b/fs/ramfs/file-nommu.c @@ -211,14 +211,11 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, struct page **pages = NULL, **ptr, *page; loff_t isize; - if (!(flags & MAP_SHARED)) - return addr; - /* the mapping mustn't extend beyond the EOF */ lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; isize = i_size_read(inode); - ret = -EINVAL; + ret = -ENOSYS; maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT; if (pgoff >= maxpages) goto out; @@ -227,7 +224,6 @@ static unsigned long ramfs_nommu_get_unmapped_area(struct file *file, goto out; /* gang-find the pages */ - ret = -ENOMEM; pages = kcalloc(lpages, sizeof(struct page *), GFP_KERNEL); if (!pages) goto out_free; @@ -263,7 +259,7 @@ out: */ static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma) { - if (!(vma->vm_flags & VM_SHARED)) + if (!(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) return -ENOSYS; file_accessed(file); diff --git a/fs/read_write.c b/fs/read_write.c index cf377cf9dfe3..933b53a375b4 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -302,18 +302,6 @@ loff_t vfs_llseek(struct file *file, loff_t 
offset, int whence) } EXPORT_SYMBOL(vfs_llseek); -static inline struct fd fdget_pos(int fd) -{ - return __to_fd(__fdget_pos(fd)); -} - -static inline void fdput_pos(struct fd f) -{ - if (f.flags & FDPUT_POS_UNLOCK) - mutex_unlock(&f.file->f_pos_lock); - fdput(f); -} - SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) { off_t retval; @@ -410,11 +398,6 @@ ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos) } EXPORT_SYMBOL(vfs_iter_write); -/* - * rw_verify_area doesn't like huge counts. We limit - * them to something that fits in "int" so that others - * won't have to do range checks all the time. - */ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t count) { struct inode *inode; @@ -441,11 +424,8 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t if (retval < 0) return retval; } - retval = security_file_permission(file, + return security_file_permission(file, read_write == READ ? MAY_READ : MAY_WRITE); - if (retval) - return retval; - return count > MAX_RW_COUNT ? MAX_RW_COUNT : count; } static ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos) @@ -489,8 +469,9 @@ ssize_t vfs_read(struct file *file, char __user *buf, size_t count, loff_t *pos) return -EFAULT; ret = rw_verify_area(READ, file, pos, count); - if (ret >= 0) { - count = ret; + if (!ret) { + if (count > MAX_RW_COUNT) + count = MAX_RW_COUNT; ret = __vfs_read(file, buf, count, pos); if (ret > 0) { fsnotify_access(file); @@ -572,8 +553,9 @@ ssize_t vfs_write(struct file *file, const char __user *buf, size_t count, loff_ return -EFAULT; ret = rw_verify_area(WRITE, file, pos, count); - if (ret >= 0) { - count = ret; + if (!ret) { + if (count > MAX_RW_COUNT) + count = MAX_RW_COUNT; file_start_write(file); ret = __vfs_write(file, buf, count, pos); if (ret > 0) { @@ -698,12 +680,16 @@ static ssize_t do_iter_readv_writev(struct file *filp, struct iov_iter *iter, struct kiocb kiocb; ssize_t ret; - if (flags & ~RWF_HIPRI) + if (flags & ~(RWF_HIPRI | RWF_DSYNC | RWF_SYNC)) return -EOPNOTSUPP; init_sync_kiocb(&kiocb, filp); if (flags & RWF_HIPRI) kiocb.ki_flags |= IOCB_HIPRI; + if (flags & RWF_DSYNC) + kiocb.ki_flags |= IOCB_DSYNC; + if (flags & RWF_SYNC) + kiocb.ki_flags |= (IOCB_DSYNC | IOCB_SYNC); kiocb.ki_pos = *ppos; ret = fn(&kiocb, iter); @@ -1323,7 +1309,8 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, retval = rw_verify_area(READ, in.file, &pos, count); if (retval < 0) goto fput_in; - count = retval; + if (count > MAX_RW_COUNT) + count = MAX_RW_COUNT; /* * Get output file, and verify that it is ok.. 
@@ -1341,7 +1328,6 @@ static ssize_t do_sendfile(int out_fd, int in_fd, loff_t *ppos, retval = rw_verify_area(WRITE, out.file, &out_pos, count); if (retval < 0) goto fput_out; - count = retval; if (!max) max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes); @@ -1485,11 +1471,12 @@ ssize_t vfs_copy_file_range(struct file *file_in, loff_t pos_in, if (flags != 0) return -EINVAL; - /* copy_file_range allows full ssize_t len, ignoring MAX_RW_COUNT */ ret = rw_verify_area(READ, file_in, &pos_in, len); - if (ret >= 0) - ret = rw_verify_area(WRITE, file_out, &pos_out, len); - if (ret < 0) + if (unlikely(ret)) + return ret; + + ret = rw_verify_area(WRITE, file_out, &pos_out, len); + if (unlikely(ret)) return ret; if (!(file_in->f_mode & FMODE_READ) || diff --git a/fs/readdir.c b/fs/readdir.c index 5f2d4bee5a73..68ef06efe6bc 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -24,27 +24,40 @@ int iterate_dir(struct file *file, struct dir_context *ctx) { struct inode *inode = file_inode(file); + bool shared = false; int res = -ENOTDIR; - if (!file->f_op->iterate) + if (file->f_op->iterate_shared) + shared = true; + else if (!file->f_op->iterate) goto out; res = security_file_permission(file, MAY_READ); if (res) goto out; - res = mutex_lock_killable(&inode->i_mutex); - if (res) - goto out; + if (shared) + inode_lock_shared(inode); + else + inode_lock(inode); + // res = mutex_lock_killable(&inode->i_mutex); + // if (res) + // goto out; res = -ENOENT; if (!IS_DEADDIR(inode)) { ctx->pos = file->f_pos; - res = file->f_op->iterate(file, ctx); + if (shared) + res = file->f_op->iterate_shared(file, ctx); + else + res = file->f_op->iterate(file, ctx); file->f_pos = ctx->pos; fsnotify_access(file); file_accessed(file); } - inode_unlock(inode); + if (shared) + inode_unlock_shared(inode); + else + inode_unlock(inode); out: return res; } @@ -111,7 +124,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, struct old_linux_dirent __user *, dirent, unsigned int, count) { int error; - struct fd f = fdget(fd); + struct fd f = fdget_pos(fd); struct readdir_callback buf = { .ctx.actor = fillonedir, .dirent = dirent @@ -124,7 +137,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd, if (buf.result) error = buf.result; - fdput(f); + fdput_pos(f); return error; } @@ -210,7 +223,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, if (!access_ok(VERIFY_WRITE, dirent, count)) return -EFAULT; - f = fdget(fd); + f = fdget_pos(fd); if (!f.file) return -EBADF; @@ -224,7 +237,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, else error = count - buf.count; } - fdput(f); + fdput_pos(f); return error; } @@ -293,7 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, if (!access_ok(VERIFY_WRITE, dirent, count)) return -EFAULT; - f = fdget(fd); + f = fdget_pos(fd); if (!f.file) return -EBADF; @@ -308,6 +321,6 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, else error = count - buf.count; } - fdput(f); + fdput_pos(f); return error; } diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index 3abd4004184b..45aa05e2232f 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -20,7 +20,7 @@ static int reiserfs_dir_fsync(struct file *filp, loff_t start, loff_t end, const struct file_operations reiserfs_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = reiserfs_readdir, + .iterate_shared = reiserfs_readdir, .fsync = reiserfs_dir_fsync, .unlocked_ioctl = reiserfs_ioctl, #ifdef CONFIG_COMPAT diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index 389773711de4..90f815bdfa8a 100644 --- 
a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -260,10 +260,10 @@ const struct file_operations reiserfs_file_operations = { const struct inode_operations reiserfs_file_inode_operations = { .setattr = reiserfs_setattr, - .setxattr = reiserfs_setxattr, - .getxattr = reiserfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = reiserfs_listxattr, - .removexattr = reiserfs_removexattr, + .removexattr = generic_removexattr, .permission = reiserfs_permission, .get_acl = reiserfs_get_acl, .set_acl = reiserfs_set_acl, diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index d5c2e9c865de..825455d3e4ba 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -3279,15 +3279,14 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags) * We thank Mingming Cao for helping us understand in great detail what * to do in this section of the code. */ -static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(iocb, inode, iter, offset, + ret = blockdev_direct_IO(iocb, inode, iter, reiserfs_get_blocks_direct_io); /* @@ -3296,7 +3295,7 @@ static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, */ if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) { loff_t isize = i_size_read(inode); - loff_t end = offset + count; + loff_t end = iocb->ki_pos + count; if ((end > isize) && inode_newsize_ok(inode, isize) == 0) { truncate_setsize(inode, isize); diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c index 57045f423893..2f1ddc908013 100644 --- a/fs/reiserfs/ioctl.c +++ b/fs/reiserfs/ioctl.c @@ -187,7 +187,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp) } /* we need to make sure nobody is changing the file size beneath us */ - reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb); +{ + int depth = reiserfs_write_unlock_nested(inode->i_sb); + inode_lock(inode); + reiserfs_write_lock_nested(inode->i_sb, depth); +} reiserfs_write_lock(inode->i_sb); diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 2a12d46d7fb4..8a36696d6df9 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c @@ -1650,10 +1650,10 @@ const struct inode_operations reiserfs_dir_inode_operations = { .mknod = reiserfs_mknod, .rename = reiserfs_rename, .setattr = reiserfs_setattr, - .setxattr = reiserfs_setxattr, - .getxattr = reiserfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = reiserfs_listxattr, - .removexattr = reiserfs_removexattr, + .removexattr = generic_removexattr, .permission = reiserfs_permission, .get_acl = reiserfs_get_acl, .set_acl = reiserfs_set_acl, @@ -1667,10 +1667,10 @@ const struct inode_operations reiserfs_symlink_inode_operations = { .readlink = generic_readlink, .get_link = page_get_link, .setattr = reiserfs_setattr, - .setxattr = reiserfs_setxattr, - .getxattr = reiserfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = reiserfs_listxattr, - .removexattr = reiserfs_removexattr, + .removexattr = generic_removexattr, .permission = reiserfs_permission, }; @@ -1679,10 +1679,10 @@ const struct inode_operations reiserfs_symlink_inode_operations = { */ const struct inode_operations reiserfs_special_inode_operations = { .setattr = reiserfs_setattr, - .setxattr = 
reiserfs_setxattr, - .getxattr = reiserfs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = reiserfs_listxattr, - .removexattr = reiserfs_removexattr, + .removexattr = generic_removexattr, .permission = reiserfs_permission, .get_acl = reiserfs_get_acl, .set_acl = reiserfs_set_acl, diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c index 99a5d5dae46a..415d66ca87d1 100644 --- a/fs/reiserfs/objectid.c +++ b/fs/reiserfs/objectid.c @@ -3,8 +3,8 @@ */ #include <linux/string.h> -#include <linux/random.h> #include <linux/time.h> +#include <linux/uuid.h> #include "reiserfs.h" /* find where objectid map starts */ diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 28f5f8b11370..a33812ae9fad 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -764,60 +764,6 @@ find_xattr_handler_prefix(const struct xattr_handler **handlers, return xah; } - -/* - * Inode operation getxattr() - */ -ssize_t -reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer, - size_t size) -{ - const struct xattr_handler *handler; - - handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - - if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) - return -EOPNOTSUPP; - - return handler->get(handler, dentry, name, buffer, size); -} - -/* - * Inode operation setxattr() - * - * d_inode(dentry)->i_mutex down - */ -int -reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) -{ - const struct xattr_handler *handler; - - handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - - if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) - return -EOPNOTSUPP; - - return handler->set(handler, dentry, name, value, size, flags); -} - -/* - * Inode operation removexattr() - * - * d_inode(dentry)->i_mutex down - */ -int reiserfs_removexattr(struct dentry *dentry, const char *name) -{ - const struct xattr_handler *handler; - - handler = find_xattr_handler_prefix(dentry->d_sb->s_xattr, name); - - if (!handler || get_inode_sd_version(d_inode(dentry)) == STAT_DATA_V1) - return -EOPNOTSUPP; - - return handler->set(handler, dentry, name, NULL, 0, XATTR_REPLACE); -} - struct listxattr_buf { struct dir_context ctx; size_t size; diff --git a/fs/reiserfs/xattr.h b/fs/reiserfs/xattr.h index 15dde6262c00..613ff5aef94e 100644 --- a/fs/reiserfs/xattr.h +++ b/fs/reiserfs/xattr.h @@ -2,6 +2,7 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/rwsem.h> +#include <linux/xattr.h> struct inode; struct dentry; @@ -18,12 +19,7 @@ int reiserfs_permission(struct inode *inode, int mask); #ifdef CONFIG_REISERFS_FS_XATTR #define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir) -ssize_t reiserfs_getxattr(struct dentry *dentry, const char *name, - void *buffer, size_t size); -int reiserfs_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size); -int reiserfs_removexattr(struct dentry *dentry, const char *name); int reiserfs_xattr_get(struct inode *, const char *, void *, size_t); int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int); @@ -92,10 +88,7 @@ static inline void reiserfs_init_xattr_rwsem(struct inode *inode) #else -#define reiserfs_getxattr NULL -#define reiserfs_setxattr NULL #define reiserfs_listxattr NULL -#define reiserfs_removexattr NULL static inline void reiserfs_init_xattr_rwsem(struct inode *inode) { 
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 558a16beaacb..dbed42f755e0 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -197,10 +197,8 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type) size = reiserfs_xattr_get(inode, name, NULL, 0); if (size < 0) { - if (size == -ENODATA || size == -ENOSYS) { - set_cached_acl(inode, type, NULL); + if (size == -ENODATA || size == -ENOSYS) return NULL; - } return ERR_PTR(size); } @@ -220,8 +218,6 @@ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type) } else { acl = reiserfs_posix_acl_from_disk(value, retval); } - if (!IS_ERR(acl)) - set_cached_acl(inode, type, acl); kfree(value); return acl; @@ -370,7 +366,7 @@ int reiserfs_cache_default_acl(struct inode *inode) if (IS_PRIVATE(inode)) return 0; - acl = reiserfs_get_acl(inode, ACL_TYPE_DEFAULT); + acl = get_acl(inode, ACL_TYPE_DEFAULT); if (acl && !IS_ERR(acl)) { int size = reiserfs_acl_size(acl->a_count); diff --git a/fs/reiserfs/xattr_security.c b/fs/reiserfs/xattr_security.c index ab0217d32039..86aeb9dd805a 100644 --- a/fs/reiserfs/xattr_security.c +++ b/fs/reiserfs/xattr_security.c @@ -9,29 +9,26 @@ #include <linux/uaccess.h> static int -security_get(const struct xattr_handler *handler, struct dentry *dentry, - const char *name, void *buffer, size_t size) +security_get(const struct xattr_handler *handler, struct dentry *unused, + struct inode *inode, const char *name, void *buffer, size_t size) { - if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX)) - return -EINVAL; - - if (IS_PRIVATE(d_inode(dentry))) + if (IS_PRIVATE(inode)) return -EPERM; - return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); + return reiserfs_xattr_get(inode, xattr_full_name(handler, name), + buffer, size); } static int security_set(const struct xattr_handler *handler, struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags) { - if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX)) - return -EINVAL; - if (IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), + xattr_full_name(handler, name), + buffer, size, flags); } static bool security_list(struct dentry *dentry) diff --git a/fs/reiserfs/xattr_trusted.c b/fs/reiserfs/xattr_trusted.c index 64b67aa643a9..31837f031f59 100644 --- a/fs/reiserfs/xattr_trusted.c +++ b/fs/reiserfs/xattr_trusted.c @@ -8,29 +8,26 @@ #include <linux/uaccess.h> static int -trusted_get(const struct xattr_handler *handler, struct dentry *dentry, - const char *name, void *buffer, size_t size) +trusted_get(const struct xattr_handler *handler, struct dentry *unused, + struct inode *inode, const char *name, void *buffer, size_t size) { - if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX)) - return -EINVAL; - - if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry))) + if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode)) return -EPERM; - return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); + return reiserfs_xattr_get(inode, xattr_full_name(handler, name), + buffer, size); } static int trusted_set(const struct xattr_handler *handler, struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags) { - if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX)) - return -EINVAL; - if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(d_inode(dentry))) return -EPERM; - return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), + 
xattr_full_name(handler, name), + buffer, size, flags); } static bool trusted_list(struct dentry *dentry) diff --git a/fs/reiserfs/xattr_user.c b/fs/reiserfs/xattr_user.c index 12e6306f562a..f7c39731684b 100644 --- a/fs/reiserfs/xattr_user.c +++ b/fs/reiserfs/xattr_user.c @@ -7,27 +7,24 @@ #include <linux/uaccess.h> static int -user_get(const struct xattr_handler *handler, struct dentry *dentry, - const char *name, void *buffer, size_t size) +user_get(const struct xattr_handler *handler, struct dentry *unused, + struct inode *inode, const char *name, void *buffer, size_t size) { - - if (strlen(name) < sizeof(XATTR_USER_PREFIX)) - return -EINVAL; - if (!reiserfs_xattrs_user(dentry->d_sb)) + if (!reiserfs_xattrs_user(inode->i_sb)) return -EOPNOTSUPP; - return reiserfs_xattr_get(d_inode(dentry), name, buffer, size); + return reiserfs_xattr_get(inode, xattr_full_name(handler, name), + buffer, size); } static int user_set(const struct xattr_handler *handler, struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags) { - if (strlen(name) < sizeof(XATTR_USER_PREFIX)) - return -EINVAL; - if (!reiserfs_xattrs_user(dentry->d_sb)) return -EOPNOTSUPP; - return reiserfs_xattr_set(d_inode(dentry), name, buffer, size, flags); + return reiserfs_xattr_set(d_inode(dentry), + xattr_full_name(handler, name), + buffer, size, flags); } static bool user_list(struct dentry *dentry) diff --git a/fs/romfs/super.c b/fs/romfs/super.c index 6b00ca357c58..d0f8a38dfafa 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -280,8 +280,8 @@ error: static const struct file_operations romfs_dir_operations = { .read = generic_read_dir, - .iterate = romfs_readdir, - .llseek = default_llseek, + .iterate_shared = romfs_readdir, + .llseek = generic_file_llseek, }; static const struct inode_operations romfs_dir_inode_operations = { diff --git a/fs/select.c b/fs/select.c index 869293988c2a..8ed9da50896a 100644 --- a/fs/select.c +++ b/fs/select.c @@ -47,7 +47,7 @@ #define MAX_SLACK (100 * NSEC_PER_MSEC) -static long __estimate_accuracy(struct timespec *tv) +static long __estimate_accuracy(struct timespec64 *tv) { long slack; int divfactor = 1000; @@ -70,10 +70,10 @@ static long __estimate_accuracy(struct timespec *tv) return slack; } -u64 select_estimate_accuracy(struct timespec *tv) +u64 select_estimate_accuracy(struct timespec64 *tv) { u64 ret; - struct timespec now; + struct timespec64 now; /* * Realtime tasks get a slack of 0 for obvious reasons. @@ -82,8 +82,8 @@ u64 select_estimate_accuracy(struct timespec *tv) if (rt_task(current)) return 0; - ktime_get_ts(&now); - now = timespec_sub(*tv, now); + ktime_get_ts64(&now); + now = timespec64_sub(*tv, now); ret = __estimate_accuracy(&now); if (ret < current->timer_slack_ns) return current->timer_slack_ns; @@ -260,7 +260,7 @@ EXPORT_SYMBOL(poll_schedule_timeout); /** * poll_select_set_timeout - helper function to setup the timeout value - * @to: pointer to timespec variable for the final timeout + * @to: pointer to timespec64 variable for the final timeout * @sec: seconds (from user space) * @nsec: nanoseconds (from user space) * @@ -269,26 +269,28 @@ EXPORT_SYMBOL(poll_schedule_timeout); * * Returns -EINVAL if sec/nsec are not normalized. Otherwise 0. 
*/ -int poll_select_set_timeout(struct timespec *to, long sec, long nsec) +int poll_select_set_timeout(struct timespec64 *to, time64_t sec, long nsec) { - struct timespec ts = {.tv_sec = sec, .tv_nsec = nsec}; + struct timespec64 ts = {.tv_sec = sec, .tv_nsec = nsec}; - if (!timespec_valid(&ts)) + if (!timespec64_valid(&ts)) return -EINVAL; /* Optimize for the zero timeout value here */ if (!sec && !nsec) { to->tv_sec = to->tv_nsec = 0; } else { - ktime_get_ts(to); - *to = timespec_add_safe(*to, ts); + ktime_get_ts64(to); + *to = timespec64_add_safe(*to, ts); } return 0; } -static int poll_select_copy_remaining(struct timespec *end_time, void __user *p, +static int poll_select_copy_remaining(struct timespec64 *end_time, + void __user *p, int timeval, int ret) { + struct timespec64 rts64; struct timespec rts; struct timeval rtv; @@ -302,16 +304,18 @@ static int poll_select_copy_remaining(struct timespec *end_time, void __user *p, if (!end_time->tv_sec && !end_time->tv_nsec) return ret; - ktime_get_ts(&rts); - rts = timespec_sub(*end_time, rts); - if (rts.tv_sec < 0) - rts.tv_sec = rts.tv_nsec = 0; + ktime_get_ts64(&rts64); + rts64 = timespec64_sub(*end_time, rts64); + if (rts64.tv_sec < 0) + rts64.tv_sec = rts64.tv_nsec = 0; + + rts = timespec64_to_timespec(rts64); if (timeval) { if (sizeof(rtv) > sizeof(rtv.tv_sec) + sizeof(rtv.tv_usec)) memset(&rtv, 0, sizeof(rtv)); - rtv.tv_sec = rts.tv_sec; - rtv.tv_usec = rts.tv_nsec / NSEC_PER_USEC; + rtv.tv_sec = rts64.tv_sec; + rtv.tv_usec = rts64.tv_nsec / NSEC_PER_USEC; if (!copy_to_user(p, &rtv, sizeof(rtv))) return ret; @@ -396,7 +400,7 @@ static inline void wait_key_set(poll_table *wait, unsigned long in, wait->_key |= POLLOUT_SET; } -int do_select(int n, fd_set_bits *fds, struct timespec *end_time) +int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time) { ktime_t expire, *to = NULL; struct poll_wqueues table; @@ -522,7 +526,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) * pointer to the expiry value. */ if (end_time && !to) { - expire = timespec_to_ktime(*end_time); + expire = timespec64_to_ktime(*end_time); to = &expire; } @@ -545,7 +549,7 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time) * I'm trying ERESTARTNOHAND which restart only when you want to. 
*/ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp, - fd_set __user *exp, struct timespec *end_time) + fd_set __user *exp, struct timespec64 *end_time) { fd_set_bits fds; void *bits; @@ -622,7 +626,7 @@ out_nofds: SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp, fd_set __user *, exp, struct timeval __user *, tvp) { - struct timespec end_time, *to = NULL; + struct timespec64 end_time, *to = NULL; struct timeval tv; int ret; @@ -648,15 +652,17 @@ static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, const sigset_t __user *sigmask, size_t sigsetsize) { sigset_t ksigmask, sigsaved; - struct timespec ts, end_time, *to = NULL; + struct timespec ts; + struct timespec64 ts64, end_time, *to = NULL; int ret; if (tsp) { if (copy_from_user(&ts, tsp, sizeof(ts))) return -EFAULT; + ts64 = timespec_to_timespec64(ts); to = &end_time; - if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec)) + if (poll_select_set_timeout(to, ts64.tv_sec, ts64.tv_nsec)) return -EINVAL; } @@ -779,7 +785,7 @@ static inline unsigned int do_pollfd(struct pollfd *pollfd, poll_table *pwait, } static int do_poll(struct poll_list *list, struct poll_wqueues *wait, - struct timespec *end_time) + struct timespec64 *end_time) { poll_table* pt = &wait->pt; ktime_t expire, *to = NULL; @@ -854,7 +860,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait, * pointer to the expiry value. */ if (end_time && !to) { - expire = timespec_to_ktime(*end_time); + expire = timespec64_to_ktime(*end_time); to = &expire; } @@ -868,7 +874,7 @@ static int do_poll(struct poll_list *list, struct poll_wqueues *wait, sizeof(struct pollfd)) int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds, - struct timespec *end_time) + struct timespec64 *end_time) { struct poll_wqueues table; int err = -EFAULT, fdcount, len, size; @@ -936,7 +942,7 @@ static long do_restart_poll(struct restart_block *restart_block) { struct pollfd __user *ufds = restart_block->poll.ufds; int nfds = restart_block->poll.nfds; - struct timespec *to = NULL, end_time; + struct timespec64 *to = NULL, end_time; int ret; if (restart_block->poll.has_timeout) { @@ -957,7 +963,7 @@ static long do_restart_poll(struct restart_block *restart_block) SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds, int, timeout_msecs) { - struct timespec end_time, *to = NULL; + struct timespec64 end_time, *to = NULL; int ret; if (timeout_msecs >= 0) { @@ -993,7 +999,8 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, size_t, sigsetsize) { sigset_t ksigmask, sigsaved; - struct timespec ts, end_time, *to = NULL; + struct timespec ts; + struct timespec64 end_time, *to = NULL; int ret; if (tsp) { diff --git a/fs/splice.c b/fs/splice.c index b018eb485019..dd9bf7e410d2 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -1143,6 +1143,9 @@ static long do_splice_to(struct file *in, loff_t *ppos, if (unlikely(ret < 0)) return ret; + if (unlikely(len > MAX_RW_COUNT)) + len = MAX_RW_COUNT; + if (in->f_op->splice_read) splice_read = in->f_op->splice_read; else diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c index d8c2d747be28..a5845f94a2a1 100644 --- a/fs/squashfs/dir.c +++ b/fs/squashfs/dir.c @@ -231,6 +231,6 @@ failed_read: const struct file_operations squashfs_dir_ops = { .read = generic_read_dir, - .iterate = squashfs_readdir, - .llseek = default_llseek, + .iterate_shared = squashfs_readdir, + .llseek = generic_file_llseek, }; diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c index 
1e9de96288d8..1548b3784548 100644 --- a/fs/squashfs/xattr.c +++ b/fs/squashfs/xattr.c @@ -214,10 +214,12 @@ failed: static int squashfs_xattr_handler_get(const struct xattr_handler *handler, - struct dentry *d, const char *name, + struct dentry *unused, + struct inode *inode, + const char *name, void *buffer, size_t size) { - return squashfs_xattr_get(d_inode(d), handler->flags, name, + return squashfs_xattr_get(inode, handler->flags, name, buffer, size); } diff --git a/fs/super.c b/fs/super.c index 74914b1bae70..d78b9847e6cb 100644 --- a/fs/super.c +++ b/fs/super.c @@ -285,7 +285,7 @@ static void put_super(struct super_block *sb) * deactivate_locked_super - drop an active reference to superblock * @s: superblock to deactivate * - * Drops an active reference to superblock, converting it into a temprory + * Drops an active reference to superblock, converting it into a temporary * one if there is no other active references left. In that case we * tell fs driver to shut it down and drop the temporary reference we * had just acquired. diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index c0f0a3e643eb..2661b77fc8a7 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c @@ -23,7 +23,7 @@ static int sysv_readdir(struct file *, struct dir_context *); const struct file_operations sysv_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = sysv_readdir, + .iterate_shared = sysv_readdir, .fsync = generic_file_fsync, }; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index 795992a8321e..4b86d3a738e1 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1182,10 +1182,10 @@ const struct inode_operations ubifs_dir_inode_operations = { .rename = ubifs_rename, .setattr = ubifs_setattr, .getattr = ubifs_getattr, - .setxattr = ubifs_setxattr, - .getxattr = ubifs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = ubifs_listxattr, - .removexattr = ubifs_removexattr, + .removexattr = generic_removexattr, #ifdef CONFIG_UBIFS_ATIME_SUPPORT .update_time = ubifs_update_time, #endif @@ -1195,7 +1195,7 @@ const struct file_operations ubifs_dir_operations = { .llseek = generic_file_llseek, .release = ubifs_dir_release, .read = generic_read_dir, - .iterate = ubifs_readdir, + .iterate_shared = ubifs_readdir, .fsync = ubifs_fsync, .unlocked_ioctl = ubifs_ioctl, #ifdef CONFIG_COMPAT diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 446753d8ac34..08316972ff93 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -1597,10 +1597,10 @@ const struct address_space_operations ubifs_file_address_operations = { const struct inode_operations ubifs_file_inode_operations = { .setattr = ubifs_setattr, .getattr = ubifs_getattr, - .setxattr = ubifs_setxattr, - .getxattr = ubifs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = ubifs_listxattr, - .removexattr = ubifs_removexattr, + .removexattr = generic_removexattr, #ifdef CONFIG_UBIFS_ATIME_SUPPORT .update_time = ubifs_update_time, #endif @@ -1611,10 +1611,10 @@ const struct inode_operations ubifs_symlink_inode_operations = { .get_link = simple_get_link, .setattr = ubifs_setattr, .getattr = ubifs_getattr, - .setxattr = ubifs_setxattr, - .getxattr = ubifs_getxattr, + .setxattr = generic_setxattr, + .getxattr = generic_getxattr, .listxattr = ubifs_listxattr, - .removexattr = ubifs_removexattr, + .removexattr = generic_removexattr, #ifdef CONFIG_UBIFS_ATIME_SUPPORT .update_time = ubifs_update_time, #endif diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index f4fbc7b6b794..3cbb904a6d7d 100644 --- 
a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -28,8 +28,8 @@ #include "ubifs.h" #include <linux/slab.h> -#include <linux/random.h> #include <linux/math64.h> +#include <linux/uuid.h> /* * Default journal size in logical eraseblocks as a percent of total diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index e98c24ee25a1..70349954e78b 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -2040,6 +2040,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent) if (c->max_inode_sz > MAX_LFS_FILESIZE) sb->s_maxbytes = c->max_inode_sz = MAX_LFS_FILESIZE; sb->s_op = &ubifs_super_operations; + sb->s_xattr = ubifs_xattr_handlers; mutex_lock(&c->umount_mutex); err = mount_ubifs(c); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index 4cd7e569cd00..ddf9f6b9eee2 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -37,6 +37,7 @@ #include <linux/pagemap.h> #include <linux/backing-dev.h> #include <linux/security.h> +#include <linux/xattr.h> #include "ubifs-media.h" /* Version of this UBIFS implementation */ @@ -1732,12 +1733,8 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); /* xattr.c */ -int ubifs_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags); -ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, - size_t size); +extern const struct xattr_handler *ubifs_xattr_handlers[]; ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size); -int ubifs_removexattr(struct dentry *dentry, const char *name); int ubifs_init_security(struct inode *dentry, struct inode *inode, const struct qstr *qstr); diff --git a/fs/ubifs/xattr.c b/fs/ubifs/xattr.c index b043e044121d..6c277eb6aef9 100644 --- a/fs/ubifs/xattr.c +++ b/fs/ubifs/xattr.c @@ -249,42 +249,6 @@ out_free: return err; } -/** - * check_namespace - check extended attribute name-space. - * @nm: extended attribute name - * - * This function makes sure the extended attribute name belongs to one of the - * supported extended attribute name-spaces. Returns name-space index in case - * of success and a negative error code in case of failure. 
- */ -static int check_namespace(const struct qstr *nm) -{ - int type; - - if (nm->len > UBIFS_MAX_NLEN) - return -ENAMETOOLONG; - - if (!strncmp(nm->name, XATTR_TRUSTED_PREFIX, - XATTR_TRUSTED_PREFIX_LEN)) { - if (nm->name[XATTR_TRUSTED_PREFIX_LEN] == '\0') - return -EINVAL; - type = TRUSTED_XATTR; - } else if (!strncmp(nm->name, XATTR_USER_PREFIX, - XATTR_USER_PREFIX_LEN)) { - if (nm->name[XATTR_USER_PREFIX_LEN] == '\0') - return -EINVAL; - type = USER_XATTR; - } else if (!strncmp(nm->name, XATTR_SECURITY_PREFIX, - XATTR_SECURITY_PREFIX_LEN)) { - if (nm->name[XATTR_SECURITY_PREFIX_LEN] == '\0') - return -EINVAL; - type = SECURITY_XATTR; - } else - return -EOPNOTSUPP; - - return type; -} - static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum) { struct inode *inode; @@ -302,24 +266,23 @@ static struct inode *iget_xattr(struct ubifs_info *c, ino_t inum) return ERR_PTR(-EINVAL); } -static int setxattr(struct inode *host, const char *name, const void *value, - size_t size, int flags) +static int __ubifs_setxattr(struct inode *host, const char *name, + const void *value, size_t size, int flags) { struct inode *inode; struct ubifs_info *c = host->i_sb->s_fs_info; struct qstr nm = QSTR_INIT(name, strlen(name)); struct ubifs_dent_node *xent; union ubifs_key key; - int err, type; + int err; ubifs_assert(inode_is_locked(host)); if (size > UBIFS_MAX_INO_DATA) return -ERANGE; - type = check_namespace(&nm); - if (type < 0) - return type; + if (nm.len > UBIFS_MAX_NLEN) + return -ENAMETOOLONG; xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS); if (!xent) @@ -363,19 +326,10 @@ out_free: return err; } -int ubifs_setxattr(struct dentry *dentry, const char *name, - const void *value, size_t size, int flags) +static ssize_t __ubifs_getxattr(struct inode *host, const char *name, + void *buf, size_t size) { - dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", - name, d_inode(dentry)->i_ino, dentry, size); - - return setxattr(d_inode(dentry), name, value, size, flags); -} - -ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, - size_t size) -{ - struct inode *inode, *host = d_inode(dentry); + struct inode *inode; struct ubifs_info *c = host->i_sb->s_fs_info; struct qstr nm = QSTR_INIT(name, strlen(name)); struct ubifs_inode *ui; @@ -383,12 +337,8 @@ ssize_t ubifs_getxattr(struct dentry *dentry, const char *name, void *buf, union ubifs_key key; int err; - dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name, - host->i_ino, dentry, size); - - err = check_namespace(&nm); - if (err < 0) - return err; + if (nm.len > UBIFS_MAX_NLEN) + return -ENAMETOOLONG; xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS); if (!xent) @@ -460,8 +410,6 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size) lowest_xent_key(c, &key, host->i_ino); while (1) { - int type; - xent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(xent)) { err = PTR_ERR(xent); @@ -471,14 +419,10 @@ ssize_t ubifs_listxattr(struct dentry *dentry, char *buffer, size_t size) nm.name = xent->name; nm.len = le16_to_cpu(xent->nlen); - type = check_namespace(&nm); - if (unlikely(type < 0)) { - err = type; - break; - } - /* Show trusted namespace only for "power" users */ - if (type != TRUSTED_XATTR || capable(CAP_SYS_ADMIN)) { + if (strncmp(xent->name, XATTR_TRUSTED_PREFIX, + XATTR_TRUSTED_PREFIX_LEN) || + capable(CAP_SYS_ADMIN)) { memcpy(buffer + written, nm.name, nm.len + 1); written += nm.len + 1; } @@ -538,22 +482,19 @@ out_cancel: return err; } -int ubifs_removexattr(struct dentry *dentry, const char 
*name) +static int __ubifs_removexattr(struct inode *host, const char *name) { - struct inode *inode, *host = d_inode(dentry); + struct inode *inode; struct ubifs_info *c = host->i_sb->s_fs_info; struct qstr nm = QSTR_INIT(name, strlen(name)); struct ubifs_dent_node *xent; union ubifs_key key; int err; - dbg_gen("xattr '%s', ino %lu ('%pd')", name, - host->i_ino, dentry); ubifs_assert(inode_is_locked(host)); - err = check_namespace(&nm); - if (err < 0) - return err; + if (nm.len > UBIFS_MAX_NLEN) + return -ENAMETOOLONG; xent = kmalloc(UBIFS_MAX_XENT_NODE_SZ, GFP_NOFS); if (!xent) @@ -603,7 +544,7 @@ static int init_xattrs(struct inode *inode, const struct xattr *xattr_array, } strcpy(name, XATTR_SECURITY_PREFIX); strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); - err = setxattr(inode, name, xattr->value, xattr->value_len, 0); + err = __ubifs_setxattr(inode, name, xattr->value, xattr->value_len, 0); kfree(name); if (err < 0) break; @@ -626,3 +567,53 @@ int ubifs_init_security(struct inode *dentry, struct inode *inode, } return err; } + +static int ubifs_xattr_get(const struct xattr_handler *handler, + struct dentry *dentry, struct inode *inode, + const char *name, void *buffer, size_t size) +{ + dbg_gen("xattr '%s', ino %lu ('%pd'), buf size %zd", name, + inode->i_ino, dentry, size); + + return __ubifs_getxattr(inode, name, buffer, size); +} + +static int ubifs_xattr_set(const struct xattr_handler *handler, + struct dentry *dentry, const char *name, + const void *value, size_t size, int flags) +{ + struct inode *inode = d_inode(dentry); + + dbg_gen("xattr '%s', host ino %lu ('%pd'), size %zd", + name, inode->i_ino, dentry, size); + + if (value) + return __ubifs_setxattr(inode, name, value, size, flags); + else + return __ubifs_removexattr(inode, name); +} + +const struct xattr_handler ubifs_user_xattr_handler = { + .prefix = XATTR_USER_PREFIX, + .get = ubifs_xattr_get, + .set = ubifs_xattr_set, +}; + +const struct xattr_handler ubifs_trusted_xattr_handler = { + .prefix = XATTR_TRUSTED_PREFIX, + .get = ubifs_xattr_get, + .set = ubifs_xattr_set, +}; + +const struct xattr_handler ubifs_security_xattr_handler = { + .prefix = XATTR_SECURITY_PREFIX, + .get = ubifs_xattr_get, + .set = ubifs_xattr_set, +}; + +const struct xattr_handler *ubifs_xattr_handlers[] = { + &ubifs_user_xattr_handler, + &ubifs_trusted_xattr_handler, + &ubifs_security_xattr_handler, + NULL +}; diff --git a/fs/udf/dir.c b/fs/udf/dir.c index b51b371b874a..4c5593abc553 100644 --- a/fs/udf/dir.c +++ b/fs/udf/dir.c @@ -202,7 +202,7 @@ out: const struct file_operations udf_dir_operations = { .llseek = generic_file_llseek, .read = generic_read_dir, - .iterate = udf_readdir, + .iterate_shared = udf_readdir, .unlocked_ioctl = udf_ioctl, .fsync = generic_file_fsync, }; diff --git a/fs/udf/file.c b/fs/udf/file.c index 877ba1c9b461..632570617327 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -99,8 +99,7 @@ static int udf_adinicb_write_begin(struct file *file, return 0; } -static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t udf_adinicb_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { /* Fallback to buffered I/O. 
*/ return 0; @@ -153,9 +152,7 @@ out: if (retval > 0) { mark_inode_dirty(inode); - err = generic_write_sync(file, iocb->ki_pos - retval, retval); - if (err < 0) - retval = err; + retval = generic_write_sync(iocb, retval); } return retval; diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 2dc461eeb415..f323aff740ef 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -214,8 +214,7 @@ static int udf_write_begin(struct file *file, struct address_space *mapping, return ret; } -static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter, - loff_t offset) +static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; @@ -223,9 +222,9 @@ static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter, size_t count = iov_iter_count(iter); ssize_t ret; - ret = blockdev_direct_IO(iocb, inode, iter, offset, udf_get_block); + ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block); if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE)) - udf_write_failed(mapping, offset + count); + udf_write_failed(mapping, iocb->ki_pos + count); return ret; } diff --git a/fs/udf/namei.c b/fs/udf/namei.c index a2ba11eca995..c3e5c9679371 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c @@ -1250,7 +1250,7 @@ static struct dentry *udf_get_parent(struct dentry *child) brelse(fibh.sbh); tloc = lelb_to_cpu(cfi.icb.extLocation); - inode = udf_iget(d_inode(child)->i_sb, &tloc); + inode = udf_iget(child->d_sb, &tloc); if (IS_ERR(inode)) return ERR_CAST(inode); diff --git a/fs/udf/super.c b/fs/udf/super.c index fa92fe839fda..5e2c8c814e1b 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -78,6 +78,15 @@ #define VSD_FIRST_SECTOR_OFFSET 32768 #define VSD_MAX_SECTOR_OFFSET 0x800000 +/* + * Maximum number of Terminating Descriptor / Logical Volume Integrity + * Descriptor redirections. The chosen numbers are arbitrary - just that we + * hopefully don't limit any real use of rewritten inode on write-once media + * but avoid looping for too long on corrupted media. + */ +#define UDF_MAX_TD_NESTING 64 +#define UDF_MAX_LVID_NESTING 1000 + enum { UDF_MAX_LINKS = 0xffff }; /* These are the "meat" - everything else is stuffing */ @@ -919,14 +928,14 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block) #endif } - ret = udf_CS0toUTF8(outstr, 31, pvoldesc->volIdent, 32); + ret = udf_dstrCS0toUTF8(outstr, 31, pvoldesc->volIdent, 32); if (ret < 0) goto out_bh; strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret); udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident); - ret = udf_CS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128); + ret = udf_dstrCS0toUTF8(outstr, 127, pvoldesc->volSetIdent, 128); if (ret < 0) goto out_bh; @@ -1541,42 +1550,52 @@ out_bh: } /* - * udf_load_logicalvolint - * + * Find the prevailing Logical Volume Integrity Descriptor. 
*/ static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc) { - struct buffer_head *bh = NULL; + struct buffer_head *bh, *final_bh; uint16_t ident; struct udf_sb_info *sbi = UDF_SB(sb); struct logicalVolIntegrityDesc *lvid; + int indirections = 0; + + while (++indirections <= UDF_MAX_LVID_NESTING) { + final_bh = NULL; + while (loc.extLength > 0 && + (bh = udf_read_tagged(sb, loc.extLocation, + loc.extLocation, &ident))) { + if (ident != TAG_IDENT_LVID) { + brelse(bh); + break; + } + + brelse(final_bh); + final_bh = bh; - while (loc.extLength > 0 && - (bh = udf_read_tagged(sb, loc.extLocation, - loc.extLocation, &ident)) && - ident == TAG_IDENT_LVID) { - sbi->s_lvid_bh = bh; - lvid = (struct logicalVolIntegrityDesc *)bh->b_data; + loc.extLength -= sb->s_blocksize; + loc.extLocation++; + } - if (lvid->nextIntegrityExt.extLength) - udf_load_logicalvolint(sb, - leea_to_cpu(lvid->nextIntegrityExt)); + if (!final_bh) + return; - if (sbi->s_lvid_bh != bh) - brelse(bh); - loc.extLength -= sb->s_blocksize; - loc.extLocation++; + brelse(sbi->s_lvid_bh); + sbi->s_lvid_bh = final_bh; + + lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data; + if (lvid->nextIntegrityExt.extLength == 0) + return; + + loc = leea_to_cpu(lvid->nextIntegrityExt); } - if (sbi->s_lvid_bh != bh) - brelse(bh); + + udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n", + UDF_MAX_LVID_NESTING); + brelse(sbi->s_lvid_bh); + sbi->s_lvid_bh = NULL; } -/* - * Maximum number of Terminating Descriptor redirections. The chosen number is - * arbitrary - just that we hopefully don't limit any real use of rewritten - * inode on write-once media but avoid looping for too long on corrupted media. - */ -#define UDF_MAX_TD_NESTING 64 /* * Process a main/reserve volume descriptor sequence. diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h index 1f32c7bd9f57..27b5335730c9 100644 --- a/fs/udf/udf_sb.h +++ b/fs/udf/udf_sb.h @@ -3,9 +3,7 @@ #include <linux/mutex.h> #include <linux/bitops.h> - -/* Since UDF 2.01 is ISO 13346 based... 
*/ -#define UDF_SUPER_MAGIC 0x15013346 +#include <linux/magic.h> #define UDF_MAX_READ_VERSION 0x0250 #define UDF_MAX_WRITE_VERSION 0x0201 diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h index 972b70625614..263829ef1873 100644 --- a/fs/udf/udfdecl.h +++ b/fs/udf/udfdecl.h @@ -212,7 +212,7 @@ extern int udf_get_filename(struct super_block *, const uint8_t *, int, uint8_t *, int); extern int udf_put_filename(struct super_block *, const uint8_t *, int, uint8_t *, int); -extern int udf_CS0toUTF8(uint8_t *, int, const uint8_t *, int); +extern int udf_dstrCS0toUTF8(uint8_t *, int, const uint8_t *, int); /* ialloc.c */ extern void udf_free_inode(struct inode *); diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c index 3ff42f4437f3..695389a4fc23 100644 --- a/fs/udf/unicode.c +++ b/fs/udf/unicode.c @@ -335,9 +335,21 @@ try_again: return u_len; } -int udf_CS0toUTF8(uint8_t *utf_o, int o_len, const uint8_t *ocu_i, int i_len) +int udf_dstrCS0toUTF8(uint8_t *utf_o, int o_len, + const uint8_t *ocu_i, int i_len) { - return udf_name_from_CS0(utf_o, o_len, ocu_i, i_len, + int s_len = 0; + + if (i_len > 0) { + s_len = ocu_i[i_len - 1]; + if (s_len >= i_len) { + pr_err("incorrect dstring lengths (%d/%d)\n", + s_len, i_len); + return -EINVAL; + } + } + + return udf_name_from_CS0(utf_o, o_len, ocu_i, s_len, udf_uni2char_utf8, 0); } diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 0b1457292734..57dcceda17d6 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c @@ -105,7 +105,7 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, } -static void ufs_check_page(struct page *page) +static bool ufs_check_page(struct page *page) { struct inode *dir = page->mapping->host; struct super_block *sb = dir->i_sb; @@ -143,7 +143,7 @@ static void ufs_check_page(struct page *page) goto Eend; out: SetPageChecked(page); - return; + return true; /* Too bad, we had an error */ @@ -180,8 +180,8 @@ Eend: "offset=%lu", dir->i_ino, (page->index<<PAGE_SHIFT)+offs); fail: - SetPageChecked(page); SetPageError(page); + return false; } static struct page *ufs_get_page(struct inode *dir, unsigned long n) @@ -190,10 +190,10 @@ static struct page *ufs_get_page(struct inode *dir, unsigned long n) struct page *page = read_mapping_page(mapping, n, NULL); if (!IS_ERR(page)) { kmap(page); - if (!PageChecked(page)) - ufs_check_page(page); - if (PageError(page)) - goto fail; + if (unlikely(!PageChecked(page))) { + if (PageError(page) || !ufs_check_page(page)) + goto fail; + } } return page; @@ -653,7 +653,7 @@ not_empty: const struct file_operations ufs_dir_operations = { .read = generic_read_dir, - .iterate = ufs_readdir, + .iterate_shared = ufs_readdir, .fsync = generic_file_fsync, .llseek = generic_file_llseek, }; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 442fd52ebffe..f04ab232d08d 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -132,7 +132,7 @@ static struct dentry *ufs_get_parent(struct dentry *child) ino = ufs_inode_by_name(d_inode(child), &dot_dot); if (!ino) return ERR_PTR(-ENOENT); - return d_obtain_alias(ufs_iget(d_inode(child)->i_sb, ino)); + return d_obtain_alias(ufs_iget(child->d_sb, ino)); } static const struct export_operations ufs_export_ops = { diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 66cdb44616d5..2d97952e341a 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -137,7 +137,7 @@ static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) VM_BUG_ON(waitqueue_active(&ctx->fault_wqh)); VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock)); VM_BUG_ON(waitqueue_active(&ctx->fd_wqh)); - mmput(ctx->mm); + 
mmdrop(ctx->mm); kmem_cache_free(userfaultfd_ctx_cachep, ctx); } } @@ -434,6 +434,9 @@ static int userfaultfd_release(struct inode *inode, struct file *file) ACCESS_ONCE(ctx->released) = true; + if (!mmget_not_zero(mm)) + goto wakeup; + /* * Flush page faults out of all CPUs. NOTE: all page faults * must be retried without returning VM_FAULT_SIGBUS if @@ -466,7 +469,8 @@ static int userfaultfd_release(struct inode *inode, struct file *file) vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX; } up_write(&mm->mmap_sem); - + mmput(mm); +wakeup: /* * After no new page faults can wait on this fault_*wqh, flush * the last page faults that may have been already waiting on @@ -760,10 +764,12 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, start = uffdio_register.range.start; end = start + uffdio_register.range.len; + ret = -ENOMEM; + if (!mmget_not_zero(mm)) + goto out; + down_write(&mm->mmap_sem); vma = find_vma_prev(mm, start, &prev); - - ret = -ENOMEM; if (!vma) goto out_unlock; @@ -864,6 +870,7 @@ static int userfaultfd_register(struct userfaultfd_ctx *ctx, } while (vma && vma->vm_start < end); out_unlock: up_write(&mm->mmap_sem); + mmput(mm); if (!ret) { /* * Now that we scanned all vmas we can already tell @@ -902,10 +909,12 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, start = uffdio_unregister.start; end = start + uffdio_unregister.len; + ret = -ENOMEM; + if (!mmget_not_zero(mm)) + goto out; + down_write(&mm->mmap_sem); vma = find_vma_prev(mm, start, &prev); - - ret = -ENOMEM; if (!vma) goto out_unlock; @@ -998,6 +1007,7 @@ static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, } while (vma && vma->vm_start < end); out_unlock: up_write(&mm->mmap_sem); + mmput(mm); out: return ret; } @@ -1067,9 +1077,11 @@ static int userfaultfd_copy(struct userfaultfd_ctx *ctx, goto out; if (uffdio_copy.mode & ~UFFDIO_COPY_MODE_DONTWAKE) goto out; - - ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, - uffdio_copy.len); + if (mmget_not_zero(ctx->mm)) { + ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, + uffdio_copy.len); + mmput(ctx->mm); + } if (unlikely(put_user(ret, &user_uffdio_copy->copy))) return -EFAULT; if (ret < 0) @@ -1110,8 +1122,11 @@ static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, if (uffdio_zeropage.mode & ~UFFDIO_ZEROPAGE_MODE_DONTWAKE) goto out; - ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, - uffdio_zeropage.range.len); + if (mmget_not_zero(ctx->mm)) { + ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, + uffdio_zeropage.range.len); + mmput(ctx->mm); + } if (unlikely(put_user(ret, &user_uffdio_zeropage->zeropage))) return -EFAULT; if (ret < 0) @@ -1289,12 +1304,12 @@ static struct file *userfaultfd_file_create(int flags) ctx->released = false; ctx->mm = current->mm; /* prevent the mm struct to be freed */ - atomic_inc(&ctx->mm->mm_users); + atomic_inc(&ctx->mm->mm_count); file = anon_inode_getfile("[userfaultfd]", &userfaultfd_fops, ctx, O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS)); if (IS_ERR(file)) { - mmput(ctx->mm); + mmdrop(ctx->mm); kmem_cache_free(userfaultfd_ctx_cachep, ctx); } out: diff --git a/fs/xattr.c b/fs/xattr.c index 4861322e28e8..b11945e15fde 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -192,7 +192,7 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value, if (!inode->i_op->getxattr) return -EOPNOTSUPP; - error = inode->i_op->getxattr(dentry, name, NULL, 0); + error = inode->i_op->getxattr(dentry, inode, name, NULL, 0); if (error < 0) return error; @@ 
diff --git a/fs/xattr.c b/fs/xattr.c
index 4861322e28e8..b11945e15fde 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -192,7 +192,7 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
 	if (!inode->i_op->getxattr)
 		return -EOPNOTSUPP;
 
-	error = inode->i_op->getxattr(dentry, name, NULL, 0);
+	error = inode->i_op->getxattr(dentry, inode, name, NULL, 0);
 	if (error < 0)
 		return error;
 
@@ -203,7 +203,7 @@ vfs_getxattr_alloc(struct dentry *dentry, const char *name, char **xattr_value,
 		memset(value, 0, error + 1);
 	}
 
-	error = inode->i_op->getxattr(dentry, name, value, error);
+	error = inode->i_op->getxattr(dentry, inode, name, value, error);
 	*xattr_value = value;
 	return error;
 }
@@ -236,7 +236,7 @@ vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
 	}
 nolsm:
 	if (inode->i_op->getxattr)
-		error = inode->i_op->getxattr(dentry, name, value, size);
+		error = inode->i_op->getxattr(dentry, inode, name, value, size);
 	else
 		error = -EOPNOTSUPP;
 
@@ -691,14 +691,16 @@ xattr_resolve_name(const struct xattr_handler **handlers, const char **name)
  * Find the handler for the prefix and dispatch its get() operation.
  */
 ssize_t
-generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size)
+generic_getxattr(struct dentry *dentry, struct inode *inode,
+		 const char *name, void *buffer, size_t size)
 {
 	const struct xattr_handler *handler;
 
 	handler = xattr_resolve_name(dentry->d_sb->s_xattr, &name);
 	if (IS_ERR(handler))
 		return PTR_ERR(handler);
-	return handler->get(handler, dentry, name, buffer, size);
+	return handler->get(handler, dentry, inode,
+			    name, buffer, size);
 }
 
 /*
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index 2d5df1f23bbc..b6e527b8eccb 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -158,22 +158,14 @@ xfs_get_acl(struct inode *inode, int type)
 	if (error) {
 		/*
 		 * If the attribute doesn't exist make sure we have a negative
-		 * cache entry, for any other error assume it is transient and
-		 * leave the cache entry as ACL_NOT_CACHED.
+		 * cache entry, for any other error assume it is transient.
 		 */
-		if (error == -ENOATTR)
-			goto out_update_cache;
-		acl = ERR_PTR(error);
-		goto out;
+		if (error != -ENOATTR)
+			acl = ERR_PTR(error);
+	} else {
+		acl = xfs_acl_from_disk(xfs_acl, len,
+					XFS_ACL_MAX_ENTRIES(ip->i_mount));
 	}
-
-	acl = xfs_acl_from_disk(xfs_acl, len, XFS_ACL_MAX_ENTRIES(ip->i_mount));
-	if (IS_ERR(acl))
-		goto out;
-
-out_update_cache:
-	set_cached_acl(inode, type, acl);
-out:
 	kmem_free(xfs_acl);
 	return acl;
 }
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index e49b2406d15d..c535887c60a8 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1406,8 +1406,7 @@ xfs_end_io_direct_write(
 STATIC ssize_t
 xfs_vm_direct_IO(
 	struct kiocb		*iocb,
-	struct iov_iter		*iter,
-	loff_t			offset)
+	struct iov_iter		*iter)
 {
 	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 	dio_iodone_t		*endio = NULL;
@@ -1420,12 +1419,12 @@ xfs_vm_direct_IO(
 	}
 
 	if (IS_DAX(inode)) {
-		return dax_do_io(iocb, inode, iter, offset,
+		return dax_do_io(iocb, inode, iter,
 				 xfs_get_blocks_direct, endio, 0);
 	}
 
 	bdev = xfs_find_bdev_for_inode(inode);
-	return __blockdev_direct_IO(iocb, inode, bdev, iter, offset,
+	return __blockdev_direct_IO(iocb, inode, bdev, iter,
 			xfs_get_blocks_direct, endio, NULL, flags);
 }
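xfs_vm_direct_IO() above loses its loff_t argument because ->direct_IO now reads the starting offset from iocb->ki_pos. A hedged sketch of a filesystem ->direct_IO method under the new prototype; the examplefs_* names, the toy block mapping and the DIO flag choice are illustrative, while the __blockdev_direct_IO() argument order is the one visible in the hunk above.

#include <linux/fs.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>

/* Toy 1:1 block mapping so the sketch is self-contained; a real
 * filesystem would look the block up in its own metadata. */
static int examplefs_get_block(struct inode *inode, sector_t iblock,
			       struct buffer_head *bh, int create)
{
	map_bh(bh, inode->i_sb, iblock);
	return 0;
}

static ssize_t examplefs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* the start offset is no longer passed in; it lives in the iocb */
	pr_debug("examplefs: %s DIO at %lld, %zu bytes\n",
		 iov_iter_rw(iter) == WRITE ? "write" : "read",
		 (long long)iocb->ki_pos, iov_iter_count(iter));

	return __blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, iter,
				    examplefs_get_block, NULL, NULL,
				    DIO_LOCKING | DIO_SKIP_HOLES);
}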
it and - * take it out of the mapping. + * If the caller just finished processing a buffer, it will tell us + * we need to trim that block out of the mapping now it is done. */ - - if (bp) { - xfs_trans_brelse(NULL, bp); - bp = NULL; + if (trim_map) { mip->map_blocks -= geo->fsbcount; /* * Loop to get rid of the extents for the @@ -533,10 +531,17 @@ xfs_dir2_leaf_getdents( */ if (!bp || ptr >= (char *)bp->b_addr + geo->blksize) { int lock_mode; + bool trim_map = false; + + if (bp) { + xfs_trans_brelse(NULL, bp); + bp = NULL; + trim_map = true; + } lock_mode = xfs_ilock_data_map_shared(dp); error = xfs_dir2_leaf_readbuf(args, bufsize, map_info, - &curoff, &bp); + &curoff, &bp, trim_map); xfs_iunlock(dp, lock_mode); if (error || !map_info->map_valid) break; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 569938a4a357..85ce3032f815 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -718,18 +718,19 @@ xfs_file_dio_aio_write( int unaligned_io = 0; int iolock; size_t count = iov_iter_count(from); - loff_t pos = iocb->ki_pos; loff_t end; struct iov_iter data; struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? mp->m_rtdev_targp : mp->m_ddev_targp; /* DIO must be aligned to device logical sector size */ - if (!IS_DAX(inode) && ((pos | count) & target->bt_logical_sectormask)) + if (!IS_DAX(inode) && + ((iocb->ki_pos | count) & target->bt_logical_sectormask)) return -EINVAL; /* "unaligned" here means not aligned to a filesystem block */ - if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask)) + if ((iocb->ki_pos & mp->m_blockmask) || + ((iocb->ki_pos + count) & mp->m_blockmask)) unaligned_io = 1; /* @@ -760,8 +761,7 @@ xfs_file_dio_aio_write( if (ret) goto out; count = iov_iter_count(from); - pos = iocb->ki_pos; - end = pos + count - 1; + end = iocb->ki_pos + count - 1; /* * See xfs_file_read_iter() for why we do a full-file flush here. 
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 569938a4a357..85ce3032f815 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -718,18 +718,19 @@ xfs_file_dio_aio_write(
 	int			unaligned_io = 0;
 	int			iolock;
 	size_t			count = iov_iter_count(from);
-	loff_t			pos = iocb->ki_pos;
 	loff_t			end;
 	struct iov_iter		data;
 	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
 					mp->m_rtdev_targp : mp->m_ddev_targp;
 
 	/* DIO must be aligned to device logical sector size */
-	if (!IS_DAX(inode) && ((pos | count) & target->bt_logical_sectormask))
+	if (!IS_DAX(inode) &&
+	    ((iocb->ki_pos | count) & target->bt_logical_sectormask))
 		return -EINVAL;
 
 	/* "unaligned" here means not aligned to a filesystem block */
-	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
+	if ((iocb->ki_pos & mp->m_blockmask) ||
+	    ((iocb->ki_pos + count) & mp->m_blockmask))
 		unaligned_io = 1;
 
 	/*
@@ -760,8 +761,7 @@ xfs_file_dio_aio_write(
 	if (ret)
 		goto out;
 	count = iov_iter_count(from);
-	pos = iocb->ki_pos;
-	end = pos + count - 1;
+	end = iocb->ki_pos + count - 1;
 
 	/*
 	 * See xfs_file_read_iter() for why we do a full-file flush here.
@@ -794,19 +794,18 @@ xfs_file_dio_aio_write(
 	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
 
 	data = *from;
-	ret = mapping->a_ops->direct_IO(iocb, &data, pos);
+	ret = mapping->a_ops->direct_IO(iocb, &data);
 
 	/* see generic_file_direct_write() for why this is necessary */
 	if (mapping->nrpages) {
 		invalidate_inode_pages2_range(mapping,
-					      pos >> PAGE_SHIFT,
+					      iocb->ki_pos >> PAGE_SHIFT,
 					      end >> PAGE_SHIFT);
 	}
 
 	if (ret > 0) {
-		pos += ret;
+		iocb->ki_pos += ret;
 		iov_iter_advance(from, ret);
-		iocb->ki_pos = pos;
 	}
 out:
 	xfs_rw_iunlock(ip, iolock);
@@ -904,14 +903,10 @@ xfs_file_write_iter(
 		ret = xfs_file_buffered_aio_write(iocb, from);
 
 	if (ret > 0) {
-		ssize_t err;
-
 		XFS_STATS_ADD(ip->i_mount, xs_write_bytes, ret);
 
 		/* Handle various SYNC-type writes */
-		err = generic_write_sync(file, iocb->ki_pos - ret, ret);
-		if (err < 0)
-			ret = err;
+		ret = generic_write_sync(iocb, ret);
 	}
 	return ret;
 }
@@ -1714,7 +1709,7 @@ const struct file_operations xfs_file_operations = {
 const struct file_operations xfs_dir_file_operations = {
 	.open		= xfs_dir_open,
 	.read		= generic_read_dir,
-	.iterate	= xfs_file_readdir,
+	.iterate_shared	= xfs_file_readdir,
 	.llseek		= generic_file_llseek,
 	.unlocked_ioctl	= xfs_file_ioctl,
 #ifdef CONFIG_COMPAT
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 110f1d7d86b0..d111f691f313 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -32,11 +32,11 @@
 
 static int
-xfs_xattr_get(const struct xattr_handler *handler, struct dentry *dentry,
-		const char *name, void *value, size_t size)
+xfs_xattr_get(const struct xattr_handler *handler, struct dentry *unused,
+		struct inode *inode, const char *name, void *value, size_t size)
 {
 	int xflags = handler->flags;
-	struct xfs_inode *ip = XFS_I(d_inode(dentry));
+	struct xfs_inode *ip = XFS_I(inode);
 	int error, asize = size;
 
 	/* Convert Linux syscall to XFS internal ATTR flags */
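The xfs_xattr_get() hunk above is the handler-side counterpart of the generic_getxattr() change earlier in this section: xattr_handler->get() now receives the inode explicitly instead of deriving it from the dentry. A sketch of a handler written against the new signature; the examplefs_* names and the -ENODATA stub are placeholders.

#include <linux/xattr.h>
#include <linux/errno.h>

/* Placeholder backend: a real filesystem would read the attribute from
 * its on-disk inode here. */
static int examplefs_xattr_read(struct inode *inode, const char *name,
				void *buffer, size_t size)
{
	return -ENODATA;
}

static int examplefs_xattr_get(const struct xattr_handler *handler,
			       struct dentry *unused, struct inode *inode,
			       const char *name, void *buffer, size_t size)
{
	/* use the inode handed in; the dentry is no longer needed here */
	return examplefs_xattr_read(inode, name, buffer, size);
}

const struct xattr_handler examplefs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.get	= examplefs_xattr_get,
	/* .set omitted to keep the sketch short */
};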