Diffstat (limited to 'fs')
488 files changed, 15701 insertions, 8314 deletions
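The single most common change across these files is mechanical: bare /* fall through */ comments inside switch statements are replaced by the fallthrough pseudo-keyword, which the kernel defines in include/linux/compiler_attributes.h so that compilers supporting -Wimplicit-fallthrough can check the intent. A minimal standalone sketch of the pattern, modelled on adfs_readval() in fs/adfs/dir_f.c below (readval_le and the local macro fallback are illustrative only, not part of this diff):

/* Borrowed from include/linux/compiler_attributes.h so the sketch
 * compiles outside the kernel tree. */
#ifndef fallthrough
# if defined(__GNUC__) && __GNUC__ >= 7
#  define fallthrough	__attribute__((__fallthrough__))
# else
#  define fallthrough	do {} while (0)	/* fallthrough */
# endif
#endif

/* Little-endian read of 1..4 bytes: each case deliberately drops into
 * the next, and the annotation tells the compiler this is intentional
 * rather than a forgotten break. */
static inline unsigned int readval_le(const unsigned char *p, int len)
{
	unsigned int val = 0;

	switch (len) {
	case 4: val |= p[3] << 24;
		fallthrough;
	case 3: val |= p[2] << 16;
		fallthrough;
	case 2: val |= p[1] << 8;
		fallthrough;
	default: val |= p[0];
	}
	return val;
}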
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c index 15a99f9c7253..39def020a074 100644 --- a/fs/9p/v9fs.c +++ b/fs/9p/v9fs.c @@ -500,10 +500,9 @@ void v9fs_session_close(struct v9fs_session_info *v9ses) } #ifdef CONFIG_9P_FSCACHE - if (v9ses->fscache) { + if (v9ses->fscache) v9fs_cache_session_put_cookie(v9ses); - kfree(v9ses->cachetag); - } + kfree(v9ses->cachetag); #endif kfree(v9ses->uname); kfree(v9ses->aname); diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 92cd1d80218d..3576123d8299 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c @@ -213,7 +213,7 @@ static int v9fs_file_do_lock(struct file *filp, int cmd, struct file_lock *fl) break; default: WARN_ONCE(1, "unknown lock status code: %d\n", status); - /* fall through */ + fallthrough; case P9_LOCK_ERROR: case P9_LOCK_GRACE: res = -ENOLCK; diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index c9255d399917..ae0c38ad1fcb 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -223,8 +223,7 @@ v9fs_blank_wstat(struct p9_wstat *wstat) struct inode *v9fs_alloc_inode(struct super_block *sb) { struct v9fs_inode *v9inode; - v9inode = (struct v9fs_inode *)kmem_cache_alloc(v9fs_inode_cache, - GFP_KERNEL); + v9inode = kmem_cache_alloc(v9fs_inode_cache, GFP_KERNEL); if (!v9inode) return NULL; #ifdef CONFIG_9P_FSCACHE @@ -368,59 +367,6 @@ struct inode *v9fs_get_inode(struct super_block *sb, umode_t mode, dev_t rdev) return inode; } -/* -static struct v9fs_fid* -v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry) -{ - int err; - int nfid; - struct v9fs_fid *ret; - struct v9fs_fcall *fcall; - - nfid = v9fs_get_idpool(&v9ses->fidpool); - if (nfid < 0) { - eprintk(KERN_WARNING, "no free fids available\n"); - return ERR_PTR(-ENOSPC); - } - - err = v9fs_t_walk(v9ses, fid, nfid, (char *) dentry->d_name.name, - &fcall); - - if (err < 0) { - if (fcall && fcall->id == RWALK) - goto clunk_fid; - - PRINT_FCALL_ERROR("walk error", fcall); - v9fs_put_idpool(nfid, &v9ses->fidpool); - goto error; - } - - kfree(fcall); - fcall = NULL; - ret = v9fs_fid_create(v9ses, nfid); - if (!ret) { - err = -ENOMEM; - goto clunk_fid; - } - - err = v9fs_fid_insert(ret, dentry); - if (err < 0) { - v9fs_fid_destroy(ret); - goto clunk_fid; - } - - return ret; - -clunk_fid: - v9fs_t_clunk(v9ses, nfid); - -error: - kfree(fcall); - return ERR_PTR(err); -} -*/ - - /** * v9fs_clear_inode - release an inode * @inode: inode to release @@ -1090,7 +1036,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) { int retval; struct v9fs_session_info *v9ses; - struct p9_fid *fid; + struct p9_fid *fid = NULL; struct p9_wstat wstat; p9_debug(P9_DEBUG_VFS, "\n"); @@ -1100,7 +1046,12 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) retval = -EPERM; v9ses = v9fs_dentry2v9ses(dentry); - fid = v9fs_fid_lookup(dentry); + if (iattr->ia_valid & ATTR_FILE) { + fid = iattr->ia_file->private_data; + WARN_ON(!fid); + } + if (!fid) + fid = v9fs_fid_lookup(dentry); if(IS_ERR(fid)) return PTR_ERR(fid); diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c index 60328b21c5fb..0028eccb665a 100644 --- a/fs/9p/vfs_inode_dotl.c +++ b/fs/9p/vfs_inode_dotl.c @@ -540,7 +540,7 @@ static int v9fs_mapped_iattr_valid(int iattr_valid) int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) { int retval; - struct p9_fid *fid; + struct p9_fid *fid = NULL; struct p9_iattr_dotl p9attr; struct inode *inode = d_inode(dentry); @@ -560,7 +560,12 @@ int v9fs_vfs_setattr_dotl(struct dentry *dentry, struct iattr *iattr) 
p9attr.mtime_sec = iattr->ia_mtime.tv_sec; p9attr.mtime_nsec = iattr->ia_mtime.tv_nsec; - fid = v9fs_fid_lookup(dentry); + if (iattr->ia_valid & ATTR_FILE) { + fid = iattr->ia_file->private_data; + WARN_ON(!fid); + } + if (!fid) + fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return PTR_ERR(fid); diff --git a/fs/Kconfig b/fs/Kconfig index a88aa3af73c1..aa4c12282301 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -201,6 +201,27 @@ config TMPFS_XATTR If unsure, say N. +config TMPFS_INODE64 + bool "Use 64-bit ino_t by default in tmpfs" + depends on TMPFS && 64BIT + default n + help + tmpfs has historically used only inode numbers as wide as an unsigned + int. In some cases this can cause wraparound, potentially resulting + in multiple files with the same inode number on a single device. This + option makes tmpfs use the full width of ino_t by default, without + needing to specify the inode64 option when mounting. + + But if a long-lived tmpfs is to be accessed by 32-bit applications so + ancient that opening a file larger than 2GiB fails with EINVAL, then + the INODE64 config option and inode64 mount option risk operations + failing with EOVERFLOW once 33-bit inode numbers are reached. + + To override this configured default, use the inode32 or inode64 + option when mounting. + + If unsure, say N. + config HUGETLBFS bool "HugeTLB file system support" depends on X86 || IA64 || SPARC64 || (S390 && 64BIT) || \ diff --git a/fs/Makefile b/fs/Makefile index 2ce5112b02c8..1c7b0e3f6daa 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -13,7 +13,7 @@ obj-y := open.o read_write.o file_table.o super.o \ seq_file.o xattr.o libfs.o fs-writeback.o \ pnode.o splice.o sync.o utimes.o d_path.o \ stack.o fs_struct.o statfs.o fs_pin.o nsfs.o \ - fs_types.o fs_context.o fs_parser.o fsopen.o + fs_types.o fs_context.o fs_parser.o fsopen.o init.o ifeq ($(CONFIG_BLOCK),y) obj-y += buffer.o block_dev.o direct-io.o mpage.o diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c index 30d526fecc3f..05e963402e25 100644 --- a/fs/adfs/dir_f.c +++ b/fs/adfs/dir_f.c @@ -18,11 +18,11 @@ static inline unsigned int adfs_readval(unsigned char *p, int len) switch (len) { case 4: val |= p[3] << 24; - /* fall through */ + fallthrough; case 3: val |= p[2] << 16; - /* fall through */ + fallthrough; case 2: val |= p[1] << 8; - /* fall through */ + fallthrough; default: val |= p[0]; } return val; @@ -32,11 +32,11 @@ static inline void adfs_writeval(unsigned char *p, int len, unsigned int val) { switch (len) { case 4: p[3] = val >> 24; - /* fall through */ + fallthrough; case 3: p[2] = val >> 16; - /* fall through */ + fallthrough; case 2: p[1] = val >> 8; - /* fall through */ + fallthrough; default: p[0] = val; } } diff --git a/fs/adfs/super.c b/fs/adfs/super.c index a3cc8ecb50da..d553bb5bc17a 100644 --- a/fs/adfs/super.c +++ b/fs/adfs/super.c @@ -12,6 +12,7 @@ #include <linux/slab.h> #include <linux/statfs.h> #include <linux/user_namespace.h> +#include <linux/blkdev.h> #include "adfs.h" #include "dir_f.h" #include "dir_fplus.h" diff --git a/fs/affs/amigaffs.c b/fs/affs/amigaffs.c index f708c45d5f66..29f11e10a7c7 100644 --- a/fs/affs/amigaffs.c +++ b/fs/affs/amigaffs.c @@ -420,24 +420,51 @@ affs_mode_to_prot(struct inode *inode) u32 prot = AFFS_I(inode)->i_protect; umode_t mode = inode->i_mode; + /* + * First, clear all RWED bits for owner, group, other. + * Then, recalculate them afresh. 
+ * + * We'll always clear the delete-inhibit bit for the owner, as that is + * the classic single-user mode AmigaOS protection bit and we need to + * stay compatible with all scenarios. + * + * Since multi-user AmigaOS is an extension, we'll only set the + * delete-allow bit if any of the other bits in the same user class + * (group/other) are used. + */ + prot &= ~(FIBF_NOEXECUTE | FIBF_NOREAD + | FIBF_NOWRITE | FIBF_NODELETE + | FIBF_GRP_EXECUTE | FIBF_GRP_READ + | FIBF_GRP_WRITE | FIBF_GRP_DELETE + | FIBF_OTR_EXECUTE | FIBF_OTR_READ + | FIBF_OTR_WRITE | FIBF_OTR_DELETE); + + /* Classic single-user AmigaOS flags. These are inverted. */ if (!(mode & 0100)) prot |= FIBF_NOEXECUTE; if (!(mode & 0400)) prot |= FIBF_NOREAD; if (!(mode & 0200)) prot |= FIBF_NOWRITE; + + /* Multi-user extended flags. Not inverted. */ if (mode & 0010) prot |= FIBF_GRP_EXECUTE; if (mode & 0040) prot |= FIBF_GRP_READ; if (mode & 0020) prot |= FIBF_GRP_WRITE; + if (mode & 0070) + prot |= FIBF_GRP_DELETE; + if (mode & 0001) prot |= FIBF_OTR_EXECUTE; if (mode & 0004) prot |= FIBF_OTR_READ; if (mode & 0002) prot |= FIBF_OTR_WRITE; + if (mode & 0007) + prot |= FIBF_OTR_DELETE; AFFS_I(inode)->i_protect = prot; } diff --git a/fs/affs/file.c b/fs/affs/file.c index a85817f54483..d91b0133d95d 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c @@ -14,6 +14,7 @@ */ #include <linux/uio.h> +#include <linux/blkdev.h> #include "affs.h" static struct buffer_head *affs_get_extblock_slow(struct inode *inode, u32 ext); @@ -428,6 +429,24 @@ static int affs_write_begin(struct file *file, struct address_space *mapping, return ret; } +static int affs_write_end(struct file *file, struct address_space *mapping, + loff_t pos, unsigned int len, unsigned int copied, + struct page *page, void *fsdata) +{ + struct inode *inode = mapping->host; + int ret; + + ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); + + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + + return ret; +} + static sector_t _affs_bmap(struct address_space *mapping, sector_t block) { return generic_block_bmap(mapping,block,affs_get_block); @@ -437,7 +456,7 @@ const struct address_space_operations affs_aops = { .readpage = affs_readpage, .writepage = affs_writepage, .write_begin = affs_write_begin, - .write_end = generic_write_end, + .write_end = affs_write_end, .direct_IO = affs_direct_IO, .bmap = _affs_bmap }; @@ -794,6 +813,12 @@ done: if (tmp > inode->i_size) inode->i_size = AFFS_I(inode)->mmu_private = tmp; + /* Clear Archived bit on file writes, as AmigaOS would do */ + if (AFFS_I(inode)->i_protect & FIBF_ARCHIVED) { + AFFS_I(inode)->i_protect &= ~FIBF_ARCHIVED; + mark_inode_dirty(inode); + } + err_first_bh: unlock_page(page); put_page(page); diff --git a/fs/affs/inode.c b/fs/affs/inode.c index a346cf7659f1..044412110b52 100644 --- a/fs/affs/inode.c +++ b/fs/affs/inode.c @@ -93,7 +93,7 @@ struct inode *affs_iget(struct super_block *sb, unsigned long ino) case ST_ROOT: inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; - /* fall through */ + fallthrough; case ST_USERDIR: if (be32_to_cpu(tail->stype) == ST_USERDIR || affs_test_opt(sbi->s_flags, SF_SETMODE)) { diff --git a/fs/affs/super.c b/fs/affs/super.c index 47107c6712a6..a100cd9950c8 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c @@ -474,7 +474,7 @@ got_root: case MUFS_INTLFFS: case MUFS_DCFFS: affs_set_opt(sbi->s_flags, SF_MUFS); - /* fall thru */ + 
fallthrough; case FS_INTLFFS: case FS_DCFFS: affs_set_opt(sbi->s_flags, SF_INTL); @@ -486,7 +486,7 @@ got_root: break; case MUFS_OFS: affs_set_opt(sbi->s_flags, SF_MUFS); - /* fall through */ + fallthrough; case FS_OFS: affs_set_opt(sbi->s_flags, SF_OFS); sb->s_flags |= SB_NOEXEC; @@ -494,7 +494,7 @@ got_root: case MUFS_DCOFS: case MUFS_INTLOFS: affs_set_opt(sbi->s_flags, SF_MUFS); - /* fall through */ + fallthrough; case FS_DCOFS: case FS_INTLOFS: affs_set_opt(sbi->s_flags, SF_INTL); diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index bef413818af7..a4e9e6e07e93 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -252,7 +252,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the FID array and its count in two steps */ - /* fall through */ + fallthrough; case 1: _debug("extract FID count"); ret = afs_extract_data(call, true); @@ -271,7 +271,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) afs_extract_to_buf(call, call->count * 3 * 4); call->unmarshall++; - /* Fall through */ + fallthrough; case 2: _debug("extract FID array"); ret = afs_extract_data(call, true); @@ -297,7 +297,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the callback array and its count in two steps */ - /* fall through */ + fallthrough; case 3: _debug("extract CB count"); ret = afs_extract_data(call, true); @@ -312,7 +312,7 @@ static int afs_deliver_cb_callback(struct afs_call *call) iov_iter_discard(&call->def_iter, READ, call->count2 * 3 * 4); call->unmarshall++; - /* Fall through */ + fallthrough; case 4: _debug("extract discard %zu/%u", iov_iter_count(call->iter), call->count2 * 3 * 4); @@ -391,7 +391,7 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *call) afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 1: _debug("extract UUID"); ret = afs_extract_data(call, false); @@ -503,7 +503,7 @@ static int afs_deliver_cb_probe_uuid(struct afs_call *call) afs_extract_to_buf(call, 11 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 1: _debug("extract UUID"); ret = afs_extract_data(call, false); @@ -618,7 +618,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call) call->unmarshall++; /* extract the FID array and its count in two steps */ - /* Fall through */ + fallthrough; case 1: _debug("extract FID count"); ret = afs_extract_data(call, true); @@ -637,7 +637,7 @@ static int afs_deliver_yfs_cb_callback(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; case 2: _debug("extract FID array"); ret = afs_extract_data(call, false); diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 96757f3abd74..1d2e61e0ab04 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -1053,7 +1053,7 @@ static int afs_d_revalidate_rcu(struct dentry *dentry) static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) { struct afs_vnode *vnode, *dir; - struct afs_fid uninitialized_var(fid); + struct afs_fid fid; struct dentry *parent; struct inode *inode; struct key *key; diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c index b79879aacc02..7b784af604fd 100644 --- a/fs/afs/dynroot.c +++ b/fs/afs/dynroot.c @@ -382,15 +382,17 @@ void afs_dynroot_depopulate(struct super_block *sb) net->dynroot_sb = NULL; mutex_unlock(&net->proc_cells_lock); - inode_lock(root->d_inode); - - /* Remove all the pins for dirs created for manually added cells */ - 
list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { - if (subdir->d_fsdata) { - subdir->d_fsdata = NULL; - dput(subdir); + if (root) { + inode_lock(root->d_inode); + + /* Remove all the pins for dirs created for manually added cells */ + list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) { + if (subdir->d_fsdata) { + subdir->d_fsdata = NULL; + dput(subdir); + } } - } - inode_unlock(root->d_inode); + inode_unlock(root->d_inode); + } } diff --git a/fs/afs/file.c b/fs/afs/file.c index 6f6ed1605cfe..371d1488cc54 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -311,7 +311,7 @@ int afs_page_filler(void *data, struct page *page) case -ENOBUFS: _debug("cache said ENOBUFS"); - /* fall through */ + fallthrough; default: go_on: req = kzalloc(struct_size(req, array, 1), GFP_KERNEL); diff --git a/fs/afs/flock.c b/fs/afs/flock.c index ffb8575345ca..cb3054c7843e 100644 --- a/fs/afs/flock.c +++ b/fs/afs/flock.c @@ -376,7 +376,6 @@ again: spin_unlock(&vnode->lock); return; - /* Fall through */ default: /* Looks like a lock request was withdrawn. */ spin_unlock(&vnode->lock); diff --git a/fs/afs/fs_operation.c b/fs/afs/fs_operation.c index 24fd163c6323..97cab12b0a6c 100644 --- a/fs/afs/fs_operation.c +++ b/fs/afs/fs_operation.c @@ -235,6 +235,7 @@ int afs_put_operation(struct afs_operation *op) afs_end_cursor(&op->ac); afs_put_serverlist(op->net, op->server_list); afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op); + key_put(op->key); kfree(op); return ret; } diff --git a/fs/afs/fs_probe.c b/fs/afs/fs_probe.c index 5d9ef517cf81..e7e98ad63a91 100644 --- a/fs/afs/fs_probe.c +++ b/fs/afs/fs_probe.c @@ -161,8 +161,8 @@ responded: } } - rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); - if (rtt_us < server->probe.rtt) { + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; server->rtt = rtt_us; alist->preferred = index; diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c index acb4d0ca2649..1d95ed9dd86e 100644 --- a/fs/afs/fsclient.c +++ b/fs/afs/fsclient.c @@ -320,7 +320,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->tmp_u = htonl(0); afs_extract_to_tmp(call); } - /* Fall through */ + fallthrough; /* extract the returned data length */ case 1: @@ -348,7 +348,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) call->bvec[0].bv_page = req->pages[req->index]; iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size); ASSERTCMP(size, <=, PAGE_SIZE); - /* Fall through */ + fallthrough; /* extract the returned data */ case 2: @@ -375,7 +375,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) /* Discard any excess data the server gave us */ afs_extract_discard(call, req->actual_len - req->len); call->unmarshall = 3; - /* Fall through */ + fallthrough; case 3: _debug("extract discard %zu/%llu", @@ -388,7 +388,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call) no_more_data: call->unmarshall = 4; afs_extract_to_buf(call, (21 + 3 + 6) * 4); - /* Fall through */ + fallthrough; /* extract the metadata */ case 4: @@ -1343,7 +1343,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) case 0: call->unmarshall++; afs_extract_to_buf(call, 12 * 4); - /* Fall through */ + fallthrough; /* extract the returned status record */ case 1: @@ -1356,7 +1356,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) xdr_decode_AFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; 
afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* extract the volume name length */ case 2: @@ -1371,7 +1371,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the volume name */ case 3: @@ -1385,7 +1385,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message length */ case 4: @@ -1400,7 +1400,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message */ case 5: @@ -1415,7 +1415,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day length */ case 6: @@ -1430,7 +1430,7 @@ static int afs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day */ case 7: @@ -1682,7 +1682,7 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the capabilities word count */ case 1: @@ -1696,7 +1696,7 @@ static int afs_deliver_fs_get_capabilities(struct afs_call *call) call->count2 = count; afs_extract_discard(call, count * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract capabilities words */ case 2: @@ -1776,7 +1776,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file status count and array in two steps */ case 1: @@ -1794,7 +1794,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_counts: afs_extract_to_buf(call, 21 * sizeof(__be32)); - /* Fall through */ + fallthrough; case 2: _debug("extract status array %u", call->count); @@ -1824,7 +1824,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* Extract the callback count and array in two steps */ case 3: @@ -1841,7 +1841,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_cbs: afs_extract_to_buf(call, 3 * sizeof(__be32)); - /* Fall through */ + fallthrough; case 4: _debug("extract CB array"); @@ -1870,7 +1870,7 @@ static int afs_deliver_fs_inline_bulk_status(struct afs_call *call) afs_extract_to_buf(call, 6 * sizeof(__be32)); call->unmarshall++; - /* Fall through */ + fallthrough; case 5: ret = afs_extract_data(call, false); @@ -1974,7 +1974,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the returned data length */ case 1: @@ -1992,7 +1992,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) acl->size = call->count2; afs_extract_begin(call, acl->data, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the returned data 
*/ case 2: @@ -2002,7 +2002,7 @@ static int afs_deliver_fs_fetch_acl(struct afs_call *call) afs_extract_to_buf(call, (21 + 6) * 4); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the metadata */ case 3: diff --git a/fs/afs/internal.h b/fs/afs/internal.h index 792ac711985e..18042b7dab6a 100644 --- a/fs/afs/internal.h +++ b/fs/afs/internal.h @@ -401,22 +401,24 @@ struct afs_vlserver { #define AFS_VLSERVER_FL_PROBED 0 /* The VL server has been probed */ #define AFS_VLSERVER_FL_PROBING 1 /* VL server is being probed */ #define AFS_VLSERVER_FL_IS_YFS 2 /* Server is YFS not AFS */ +#define AFS_VLSERVER_FL_RESPONDING 3 /* VL server is responding */ rwlock_t lock; /* Lock on addresses */ atomic_t usage; + unsigned int rtt; /* Server's current RTT in uS */ /* Probe state */ wait_queue_head_t probe_wq; atomic_t probe_outstanding; spinlock_t probe_lock; struct { - unsigned int rtt; /* RTT as ktime/64 */ + unsigned int rtt; /* RTT in uS */ u32 abort_code; short error; - bool have_result; - bool responded:1; - bool is_yfs:1; - bool not_yfs:1; - bool local_failure:1; + unsigned short flags; +#define AFS_VLSERVER_PROBE_RESPONDED 0x01 /* At least once response (may be abort) */ +#define AFS_VLSERVER_PROBE_IS_YFS 0x02 /* The peer appears to be YFS */ +#define AFS_VLSERVER_PROBE_NOT_YFS 0x04 /* The peer appears not to be YFS */ +#define AFS_VLSERVER_PROBE_LOCAL_FAILURE 0x08 /* A local failure prevented a probe */ } probe; u16 port; diff --git a/fs/afs/misc.c b/fs/afs/misc.c index 5334f1bd2bca..1d1a8debe472 100644 --- a/fs/afs/misc.c +++ b/fs/afs/misc.c @@ -120,42 +120,42 @@ void afs_prioritise_error(struct afs_error *e, int error, u32 abort_code) if (e->error == -ETIMEDOUT || e->error == -ETIME) return; - /* Fall through */ + fallthrough; case -ETIMEDOUT: case -ETIME: if (e->error == -ENOMEM || e->error == -ENONET) return; - /* Fall through */ + fallthrough; case -ENOMEM: case -ENONET: if (e->error == -ERFKILL) return; - /* Fall through */ + fallthrough; case -ERFKILL: if (e->error == -EADDRNOTAVAIL) return; - /* Fall through */ + fallthrough; case -EADDRNOTAVAIL: if (e->error == -ENETUNREACH) return; - /* Fall through */ + fallthrough; case -ENETUNREACH: if (e->error == -EHOSTUNREACH) return; - /* Fall through */ + fallthrough; case -EHOSTUNREACH: if (e->error == -EHOSTDOWN) return; - /* Fall through */ + fallthrough; case -EHOSTDOWN: if (e->error == -ECONNREFUSED) return; - /* Fall through */ + fallthrough; case -ECONNREFUSED: if (e->error == -ECONNRESET) return; - /* Fall through */ + fallthrough; case -ECONNRESET: /* Responded, but call expired. */ if (e->responded) return; diff --git a/fs/afs/proc.c b/fs/afs/proc.c index e817fc740ba0..e8babb62ed44 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c @@ -310,6 +310,11 @@ static int afs_proc_cell_vlservers_show(struct seq_file *m, void *v) alist->preferred == i ? 
'>' : '-', &alist->addrs[i].transport); } + seq_printf(m, " info: fl=%lx rtt=%d\n", vlserver->flags, vlserver->rtt); + seq_printf(m, " probe: fl=%x e=%d ac=%d out=%d\n", + vlserver->probe.flags, vlserver->probe.error, + vlserver->probe.abort_code, + atomic_read(&vlserver->probe_outstanding)); return 0; } diff --git a/fs/afs/rotate.c b/fs/afs/rotate.c index 6a0935cb822f..d83f13c44b92 100644 --- a/fs/afs/rotate.c +++ b/fs/afs/rotate.c @@ -281,7 +281,7 @@ bool afs_select_fileserver(struct afs_operation *op) case -ETIME: if (op->error != -EDESTADDRREQ) goto iterate_address; - /* Fall through */ + fallthrough; case -ERFKILL: case -EADDRNOTAVAIL: case -ENETUNREACH: diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 8fc8fb406a5a..8be709cb8542 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c @@ -568,7 +568,7 @@ static void afs_deliver_to_call(struct afs_call *call) case -EIO: pr_err("kAFS: Call %u in bad state %u\n", call->debug_id, state); - /* Fall through */ + fallthrough; case -ENODATA: case -EBADMSG: case -EMSGSIZE: @@ -669,7 +669,7 @@ long afs_wait_for_call_to_complete(struct afs_call *call, ret = call->ret0; call->ret0 = 0; - /* Fall through */ + fallthrough; case -ECONNABORTED: ac->responded = true; break; @@ -872,7 +872,7 @@ void afs_send_empty_reply(struct afs_call *call) _debug("oom"); rxrpc_kernel_abort_call(net->socket, call->rxcall, RX_USER_ABORT, -ENOMEM, "KOO"); - /* Fall through */ + fallthrough; default: _leave(" [error]"); return; diff --git a/fs/afs/security.c b/fs/afs/security.c index 90d852704328..9cf3102f370c 100644 --- a/fs/afs/security.c +++ b/fs/afs/security.c @@ -399,7 +399,7 @@ int afs_check_permit(struct afs_vnode *vnode, struct key *key, int afs_permission(struct inode *inode, int mask) { struct afs_vnode *vnode = AFS_FS_I(inode); - afs_access_t uninitialized_var(access); + afs_access_t access; struct key *key; int ret = 0; diff --git a/fs/afs/vl_list.c b/fs/afs/vl_list.c index 8fea54eba0c2..38b2ba1d9ec0 100644 --- a/fs/afs/vl_list.c +++ b/fs/afs/vl_list.c @@ -21,6 +21,7 @@ struct afs_vlserver *afs_alloc_vlserver(const char *name, size_t name_len, rwlock_init(&vlserver->lock); init_waitqueue_head(&vlserver->probe_wq); spin_lock_init(&vlserver->probe_lock); + vlserver->rtt = UINT_MAX; vlserver->name_len = name_len; vlserver->port = port; memcpy(vlserver->name, name, name_len); diff --git a/fs/afs/vl_probe.c b/fs/afs/vl_probe.c index e3aa013c2177..d1c7068b4346 100644 --- a/fs/afs/vl_probe.c +++ b/fs/afs/vl_probe.c @@ -11,15 +11,33 @@ #include "internal.h" #include "protocol_yfs.h" -static bool afs_vl_probe_done(struct afs_vlserver *server) + +/* + * Handle the completion of a set of probes. + */ +static void afs_finished_vl_probe(struct afs_vlserver *server) { - if (!atomic_dec_and_test(&server->probe_outstanding)) - return false; + if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) { + server->rtt = UINT_MAX; + clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags); + } - wake_up_var(&server->probe_outstanding); clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags); wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING); - return true; +} + +/* + * Handle the completion of a probe RPC call. 
+ */ +static void afs_done_one_vl_probe(struct afs_vlserver *server, bool wake_up) +{ + if (atomic_dec_and_test(&server->probe_outstanding)) { + afs_finished_vl_probe(server); + wake_up = true; + } + + if (wake_up) + wake_up_all(&server->probe_wq); } /* @@ -45,15 +63,20 @@ void afs_vlserver_probe_result(struct afs_call *call) server->probe.error = 0; goto responded; case -ECONNABORTED: - if (!server->probe.responded) { + if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) { server->probe.abort_code = call->abort_code; server->probe.error = ret; } goto responded; case -ENOMEM: case -ENONET: - server->probe.local_failure = true; - afs_io_error(call, afs_io_error_vl_probe_fail); + case -EKEYEXPIRED: + case -EKEYREVOKED: + case -EKEYREJECTED: + server->probe.flags |= AFS_VLSERVER_PROBE_LOCAL_FAILURE; + if (server->probe.error == 0) + server->probe.error = ret; + trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail); goto out; case -ECONNRESET: /* Responded, but call expired. */ case -ERFKILL: @@ -67,12 +90,12 @@ void afs_vlserver_probe_result(struct afs_call *call) default: clear_bit(index, &alist->responded); set_bit(index, &alist->failed); - if (!server->probe.responded && + if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) && (server->probe.error == 0 || server->probe.error == -ETIMEDOUT || server->probe.error == -ETIME)) server->probe.error = ret; - afs_io_error(call, afs_io_error_vl_probe_fail); + trace_afs_io_error(call->debug_id, ret, afs_io_error_vl_probe_fail); goto out; } @@ -81,39 +104,36 @@ responded: clear_bit(index, &alist->failed); if (call->service_id == YFS_VL_SERVICE) { - server->probe.is_yfs = true; + server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS; set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } else { - server->probe.not_yfs = true; - if (!server->probe.is_yfs) { + server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS; + if (!(server->probe.flags & AFS_VLSERVER_PROBE_IS_YFS)) { clear_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags); alist->addrs[index].srx_service = call->service_id; } } - rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); - if (rtt_us < server->probe.rtt) { + if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) && + rtt_us < server->probe.rtt) { server->probe.rtt = rtt_us; + server->rtt = rtt_us; alist->preferred = index; - have_result = true; } smp_wmb(); /* Set rtt before responded. 
*/ - server->probe.responded = true; + server->probe.flags |= AFS_VLSERVER_PROBE_RESPONDED; set_bit(AFS_VLSERVER_FL_PROBED, &server->flags); + set_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags); + have_result = true; out: spin_unlock(&server->probe_lock); _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", server_index, index, &alist->addrs[index].transport, rtt_us, ret); - have_result |= afs_vl_probe_done(server); - if (have_result) { - server->probe.have_result = true; - wake_up_var(&server->probe.have_result); - wake_up_all(&server->probe_wq); - } + afs_done_one_vl_probe(server, have_result); } /* @@ -151,11 +171,10 @@ static bool afs_do_probe_vlserver(struct afs_net *net, in_progress = true; } else { afs_prioritise_error(_e, PTR_ERR(call), ac.abort_code); + afs_done_one_vl_probe(server, false); } } - if (!in_progress) - afs_vl_probe_done(server); return in_progress; } @@ -193,7 +212,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, { struct wait_queue_entry *waits; struct afs_vlserver *server; - unsigned int rtt = UINT_MAX; + unsigned int rtt = UINT_MAX, rtt_s; bool have_responders = false; int pref = -1, i; @@ -205,7 +224,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, server = vllist->servers[i].server; if (!test_bit(AFS_VLSERVER_FL_PROBING, &server->flags)) __clear_bit(i, &untried); - if (server->probe.responded) + if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) have_responders = true; } } @@ -231,7 +250,7 @@ int afs_wait_for_vl_probes(struct afs_vlserver_list *vllist, for (i = 0; i < vllist->nr_servers; i++) { if (test_bit(i, &untried)) { server = vllist->servers[i].server; - if (server->probe.responded) + if (server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) goto stop; if (test_bit(AFS_VLSERVER_FL_PROBING, &server->flags)) still_probing = true; @@ -249,10 +268,11 @@ stop: for (i = 0; i < vllist->nr_servers; i++) { if (test_bit(i, &untried)) { server = vllist->servers[i].server; - if (server->probe.responded && - server->probe.rtt < rtt) { + rtt_s = READ_ONCE(server->rtt); + if (test_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags) && + rtt_s < rtt) { pref = i; - rtt = server->probe.rtt; + rtt = rtt_s; } remove_wait_queue(&server->probe_wq, &waits[i]); diff --git a/fs/afs/vl_rotate.c b/fs/afs/vl_rotate.c index f405ca8b240a..c0458c903b31 100644 --- a/fs/afs/vl_rotate.c +++ b/fs/afs/vl_rotate.c @@ -192,7 +192,8 @@ pick_server: for (i = 0; i < vc->server_list->nr_servers; i++) { struct afs_vlserver *s = vc->server_list->servers[i].server; - if (!test_bit(i, &vc->untried) || !s->probe.responded) + if (!test_bit(i, &vc->untried) || + !test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags)) continue; if (s->probe.rtt < rtt) { vc->index = i; @@ -262,10 +263,14 @@ no_more_servers: for (i = 0; i < vc->server_list->nr_servers; i++) { struct afs_vlserver *s = vc->server_list->servers[i].server; + if (test_bit(AFS_VLSERVER_FL_RESPONDING, &s->flags)) + e.responded = true; afs_prioritise_error(&e, READ_ONCE(s->probe.error), s->probe.abort_code); } + error = e.error; + failed_set_error: vc->error = error; failed: diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index fd82850cd424..dc9327332f06 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -196,7 +196,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call *call) /* Extract the returned uuid, uniquifier, nentries and * blkaddrs size */ - /* Fall through */ + fallthrough; case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -221,7 +221,7 @@ static int afs_deliver_vl_get_addrs_u(struct afs_call 
*call) count = min(call->count, 4U); afs_extract_to_buf(call, count * sizeof(__be32)); - /* Fall through - and extract entries */ + fallthrough; /* and extract entries */ case 2: ret = afs_extract_data(call, call->count > 4); if (ret < 0) @@ -324,7 +324,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through - and extract the capabilities word count */ + fallthrough; /* and extract the capabilities word count */ case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -337,7 +337,7 @@ static int afs_deliver_vl_get_capabilities(struct afs_call *call) call->unmarshall++; afs_extract_discard(call, count * sizeof(__be32)); - /* Fall through - and extract capabilities words */ + fallthrough; /* and extract capabilities words */ case 2: ret = afs_extract_data(call, false); if (ret < 0) @@ -436,7 +436,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) /* Extract the returned uuid, uniquifier, fsEndpoints count and * either the first fsEndpoint type or the volEndpoints * count if there are no fsEndpoints. */ - /* Fall through */ + fallthrough; case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -475,7 +475,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall = 2; - /* Fall through - and extract fsEndpoints[] entries */ + fallthrough; /* and extract fsEndpoints[] entries */ case 2: ret = afs_extract_data(call, true); if (ret < 0) @@ -526,7 +526,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) * extract the type of the next endpoint when we extract the * data of the current one, but this is the first... */ - /* Fall through */ + fallthrough; case 3: ret = afs_extract_data(call, true); if (ret < 0) @@ -552,7 +552,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_to_buf(call, size); call->unmarshall = 4; - /* Fall through - and extract volEndpoints[] entries */ + fallthrough; /* and extract volEndpoints[] entries */ case 4: ret = afs_extract_data(call, true); if (ret < 0) @@ -587,7 +587,7 @@ static int afs_deliver_yfsvl_get_endpoints(struct afs_call *call) afs_extract_discard(call, 0); call->unmarshall = 5; - /* Fall through - Done */ + fallthrough; /* Done */ case 5: ret = afs_extract_data(call, false); if (ret < 0) @@ -663,7 +663,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through - and extract the cell name length */ + fallthrough; /* and extract the cell name length */ case 1: ret = afs_extract_data(call, true); if (ret < 0) @@ -685,7 +685,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_begin(call, cell_name, namesz); call->unmarshall++; - /* Fall through - and extract cell name */ + fallthrough; /* and extract cell name */ case 2: ret = afs_extract_data(call, true); if (ret < 0) @@ -694,7 +694,7 @@ static int afs_deliver_yfsvl_get_cell_name(struct afs_call *call) afs_extract_discard(call, call->count2); call->unmarshall++; - /* Fall through - and extract padding */ + fallthrough; /* and extract padding */ case 3: ret = afs_extract_data(call, false); if (ret < 0) diff --git a/fs/afs/write.c b/fs/afs/write.c index a121c247d95a..4b2265cb1891 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c @@ -609,7 +609,7 @@ no_more: default: pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret); - /* Fall through */ + fallthrough; case -EACCES: case -EPERM: 
case -ENOKEY: diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c index 8c24fdc899e3..3b1239b7e90d 100644 --- a/fs/afs/yfsclient.c +++ b/fs/afs/yfsclient.c @@ -373,7 +373,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->offset = req->pos & (PAGE_SIZE - 1); afs_extract_to_tmp64(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the returned data length */ case 1: @@ -401,7 +401,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) call->bvec[0].bv_page = req->pages[req->index]; iov_iter_bvec(&call->def_iter, READ, call->bvec, 1, size); ASSERTCMP(size, <=, PAGE_SIZE); - /* Fall through */ + fallthrough; /* extract the returned data */ case 2: @@ -428,7 +428,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) /* Discard any excess data the server gave us */ afs_extract_discard(call, req->actual_len - req->len); call->unmarshall = 3; - /* Fall through */ + fallthrough; case 3: _debug("extract discard %zu/%llu", @@ -444,7 +444,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSCallBack) + sizeof(struct yfs_xdr_YFSVolSync)); - /* Fall through */ + fallthrough; /* extract the metadata */ case 4: @@ -461,7 +461,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call) req->file_size = vp->scb.status.size; call->unmarshall++; - /* Fall through */ + fallthrough; case 5: break; @@ -1262,7 +1262,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) case 0: call->unmarshall++; afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchVolumeStatus)); - /* Fall through */ + fallthrough; /* extract the returned status record */ case 1: @@ -1275,7 +1275,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) xdr_decode_YFSFetchVolumeStatus(&bp, &op->volstatus.vs); call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* extract the volume name length */ case 2: @@ -1290,7 +1290,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the volume name */ case 3: @@ -1304,7 +1304,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) _debug("volname '%s'", p); afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message length */ case 4: @@ -1319,7 +1319,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the offline message */ case 5: @@ -1334,7 +1334,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day length */ case 6: @@ -1349,7 +1349,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) size = (call->count + 3) & ~3; /* It's padded */ afs_extract_to_buf(call, size); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the message of the day */ case 7: @@ -1363,7 +1363,7 @@ static int yfs_deliver_fs_get_volume_status(struct afs_call *call) _debug("motd '%s'", p); call->unmarshall++; - /* Fall through */ + fallthrough; case 8: break; @@ -1622,7 +1622,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) case 0: 
afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file status count and array in two steps */ case 1: @@ -1640,7 +1640,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_counts: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSFetchStatus)); - /* Fall through */ + fallthrough; case 2: _debug("extract status array %u", call->count); @@ -1670,7 +1670,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->count = 0; call->unmarshall++; afs_extract_to_tmp(call); - /* Fall through */ + fallthrough; /* Extract the callback count and array in two steps */ case 3: @@ -1687,7 +1687,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) call->unmarshall++; more_cbs: afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSCallBack)); - /* Fall through */ + fallthrough; case 4: _debug("extract CB array"); @@ -1716,7 +1716,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) afs_extract_to_buf(call, sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; - /* Fall through */ + fallthrough; case 5: ret = afs_extract_data(call, false); @@ -1727,7 +1727,7 @@ static int yfs_deliver_fs_inline_bulk_status(struct afs_call *call) xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; - /* Fall through */ + fallthrough; case 6: break; @@ -1804,7 +1804,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) case 0: afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file ACL length */ case 1: @@ -1826,7 +1826,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_discard(call, size); } call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the file ACL */ case 2: @@ -1836,7 +1836,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_to_tmp(call); call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the volume ACL length */ case 3: @@ -1858,7 +1858,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) afs_extract_discard(call, size); } call->unmarshall++; - /* Fall through */ + fallthrough; /* Extract the volume ACL */ case 4: @@ -1871,7 +1871,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) sizeof(struct yfs_xdr_YFSFetchStatus) + sizeof(struct yfs_xdr_YFSVolSync)); call->unmarshall++; - /* Fall through */ + fallthrough; /* extract the metadata */ case 5: @@ -1886,7 +1886,7 @@ static int yfs_deliver_fs_fetch_opaque_acl(struct afs_call *call) xdr_decode_YFSVolSync(&bp, &op->volsync); call->unmarshall++; - /* Fall through */ + fallthrough; case 6: break; @@ -525,9 +525,9 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events) return -EINTR; } - ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, - PROT_READ | PROT_WRITE, - MAP_SHARED, 0, &unused, NULL); + ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size, + PROT_READ | PROT_WRITE, + MAP_SHARED, 0, &unused, NULL); mmap_write_unlock(mm); if (IS_ERR((void *)ctx->mmap_base)) { ctx->mmap_size = 0; @@ -1511,7 +1511,7 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret) * may be already running. Just fail this IO with EINTR. 
*/ ret = -EINTR; - /*FALLTHRU*/ + fallthrough; default: req->ki_complete(req, ret, 0); } diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c index f3a0f412b43b..75105f45c51a 100644 --- a/fs/autofs/dev-ioctl.c +++ b/fs/autofs/dev-ioctl.c @@ -20,7 +20,7 @@ * another mount. This situation arises when starting automount(8) * or other user space daemon which uses direct mounts or offset * mounts (used for autofs lazy mount/umount of nested mount trees), - * which have been left busy at at service shutdown. + * which have been left busy at service shutdown. */ typedef int (*ioctl_fn)(struct file *, struct autofs_sb_info *, @@ -496,7 +496,7 @@ static int autofs_dev_ioctl_askumount(struct file *fp, * located path is the root of a mount we return 1 along with * the super magic of the mount or 0 otherwise. * - * In both cases the the device number (as returned by + * In both cases the device number (as returned by * new_encode_dev()) is also returned. */ static int autofs_dev_ioctl_ismountpoint(struct file *fp, diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 64cdf4d8e424..2482032021ca 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -22,6 +22,7 @@ #include <linux/cred.h> #include <linux/exportfs.h> #include <linux/seq_file.h> +#include <linux/blkdev.h> #include "befs.h" #include "btree.h" diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 9fe3b51c116a..13d053982dd7 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -1821,7 +1821,7 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, long signr, size_t *total) { unsigned int i; - unsigned int regset0_size = regset_size(t->task, &view->regsets[0]); + int regset0_size; /* * NT_PRSTATUS is the one special case, because the regset data @@ -1830,8 +1830,10 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, * We assume that regset 0 is NT_PRSTATUS. 
*/ fill_prstatus(&t->prstatus, t->task, signr); - (void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size, - &t->prstatus.pr_reg, NULL); + regset0_size = regset_get(t->task, &view->regsets[0], + sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg); + if (regset0_size < 0) + return 0; fill_note(&t->notes[0], "CORE", NT_PRSTATUS, PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus); @@ -1846,32 +1848,28 @@ static int fill_thread_core_info(struct elf_thread_core_info *t, */ for (i = 1; i < view->n; ++i) { const struct user_regset *regset = &view->regsets[i]; + int note_type = regset->core_note_type; + bool is_fpreg = note_type == NT_PRFPREG; + void *data; + int ret; + do_thread_regset_writeback(t->task, regset); - if (regset->core_note_type && regset->get && - (!regset->active || regset->active(t->task, regset) > 0)) { - int ret; - size_t size = regset_size(t->task, regset); - void *data = kzalloc(size, GFP_KERNEL); - if (unlikely(!data)) - return 0; - ret = regset->get(t->task, regset, - 0, size, data, NULL); - if (unlikely(ret)) - kfree(data); - else { - if (regset->core_note_type != NT_PRFPREG) - fill_note(&t->notes[i], "LINUX", - regset->core_note_type, - size, data); - else { - SET_PR_FPVALID(&t->prstatus, - 1, regset0_size); - fill_note(&t->notes[i], "CORE", - NT_PRFPREG, size, data); - } - *total += notesize(&t->notes[i]); - } - } + if (!note_type) // not for coredumps + continue; + if (regset->active && regset->active(t->task, regset) <= 0) + continue; + + ret = regset_get_alloc(t->task, regset, ~0U, &data); + if (ret < 0) + continue; + + if (is_fpreg) + SET_PR_FPVALID(&t->prstatus, 1, regset0_size); + + fill_note(&t->notes[i], is_fpreg ? "CORE" : "LINUX", + note_type, ret, data); + + *total += notesize(&t->notes[i]); } return 1; @@ -2040,9 +2038,6 @@ struct elf_thread_status struct elf_prstatus prstatus; /* NT_PRSTATUS */ elf_fpregset_t fpu; /* NT_PRFPREG */ struct task_struct *thread; -#ifdef ELF_CORE_COPY_XFPREGS - elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ -#endif struct memelfnote notes[3]; int num_notes; }; @@ -2073,15 +2068,6 @@ static int elf_dump_thread_status(long signr, struct elf_thread_status *t) t->num_notes++; sz += notesize(&t->notes[1]); } - -#ifdef ELF_CORE_COPY_XFPREGS - if (elf_core_copy_task_xfpregs(p, &t->xfpu)) { - fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE, - sizeof(t->xfpu), &t->xfpu); - t->num_notes++; - sz += notesize(&t->notes[2]); - } -#endif return sz; } @@ -2092,9 +2078,6 @@ struct elf_note_info { struct elf_prpsinfo *psinfo; /* NT_PRPSINFO */ struct list_head thread_list; elf_fpregset_t *fpu; -#ifdef ELF_CORE_COPY_XFPREGS - elf_fpxregset_t *xfpu; -#endif user_siginfo_t csigdata; int thread_status_size; int numnote; @@ -2118,11 +2101,6 @@ static int elf_note_info_init(struct elf_note_info *info) info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL); if (!info->fpu) return 0; -#ifdef ELF_CORE_COPY_XFPREGS - info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL); - if (!info->xfpu) - return 0; -#endif return 1; } @@ -2186,13 +2164,6 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, if (info->prstatus->pr_fpvalid) fill_note(info->notes + info->numnote++, "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu); -#ifdef ELF_CORE_COPY_XFPREGS - if (elf_core_copy_task_xfpregs(current, info->xfpu)) - fill_note(info->notes + info->numnote++, - "LINUX", ELF_CORE_XFPREG_TYPE, - sizeof(*info->xfpu), info->xfpu); -#endif - return 1; } @@ -2245,9 +2216,6 @@ static void free_note_info(struct elf_note_info *info) kfree(info->psinfo); 
kfree(info->notes); kfree(info->fpu); -#ifdef ELF_CORE_COPY_XFPREGS - kfree(info->xfpu); -#endif } #endif diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 0f45521b237c..50f845702b92 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c @@ -35,10 +35,10 @@ #include <linux/elfcore.h> #include <linux/coredump.h> #include <linux/dax.h> +#include <linux/regset.h> #include <linux/uaccess.h> #include <asm/param.h> -#include <asm/pgalloc.h> typedef char *elf_caddr_t; @@ -1189,6 +1189,32 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params, */ #ifdef CONFIG_ELF_CORE +struct elf_prstatus_fdpic +{ + struct elf_siginfo pr_info; /* Info associated with signal */ + short pr_cursig; /* Current signal */ + unsigned long pr_sigpend; /* Set of pending signals */ + unsigned long pr_sighold; /* Set of held signals */ + pid_t pr_pid; + pid_t pr_ppid; + pid_t pr_pgrp; + pid_t pr_sid; + struct __kernel_old_timeval pr_utime; /* User time */ + struct __kernel_old_timeval pr_stime; /* System time */ + struct __kernel_old_timeval pr_cutime; /* Cumulative user time */ + struct __kernel_old_timeval pr_cstime; /* Cumulative system time */ + elf_gregset_t pr_reg; /* GP registers */ + /* When using FDPIC, the loadmap addresses need to be communicated + * to GDB in order for GDB to do the necessary relocations. The + * fields (below) used to communicate this information are placed + * immediately after ``pr_reg'', so that the loadmap addresses may + * be viewed as part of the register set if so desired. + */ + unsigned long pr_exec_fdpic_loadmap; + unsigned long pr_interp_fdpic_loadmap; + int pr_fpvalid; /* True if math co-processor being used. */ +}; + /* * Decide whether a segment is worth dumping; default is yes to be * sure (missing info is worse than too much; etc). @@ -1345,7 +1371,7 @@ static inline void fill_note(struct memelfnote *note, const char *name, int type * fill up all the fields in prstatus from the given task struct, except * registers which need to be filled up separately. */ -static void fill_prstatus(struct elf_prstatus *prstatus, +static void fill_prstatus(struct elf_prstatus_fdpic *prstatus, struct task_struct *p, long signr) { prstatus->pr_info.si_signo = prstatus->pr_cursig = signr; @@ -1427,14 +1453,10 @@ static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p, /* Here is the structure in which status of each thread is captured. */ struct elf_thread_status { - struct list_head list; - struct elf_prstatus prstatus; /* NT_PRSTATUS */ + struct elf_thread_status *next; + struct elf_prstatus_fdpic prstatus; /* NT_PRSTATUS */ elf_fpregset_t fpu; /* NT_PRFPREG */ - struct task_struct *thread; -#ifdef ELF_CORE_COPY_XFPREGS - elf_fpxregset_t xfpu; /* ELF_CORE_XFPREG_TYPE */ -#endif - struct memelfnote notes[3]; + struct memelfnote notes[2]; int num_notes; }; @@ -1443,38 +1465,44 @@ struct elf_thread_status * we need to keep a linked list of every thread's pr_status and then create * a single section for them in the final core file. 
*/ -static int elf_dump_thread_status(long signr, struct elf_thread_status *t) +static struct elf_thread_status *elf_dump_thread_status(long signr, struct task_struct *p, int *sz) { - struct task_struct *p = t->thread; - int sz = 0; + const struct user_regset_view *view = task_user_regset_view(p); + struct elf_thread_status *t; + int i, ret; - t->num_notes = 0; + t = kzalloc(sizeof(struct elf_thread_status), GFP_KERNEL); + if (!t) + return t; fill_prstatus(&t->prstatus, p, signr); - elf_core_copy_task_regs(p, &t->prstatus.pr_reg); + regset_get(p, &view->regsets[0], + sizeof(t->prstatus.pr_reg), &t->prstatus.pr_reg); fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus), &t->prstatus); t->num_notes++; - sz += notesize(&t->notes[0]); + *sz += notesize(&t->notes[0]); + + for (i = 1; i < view->n; ++i) { + const struct user_regset *regset = &view->regsets[i]; + if (regset->core_note_type != NT_PRFPREG) + continue; + if (regset->active && regset->active(p, regset) <= 0) + continue; + ret = regset_get(p, regset, sizeof(t->fpu), &t->fpu); + if (ret >= 0) + t->prstatus.pr_fpvalid = 1; + break; + } - t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL, &t->fpu); if (t->prstatus.pr_fpvalid) { fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu), &t->fpu); t->num_notes++; - sz += notesize(&t->notes[1]); + *sz += notesize(&t->notes[1]); } - -#ifdef ELF_CORE_COPY_XFPREGS - if (elf_core_copy_task_xfpregs(p, &t->xfpu)) { - fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE, - sizeof(t->xfpu), &t->xfpu); - t->num_notes++; - sz += notesize(&t->notes[2]); - } -#endif - return sz; + return t; } static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum, @@ -1553,23 +1581,15 @@ static size_t elf_core_vma_data_size(unsigned long mm_flags) */ static int elf_fdpic_core_dump(struct coredump_params *cprm) { -#define NUM_NOTES 6 int has_dumped = 0; int segs; int i; struct vm_area_struct *vma; struct elfhdr *elf = NULL; loff_t offset = 0, dataoff; - int numnote; - struct memelfnote *notes = NULL; - struct elf_prstatus *prstatus = NULL; /* NT_PRSTATUS */ + struct memelfnote psinfo_note, auxv_note; struct elf_prpsinfo *psinfo = NULL; /* NT_PRPSINFO */ - LIST_HEAD(thread_list); - struct list_head *t; - elf_fpregset_t *fpu = NULL; -#ifdef ELF_CORE_COPY_XFPREGS - elf_fpxregset_t *xfpu = NULL; -#endif + struct elf_thread_status *thread_list = NULL; int thread_status_size = 0; elf_addr_t *auxv; struct elf_phdr *phdr4note = NULL; @@ -1595,47 +1615,28 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) elf = kmalloc(sizeof(*elf), GFP_KERNEL); if (!elf) goto end_coredump; - prstatus = kzalloc(sizeof(*prstatus), GFP_KERNEL); - if (!prstatus) - goto end_coredump; psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL); if (!psinfo) goto end_coredump; - notes = kmalloc_array(NUM_NOTES, sizeof(struct memelfnote), - GFP_KERNEL); - if (!notes) - goto end_coredump; - fpu = kmalloc(sizeof(*fpu), GFP_KERNEL); - if (!fpu) - goto end_coredump; -#ifdef ELF_CORE_COPY_XFPREGS - xfpu = kmalloc(sizeof(*xfpu), GFP_KERNEL); - if (!xfpu) - goto end_coredump; -#endif for (ct = current->mm->core_state->dumper.next; ct; ct = ct->next) { - tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + tmp = elf_dump_thread_status(cprm->siginfo->si_signo, + ct->task, &thread_status_size); if (!tmp) goto end_coredump; - tmp->thread = ct->task; - list_add(&tmp->list, &thread_list); - } - - list_for_each(t, &thread_list) { - struct elf_thread_status *tmp; - int sz; - - tmp = list_entry(t, struct elf_thread_status, list); - sz 
= elf_dump_thread_status(cprm->siginfo->si_signo, tmp); - thread_status_size += sz; + tmp->next = thread_list; + thread_list = tmp; } /* now collect the dump for the current */ - fill_prstatus(prstatus, current, cprm->siginfo->si_signo); - elf_core_copy_regs(&prstatus->pr_reg, cprm->regs); + tmp = elf_dump_thread_status(cprm->siginfo->si_signo, + current, &thread_status_size); + if (!tmp) + goto end_coredump; + tmp->next = thread_list; + thread_list = tmp; segs = current->mm->map_count; segs += elf_core_extra_phdrs(); @@ -1657,51 +1658,28 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) * with info from their /proc. */ - fill_note(notes + 0, "CORE", NT_PRSTATUS, sizeof(*prstatus), prstatus); fill_psinfo(psinfo, current->group_leader, current->mm); - fill_note(notes + 1, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); - - numnote = 2; + fill_note(&psinfo_note, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo); + thread_status_size += notesize(&psinfo_note); auxv = (elf_addr_t *) current->mm->saved_auxv; - i = 0; do i += 2; while (auxv[i - 2] != AT_NULL); - fill_note(¬es[numnote++], "CORE", NT_AUXV, - i * sizeof(elf_addr_t), auxv); - - /* Try to dump the FPU. */ - if ((prstatus->pr_fpvalid = - elf_core_copy_task_fpregs(current, cprm->regs, fpu))) - fill_note(notes + numnote++, - "CORE", NT_PRFPREG, sizeof(*fpu), fpu); -#ifdef ELF_CORE_COPY_XFPREGS - if (elf_core_copy_task_xfpregs(current, xfpu)) - fill_note(notes + numnote++, - "LINUX", ELF_CORE_XFPREG_TYPE, sizeof(*xfpu), xfpu); -#endif + fill_note(&auxv_note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv); + thread_status_size += notesize(&auxv_note); - offset += sizeof(*elf); /* Elf header */ + offset = sizeof(*elf); /* Elf header */ offset += segs * sizeof(struct elf_phdr); /* Program headers */ /* Write notes phdr entry */ - { - int sz = 0; - - for (i = 0; i < numnote; i++) - sz += notesize(notes + i); - - sz += thread_status_size; + phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL); + if (!phdr4note) + goto end_coredump; - phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL); - if (!phdr4note) - goto end_coredump; - - fill_elf_note_phdr(phdr4note, sz, offset); - offset += sz; - } + fill_elf_note_phdr(phdr4note, thread_status_size, offset); + offset += thread_status_size; /* Page-align dumped data */ dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); @@ -1754,15 +1732,18 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) goto end_coredump; /* write out the notes section */ - for (i = 0; i < numnote; i++) - if (!writenote(notes + i, cprm)) + if (!writenote(thread_list->notes, cprm)) + goto end_coredump; + if (!writenote(&psinfo_note, cprm)) + goto end_coredump; + if (!writenote(&auxv_note, cprm)) + goto end_coredump; + for (i = 1; i < thread_list->num_notes; i++) + if (!writenote(thread_list->notes + i, cprm)) goto end_coredump; /* write out the thread status notes section */ - list_for_each(t, &thread_list) { - struct elf_thread_status *tmp = - list_entry(t, struct elf_thread_status, list); - + for (tmp = thread_list->next; tmp; tmp = tmp->next) { for (i = 0; i < tmp->num_notes; i++) if (!writenote(&tmp->notes[i], cprm)) goto end_coredump; @@ -1790,23 +1771,16 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm) } end_coredump: - while (!list_empty(&thread_list)) { - struct list_head *tmp = thread_list.next; - list_del(tmp); - kfree(list_entry(tmp, struct elf_thread_status, list)); + while (thread_list) { + tmp = thread_list; + thread_list = thread_list->next; + kfree(tmp); } kfree(phdr4note); 
kfree(elf); - kfree(prstatus); kfree(psinfo); - kfree(notes); - kfree(fpu); kfree(shdr4extnum); -#ifdef ELF_CORE_COPY_XFPREGS - kfree(xfpu); -#endif return has_dumped; -#undef NUM_NOTES } #endif /* CONFIG_ELF_CORE */ diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index f2f9086ebe98..b9c658e0548e 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c @@ -576,7 +576,7 @@ static int load_flat_file(struct linux_binprm *bprm, goto err; } - len = data_len + extra; + len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long); len = PAGE_ALIGN(len); realdatastart = vm_mmap(NULL, 0, len, PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0); @@ -590,7 +590,9 @@ static int load_flat_file(struct linux_binprm *bprm, vm_munmap(textpos, text_len); goto err; } - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(unsigned long), + FLAT_DATA_ALIGN); pr_debug("Allocated data+bss+stack (%u bytes): %lx\n", data_len + bss_len + stack_len, datapos); @@ -620,7 +622,7 @@ static int load_flat_file(struct linux_binprm *bprm, memp_size = len; } else { - len = text_len + data_len + extra; + len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32); len = PAGE_ALIGN(len); textpos = vm_mmap(NULL, 0, len, PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0); @@ -635,7 +637,9 @@ static int load_flat_file(struct linux_binprm *bprm, } realdatastart = textpos + ntohl(hdr->data_start); - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN); + datapos = ALIGN(realdatastart + + MAX_SHARED_LIBS * sizeof(u32), + FLAT_DATA_ALIGN); reloc = (__be32 __user *) (datapos + (ntohl(hdr->reloc_start) - text_len)); @@ -652,9 +656,8 @@ static int load_flat_file(struct linux_binprm *bprm, (text_len + full_data - sizeof(struct flat_hdr)), 0); - if (datapos != realdatastart) - memmove((void *)datapos, (void *)realdatastart, - full_data); + memmove((void *) datapos, (void *) realdatastart, + full_data); #else /* * This is used on MMU systems mainly for testing. 
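The binfmt_flat hunks above and below change the data-segment layout: the mapping is grown by MAX_SHARED_LIBS pointer slots and datapos is pushed past that table before being aligned, instead of the table living below realdatastart. A minimal userspace sketch of that arithmetic follows; MAX_SHARED_LIBS_EX, FLAT_DATA_ALIGN_EX and PAGE_SIZE_EX are assumed values for illustration only, the real constants come from the kernel headers.

#include <stdio.h>

/* Assumed example values; not the kernel's real constants. */
#define MAX_SHARED_LIBS_EX	4UL
#define FLAT_DATA_ALIGN_EX	0x20UL
#define PAGE_SIZE_EX		4096UL

#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long data_len = 0x3400, extra = 0x800;
	unsigned long realdatastart = 0x40010000UL;	/* pretend vm_mmap() result */

	/* the mapping now also covers the shared-library pointer table */
	unsigned long len = data_len + extra +
			    MAX_SHARED_LIBS_EX * sizeof(unsigned long);
	len = ALIGN_UP(len, PAGE_SIZE_EX);

	/* data starts after the pointer table, aligned for the flat ABI */
	unsigned long datapos = ALIGN_UP(realdatastart +
				MAX_SHARED_LIBS_EX * sizeof(unsigned long),
				FLAT_DATA_ALIGN_EX);

	printf("mapping length: %#lx\n", len);
	printf("datapos:        %#lx (offset %#lx into the mapping)\n",
	       datapos, datapos - realdatastart);
	return 0;
}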
@@ -710,7 +713,8 @@ static int load_flat_file(struct linux_binprm *bprm, if (IS_ERR_VALUE(result)) { ret = result; pr_err("Unable to read code+data+bss, errno %d\n", ret); - vm_munmap(textpos, text_len + data_len + extra); + vm_munmap(textpos, text_len + data_len + extra + + MAX_SHARED_LIBS * sizeof(u32)); goto err; } } diff --git a/fs/block_dev.c b/fs/block_dev.c index 0ae656e022fd..8ae833e00443 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -105,16 +105,7 @@ EXPORT_SYMBOL(invalidate_bdev); static void set_init_blocksize(struct block_device *bdev) { - unsigned bsize = bdev_logical_block_size(bdev); - loff_t size = i_size_read(bdev->bd_inode); - - while (bsize < PAGE_SIZE) { - if (size & bsize) - break; - bsize <<= 1; - } - bdev->bd_block_size = bsize; - bdev->bd_inode->i_blkbits = blksize_bits(bsize); + bdev->bd_inode->i_blkbits = blksize_bits(bdev_logical_block_size(bdev)); } int set_blocksize(struct block_device *bdev, int size) @@ -128,9 +119,8 @@ int set_blocksize(struct block_device *bdev, int size) return -EINVAL; /* Don't change the size if it is same as current */ - if (bdev->bd_block_size != size) { + if (bdev->bd_inode->i_blkbits != blksize_bits(size)) { sync_blockdev(bdev); - bdev->bd_block_size = size; bdev->bd_inode->i_blkbits = blksize_bits(size); kill_bdev(bdev); } @@ -703,12 +693,12 @@ int bdev_read_page(struct block_device *bdev, sector_t sector, if (!ops->rw_page || bdev_get_integrity(bdev)) return result; - result = blk_queue_enter(bdev->bd_queue, 0); + result = blk_queue_enter(bdev->bd_disk->queue, 0); if (result) return result; result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, REQ_OP_READ); - blk_queue_exit(bdev->bd_queue); + blk_queue_exit(bdev->bd_disk->queue); return result; } @@ -739,7 +729,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, if (!ops->rw_page || bdev_get_integrity(bdev)) return -EOPNOTSUPP; - result = blk_queue_enter(bdev->bd_queue, 0); + result = blk_queue_enter(bdev->bd_disk->queue, 0); if (result) return result; @@ -752,7 +742,7 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, clean_page_buffers(page); unlock_page(page); } - blk_queue_exit(bdev->bd_queue); + blk_queue_exit(bdev->bd_disk->queue); return result; } @@ -783,7 +773,6 @@ static void init_once(void *foo) memset(bdev, 0, sizeof(*bdev)); mutex_init(&bdev->bd_mutex); - INIT_LIST_HEAD(&bdev->bd_list); #ifdef CONFIG_SYSFS INIT_LIST_HEAD(&bdev->bd_holder_disks); #endif @@ -799,9 +788,6 @@ static void bdev_evict_inode(struct inode *inode) truncate_inode_pages_final(&inode->i_data); invalidate_inode_buffers(inode); /* is it needed here? 
*/ clear_inode(inode); - spin_lock(&bdev_lock); - list_del_init(&bdev->bd_list); - spin_unlock(&bdev_lock); /* Detach inode from wb early as bdi_put() may free bdi->wb */ inode_detach_wb(inode); if (bdev->bd_bdi != &noop_backing_dev_info) { @@ -876,8 +862,6 @@ static int bdev_set(struct inode *inode, void *data) return 0; } -static LIST_HEAD(all_bdevs); - struct block_device *bdget(dev_t dev) { struct block_device *bdev; @@ -895,7 +879,6 @@ struct block_device *bdget(dev_t dev) bdev->bd_contains = NULL; bdev->bd_super = NULL; bdev->bd_inode = inode; - bdev->bd_block_size = i_blocksize(inode); bdev->bd_part_count = 0; bdev->bd_invalidated = 0; inode->i_mode = S_IFBLK; @@ -903,9 +886,6 @@ inode->i_bdev = bdev; inode->i_data.a_ops = &def_blk_aops; mapping_set_gfp_mask(&inode->i_data, GFP_USER); - spin_lock(&bdev_lock); - list_add(&bdev->bd_list, &all_bdevs); - spin_unlock(&bdev_lock); unlock_new_inode(inode); } return bdev; @@ -926,13 +906,14 @@ EXPORT_SYMBOL(bdgrab); long nr_blockdev_pages(void) { - struct block_device *bdev; + struct inode *inode; long ret = 0; - spin_lock(&bdev_lock); - list_for_each_entry(bdev, &all_bdevs, bd_list) { - ret += bdev->bd_inode->i_mapping->nrpages; - } - spin_unlock(&bdev_lock); + + spin_lock(&blockdev_superblock->s_inode_list_lock); + list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) + ret += inode->i_mapping->nrpages; + spin_unlock(&blockdev_superblock->s_inode_list_lock); + + return ret; } @@ -1034,30 +1015,28 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, } /** - * bd_prepare_to_claim - prepare to claim a block device + * bd_prepare_to_claim - claim a block device * @bdev: block device of interest * @whole: the whole device containing @bdev, may equal @bdev * @holder: holder trying to claim @bdev * - * Prepare to claim @bdev. This function fails if @bdev is already - * claimed by another holder and waits if another claiming is in - * progress. This function doesn't actually claim. On successful - * return, the caller has ownership of bd_claiming and bd_holder[s]. - * - * CONTEXT: - * spin_lock(&bdev_lock). Might release bdev_lock, sleep and regrab - * it multiple times. + * Claim @bdev. This function fails if @bdev is already claimed by another + * holder and waits if another claiming is in progress. On successful return, + * the caller has ownership of bd_claiming and bd_holder[s]. * * RETURNS: * 0 if @bdev can be claimed, -EBUSY otherwise. 
*/ -static int bd_prepare_to_claim(struct block_device *bdev, - struct block_device *whole, void *holder) +int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole, + void *holder) { retry: + spin_lock(&bdev_lock); /* if someone else claimed, fail */ - if (!bd_may_claim(bdev, whole, holder)) + if (!bd_may_claim(bdev, whole, holder)) { + spin_unlock(&bdev_lock); return -EBUSY; + } /* if claiming is already in progress, wait for it to finish */ if (whole->bd_claiming) { @@ -1068,13 +1047,15 @@ retry: spin_unlock(&bdev_lock); schedule(); finish_wait(wq, &wait); - spin_lock(&bdev_lock); goto retry; } /* yay, all mine */ + whole->bd_claiming = holder; + spin_unlock(&bdev_lock); return 0; } +EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) { @@ -1097,78 +1078,6 @@ static struct gendisk *bdev_get_gendisk(struct block_device *bdev, int *partno) return disk; } -/** - * bd_start_claiming - start claiming a block device - * @bdev: block device of interest - * @holder: holder trying to claim @bdev - * - * @bdev is about to be opened exclusively. Check @bdev can be opened - * exclusively and mark that an exclusive open is in progress. Each - * successful call to this function must be matched with a call to - * either bd_finish_claiming() or bd_abort_claiming() (which do not - * fail). - * - * This function is used to gain exclusive access to the block device - * without actually causing other exclusive open attempts to fail. It - * should be used when the open sequence itself requires exclusive - * access but may subsequently fail. - * - * CONTEXT: - * Might sleep. - * - * RETURNS: - * Pointer to the block device containing @bdev on success, ERR_PTR() - * value on failure. - */ -struct block_device *bd_start_claiming(struct block_device *bdev, void *holder) -{ - struct gendisk *disk; - struct block_device *whole; - int partno, err; - - might_sleep(); - - /* - * @bdev might not have been initialized properly yet, look up - * and grab the outer block device the hard way. - */ - disk = bdev_get_gendisk(bdev, &partno); - if (!disk) - return ERR_PTR(-ENXIO); - - /* - * Normally, @bdev should equal what's returned from bdget_disk() - * if partno is 0; however, some drivers (floppy) use multiple - * bdev's for the same physical device and @bdev may be one of the - * aliases. Keep @bdev if partno is 0. This means claimer - * tracking is broken for those devices but it has always been that - * way. - */ - if (partno) - whole = bdget_disk(disk, 0); - else - whole = bdgrab(bdev); - - put_disk_and_module(disk); - if (!whole) - return ERR_PTR(-ENOMEM); - - /* prepare to claim, if successful, mark claiming in progress */ - spin_lock(&bdev_lock); - - err = bd_prepare_to_claim(bdev, whole, holder); - if (err == 0) { - whole->bd_claiming = holder; - spin_unlock(&bdev_lock); - return whole; - } else { - spin_unlock(&bdev_lock); - bdput(whole); - return ERR_PTR(err); - } -} -EXPORT_SYMBOL(bd_start_claiming); - static void bd_clear_claiming(struct block_device *whole, void *holder) { lockdep_assert_held(&bdev_lock); @@ -1181,14 +1090,14 @@ static void bd_clear_claiming(struct block_device *whole, void *holder) /** * bd_finish_claiming - finish claiming of a block device * @bdev: block device of interest - * @whole: whole block device (returned from bd_start_claiming()) + * @whole: whole block device * @holder: holder that has claimed @bdev * * Finish exclusive open of a block device. 
Mark the device as exlusively * open by the holder and wake up all waiters for exclusive open to finish. */ -void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, - void *holder) +static void bd_finish_claiming(struct block_device *bdev, + struct block_device *whole, void *holder) { spin_lock(&bdev_lock); BUG_ON(!bd_may_claim(bdev, whole, holder)); @@ -1203,12 +1112,11 @@ void bd_finish_claiming(struct block_device *bdev, struct block_device *whole, bd_clear_claiming(whole, holder); spin_unlock(&bdev_lock); } -EXPORT_SYMBOL(bd_finish_claiming); /** * bd_abort_claiming - abort claiming of a block device * @bdev: block device of interest - * @whole: whole block device (returned from bd_start_claiming()) + * @whole: whole block device * @holder: holder that has claimed @bdev * * Abort claiming of a block device when the exclusive open failed. This can be @@ -1368,26 +1276,6 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); #endif /** - * flush_disk - invalidates all buffer-cache entries on a disk - * - * @bdev: struct block device to be flushed - * @kill_dirty: flag to guide handling of dirty inodes - * - * Invalidates all buffer-cache entries on a disk. It should be called - * when a disk has been changed -- either by a media change or online - * resize. - */ -static void flush_disk(struct block_device *bdev, bool kill_dirty) -{ - if (__invalidate_device(bdev, kill_dirty)) { - printk(KERN_WARNING "VFS: busy inodes on changed media or " - "resized disk %s\n", - bdev->bd_disk ? bdev->bd_disk->disk_name : ""); - } - bdev->bd_invalidated = 1; -} - -/** * check_disk_size_change - checks for disk size change and adjusts bdev size. * @disk: struct gendisk to check * @bdev: struct bdev to adjust. @@ -1411,8 +1299,9 @@ static void check_disk_size_change(struct gendisk *disk, disk->disk_name, bdev_size, disk_size); } i_size_write(bdev->bd_inode, disk_size); - if (bdev_size > disk_size) - flush_disk(bdev, false); + if (bdev_size > disk_size && __invalidate_device(bdev, false)) + pr_warn("VFS: busy inodes on resized disk %s\n", + disk->disk_name); } bdev->bd_invalidated = 0; } @@ -1471,7 +1360,10 @@ int check_disk_change(struct block_device *bdev) if (!(events & DISK_EVENT_MEDIA_CHANGE)) return 0; - flush_disk(bdev, true); + if (__invalidate_device(bdev, true)) + pr_warn("VFS: busy inodes on changed media %s\n", + disk->disk_name); + bdev->bd_invalidated = 1; if (bdops->revalidate_disk) bdops->revalidate_disk(bdev->bd_disk); return 1; @@ -1547,13 +1439,15 @@ EXPORT_SYMBOL_GPL(bdev_disk_changed); * mutex_lock_nested(whole->bd_mutex, 1) */ -static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) +static int __blkdev_get(struct block_device *bdev, fmode_t mode, void *holder, + int for_part) { + struct block_device *whole = NULL, *claiming = NULL; struct gendisk *disk; int ret; int partno; int perm = 0; - bool first_open = false; + bool first_open = false, unblock_events = true, need_restart; if (mode & FMODE_READ) perm |= MAY_READ; @@ -1569,18 +1463,36 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) } restart: - + need_restart = false; ret = -ENXIO; disk = bdev_get_gendisk(bdev, &partno); if (!disk) goto out; + if (partno) { + whole = bdget_disk(disk, 0); + if (!whole) { + ret = -ENOMEM; + goto out_put_disk; + } + } + + if (!for_part && (mode & FMODE_EXCL)) { + WARN_ON_ONCE(!holder); + if (whole) + claiming = whole; + else + claiming = bdev; + ret = bd_prepare_to_claim(bdev, claiming, holder); + if (ret) + goto out_put_whole; + } 
+ disk_block_events(disk); mutex_lock_nested(&bdev->bd_mutex, for_part); if (!bdev->bd_openers) { first_open = true; bdev->bd_disk = disk; - bdev->bd_queue = disk->queue; bdev->bd_contains = bdev; bdev->bd_partno = partno; @@ -1593,20 +1505,12 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) ret = 0; if (disk->fops->open) { ret = disk->fops->open(bdev, mode); - if (ret == -ERESTARTSYS) { - /* Lost a race with 'disk' being - * deleted, try again. - * See md.c - */ - disk_put_part(bdev->bd_part); - bdev->bd_part = NULL; - bdev->bd_disk = NULL; - bdev->bd_queue = NULL; - mutex_unlock(&bdev->bd_mutex); - disk_unblock_events(disk); - put_disk_and_module(disk); - goto restart; - } + /* + * If we lost a race with 'disk' being deleted, + * try again. See md.c + */ + if (ret == -ERESTARTSYS) + need_restart = true; } if (!ret) { @@ -1627,18 +1531,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) if (ret) goto out_clear; } else { - struct block_device *whole; - whole = bdget_disk(disk, 0); - ret = -ENOMEM; - if (!whole) - goto out_clear; BUG_ON(for_part); - ret = __blkdev_get(whole, mode, 1); - if (ret) { - bdput(whole); + ret = __blkdev_get(whole, mode, NULL, 1); + if (ret) goto out_clear; - } - bdev->bd_contains = whole; + bdev->bd_contains = bdgrab(whole); bdev->bd_part = disk_get_part(disk, partno); if (!(disk->flags & GENHD_FL_UP) || !bdev->bd_part || !bdev->bd_part->nr_sects) { @@ -1667,27 +1564,52 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) bdev->bd_openers++; if (for_part) bdev->bd_part_count++; + if (claiming) + bd_finish_claiming(bdev, claiming, holder); + + /* + * Block event polling for write claims if requested. Any write holder + * makes the write_holder state stick until all are released. This is + * good enough and tracking individual writeable reference is too + * fragile given the way @mode is used in blkdev_get/put(). 
+ */ + if (claiming && (mode & FMODE_WRITE) && !bdev->bd_write_holder && + (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { + bdev->bd_write_holder = true; + unblock_events = false; + } mutex_unlock(&bdev->bd_mutex); - disk_unblock_events(disk); + + if (unblock_events) + disk_unblock_events(disk); + /* only one opener holds refs to the module and disk */ if (!first_open) put_disk_and_module(disk); + if (whole) + bdput(whole); return 0; out_clear: disk_put_part(bdev->bd_part); bdev->bd_disk = NULL; bdev->bd_part = NULL; - bdev->bd_queue = NULL; if (bdev != bdev->bd_contains) __blkdev_put(bdev->bd_contains, mode, 1); bdev->bd_contains = NULL; out_unlock_bdev: + if (claiming) + bd_abort_claiming(bdev, claiming, holder); mutex_unlock(&bdev->bd_mutex); disk_unblock_events(disk); + out_put_whole: + if (whole) + bdput(whole); + out_put_disk: put_disk_and_module(disk); + if (need_restart) + goto restart; out: - return ret; } @@ -1712,50 +1634,11 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) */ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) { - struct block_device *whole = NULL; int res; - WARN_ON_ONCE((mode & FMODE_EXCL) && !holder); - - if ((mode & FMODE_EXCL) && holder) { - whole = bd_start_claiming(bdev, holder); - if (IS_ERR(whole)) { - bdput(bdev); - return PTR_ERR(whole); - } - } - - res = __blkdev_get(bdev, mode, 0); - - if (whole) { - struct gendisk *disk = whole->bd_disk; - - /* finish claiming */ - mutex_lock(&bdev->bd_mutex); - if (!res) - bd_finish_claiming(bdev, whole, holder); - else - bd_abort_claiming(bdev, whole, holder); - /* - * Block event polling for write claims if requested. Any - * write holder makes the write_holder state stick until - * all are released. This is good enough and tracking - * individual writeable reference is too fragile given the - * way @mode is used in blkdev_get/put(). - */ - if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder && - (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) { - bdev->bd_write_holder = true; - disk_block_events(disk); - } - - mutex_unlock(&bdev->bd_mutex); - bdput(whole); - } - + res =__blkdev_get(bdev, mode, holder, 0); if (res) bdput(bdev); - return res; } EXPORT_SYMBOL(blkdev_get); @@ -1851,7 +1734,7 @@ static int blkdev_open(struct inode * inode, struct file * filp) */ filp->f_flags |= O_LARGEFILE; - filp->f_mode |= FMODE_NOWAIT; + filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; if (filp->f_flags & O_NDELAY) filp->f_mode |= FMODE_NDELAY; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 9fe9ec6aab71..abf86b202b43 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1677,27 +1677,6 @@ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, return btrfs_get_root_ref(fs_info, objectid, anon_dev, true); } -static int btrfs_congested_fn(void *congested_data, int bdi_bits) -{ - struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; - int ret = 0; - struct btrfs_device *device; - struct backing_dev_info *bdi; - - rcu_read_lock(); - list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { - if (!device->bdev) - continue; - bdi = device->bdev->bd_bdi; - if (bdi_congested(bdi, bdi_bits)) { - ret = 1; - break; - } - } - rcu_read_unlock(); - return ret; -} - /* * called by the kthread helper functions to finally call the bio end_io * functions. 
This is where read checksum verification actually happens @@ -3112,8 +3091,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device goto fail_sb_buffer; } - sb->s_bdi->congested_fn = btrfs_congested_fn; - sb->s_bdi->congested_data = fs_info; sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK; sb->s_bdi->ra_pages = VM_READAHEAD_PAGES; sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c15ab6c1897f..a940edb1e64f 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4541,6 +4541,8 @@ next: /* once for us */ free_extent_map(em); + + cond_resched(); /* Allow large-extent preemption. */ } } return try_release_extent_state(tree, page, mask); diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 3abaff4d2cff..4507c3d09399 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -3537,7 +3537,7 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence) static int btrfs_file_open(struct inode *inode, struct file *filp) { - filp->f_mode |= FMODE_NOWAIT; + filp->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; return generic_file_open(inode, filp); } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 46e04247193a..9570458aa847 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3,6 +3,7 @@ * Copyright (C) 2007 Oracle. All rights reserved. */ +#include <crypto/hash.h> #include <linux/kernel.h> #include <linux/bio.h> #include <linux/buffer_head.h> diff --git a/fs/buffer.c b/fs/buffer.c index 64fe82ec65ff..50bbc99e3d96 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -320,9 +320,8 @@ static void decrypt_bh(struct work_struct *work) static void end_buffer_async_read_io(struct buffer_head *bh, int uptodate) { /* Decrypt if needed */ - if (uptodate && IS_ENABLED(CONFIG_FS_ENCRYPTION) && - IS_ENCRYPTED(bh->b_page->mapping->host) && - S_ISREG(bh->b_page->mapping->host->i_mode)) { + if (uptodate && + fscrypt_inode_uses_fs_layer_crypto(bh->b_page->mapping->host)) { struct decrypt_bh_ctx *ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); if (ctx) { @@ -1959,7 +1958,7 @@ iomap_to_bh(struct inode *inode, sector_t block, struct buffer_head *bh, */ set_buffer_new(bh); set_buffer_unwritten(bh); - /* FALLTHRU */ + fallthrough; case IOMAP_MAPPED: if ((iomap->flags & IOMAP_F_NEW) || offset >= i_size_read(inode)) @@ -3040,12 +3039,10 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, if (test_set_buffer_req(bh) && (op == REQ_OP_WRITE)) clear_buffer_write_io_error(bh); - /* - * from here on down, it's all bio -- do the initial mapping, - * submit_bio -> generic_make_request may further map this bio around - */ bio = bio_alloc(GFP_NOIO, 1); + fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); + bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); bio->bi_write_hint = write_hint; @@ -3160,6 +3157,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags) WARN_ON(atomic_read(&bh->b_count) < 1); lock_buffer(bh); if (test_clear_buffer_dirty(bh)) { + /* + * The bh should be mapped, but it might not be if the + * device was hot-removed. Not much we can do but fail the I/O. 
+ */ + if (!buffer_mapped(bh)) { + unlock_buffer(bh); + return -EIO; + } + get_bh(bh); bh->b_end_io = end_buffer_write_sync; ret = submit_bh(REQ_OP_WRITE, op_flags, bh); diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index cf235f6eacf9..471e40156065 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig @@ -13,7 +13,7 @@ config CEPH_FS scalable file system designed to provide high performance, reliable access to petabytes of storage. - More information at http://ceph.newdream.net/. + More information at https://ceph.io/. If unsure, say N. diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 01ad09733ac7..6ea761c84494 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -862,8 +862,7 @@ static void writepages_finish(struct ceph_osd_request *req) osd_data = osd_req_op_extent_osd_data(req, 0); if (osd_data->pages_from_pool) - mempool_free(osd_data->pages, - ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); + mempool_free(osd_data->pages, ceph_wb_pagevec_pool); else kfree(osd_data->pages); ceph_osdc_put_request(req); @@ -955,10 +954,10 @@ retry: int num_ops = 0, op_idx; unsigned i, pvec_pages, max_pages, locked_pages = 0; struct page **pages = NULL, **data_pages; - mempool_t *pool = NULL; /* Becomes non-null if mempool used */ struct page *page; pgoff_t strip_unit_end = 0; u64 offset = 0, len = 0; + bool from_pool = false; max_pages = wsize >> PAGE_SHIFT; @@ -1057,16 +1056,16 @@ get_more_pages: sizeof(*pages), GFP_NOFS); if (!pages) { - pool = fsc->wb_pagevec_pool; - pages = mempool_alloc(pool, GFP_NOFS); + from_pool = true; + pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); BUG_ON(!pages); } len = 0; } else if (page->index != (offset + len) >> PAGE_SHIFT) { - if (num_ops >= (pool ? CEPH_OSD_SLAB_OPS : - CEPH_OSD_MAX_OPS)) { + if (num_ops >= (from_pool ? 
CEPH_OSD_SLAB_OPS : + CEPH_OSD_MAX_OPS)) { redirty_page_for_writepage(wbc, page); unlock_page(page); break; @@ -1161,7 +1160,7 @@ new_request: offset, len); osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, 0, - !!pool, false); + from_pool, false); osd_req_op_extent_update(req, op_idx, len); len = 0; @@ -1188,12 +1187,12 @@ new_request: dout("writepages got pages at %llu~%llu\n", offset, len); osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len, - 0, !!pool, false); + 0, from_pool, false); osd_req_op_extent_update(req, op_idx, len); BUG_ON(op_idx + 1 != req->r_num_ops); - pool = NULL; + from_pool = false; if (i < locked_pages) { BUG_ON(num_ops <= req->r_num_ops); num_ops -= req->r_num_ops; @@ -1204,8 +1203,8 @@ new_request: pages = kmalloc_array(locked_pages, sizeof(*pages), GFP_NOFS); if (!pages) { - pool = fsc->wb_pagevec_pool; - pages = mempool_alloc(pool, GFP_NOFS); + from_pool = true; + pages = mempool_alloc(ceph_wb_pagevec_pool, GFP_NOFS); BUG_ON(!pages); } memcpy(pages, data_pages + i, diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 972c13aa4225..034b3f4fdd3a 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -668,6 +668,7 @@ void ceph_add_cap(struct inode *inode, spin_lock(&session->s_cap_lock); list_add_tail(&cap->session_caps, &session->s_caps); session->s_nr_caps++; + atomic64_inc(&mdsc->metric.total_caps); spin_unlock(&session->s_cap_lock); } else { spin_lock(&session->s_cap_lock); @@ -886,8 +887,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) int have = ci->i_snap_caps; if ((have & mask) == mask) { - dout("__ceph_caps_issued_mask ino 0x%lx snap issued %s" - " (mask %s)\n", ci->vfs_inode.i_ino, + dout("__ceph_caps_issued_mask ino 0x%llx snap issued %s" + " (mask %s)\n", ceph_ino(&ci->vfs_inode), ceph_cap_string(have), ceph_cap_string(mask)); return 1; @@ -898,8 +899,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) if (!__cap_is_valid(cap)) continue; if ((cap->issued & mask) == mask) { - dout("__ceph_caps_issued_mask ino 0x%lx cap %p issued %s" - " (mask %s)\n", ci->vfs_inode.i_ino, cap, + dout("__ceph_caps_issued_mask ino 0x%llx cap %p issued %s" + " (mask %s)\n", ceph_ino(&ci->vfs_inode), cap, ceph_cap_string(cap->issued), ceph_cap_string(mask)); if (touch) @@ -910,8 +911,8 @@ int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch) /* does a combination of caps satisfy mask? 
*/ have |= cap->issued; if ((have & mask) == mask) { - dout("__ceph_caps_issued_mask ino 0x%lx combo issued %s" - " (mask %s)\n", ci->vfs_inode.i_ino, + dout("__ceph_caps_issued_mask ino 0x%llx combo issued %s" + " (mask %s)\n", ceph_ino(&ci->vfs_inode), ceph_cap_string(cap->issued), ceph_cap_string(mask)); if (touch) { @@ -1161,6 +1162,7 @@ void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release) } else { list_del_init(&cap->session_caps); session->s_nr_caps--; + atomic64_dec(&mdsc->metric.total_caps); cap->session = NULL; removed = 1; } @@ -2870,7 +2872,7 @@ int ceph_get_caps(struct file *filp, int need, int want, struct cap_wait cw; DEFINE_WAIT_FUNC(wait, woken_wake_function); - cw.ino = inode->i_ino; + cw.ino = ceph_ino(inode); cw.tgid = current->tgid; cw.need = need; cw.want = want; @@ -4187,10 +4189,8 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) struct ceph_inode_info *ci; dout("check_delayed_caps\n"); - while (1) { - spin_lock(&mdsc->cap_delay_lock); - if (list_empty(&mdsc->cap_delay_list)) - break; + spin_lock(&mdsc->cap_delay_lock); + while (!list_empty(&mdsc->cap_delay_list)) { ci = list_first_entry(&mdsc->cap_delay_list, struct ceph_inode_info, i_cap_delay_list); @@ -4200,13 +4200,13 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc) list_del_init(&ci->i_cap_delay_list); inode = igrab(&ci->vfs_inode); - spin_unlock(&mdsc->cap_delay_lock); - if (inode) { + spin_unlock(&mdsc->cap_delay_lock); dout("check_delayed_caps on %p\n", inode); ceph_check_caps(ci, 0, NULL); /* avoid calling iput_final() in tick thread */ ceph_async_iput(inode); + spin_lock(&mdsc->cap_delay_lock); } } spin_unlock(&mdsc->cap_delay_lock); diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c index 070ed8481340..3e3fcda9b276 100644 --- a/fs/ceph/debugfs.c +++ b/fs/ceph/debugfs.c @@ -145,7 +145,7 @@ static int metric_show(struct seq_file *s, void *p) struct ceph_fs_client *fsc = s->private; struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_client_metric *m = &mdsc->metric; - int i, nr_caps = 0; + int nr_caps = 0; s64 total, sum, avg, min, max, sq; seq_printf(s, "item total avg_lat(us) min_lat(us) max_lat(us) stdev(us)\n"); @@ -190,17 +190,7 @@ static int metric_show(struct seq_file *s, void *p) percpu_counter_sum(&m->d_lease_mis), percpu_counter_sum(&m->d_lease_hit)); - mutex_lock(&mdsc->mutex); - for (i = 0; i < mdsc->max_sessions; i++) { - struct ceph_mds_session *s; - - s = __ceph_lookup_mds_session(mdsc, i); - if (!s) - continue; - nr_caps += s->s_nr_caps; - ceph_put_mds_session(s); - } - mutex_unlock(&mdsc->mutex); + nr_caps = atomic64_read(&m->total_caps); seq_printf(s, "%-14s%-16d%-16lld%lld\n", "caps", nr_caps, percpu_counter_sum(&m->i_caps_mis), percpu_counter_sum(&m->i_caps_hit)); @@ -212,7 +202,7 @@ static int caps_show_cb(struct inode *inode, struct ceph_cap *cap, void *p) { struct seq_file *s = p; - seq_printf(s, "0x%-17lx%-17s%-17s\n", inode->i_ino, + seq_printf(s, "0x%-17llx%-17s%-17s\n", ceph_ino(inode), ceph_cap_string(cap->issued), ceph_cap_string(cap->implemented)); return 0; @@ -257,7 +247,7 @@ static int caps_show(struct seq_file *s, void *p) spin_lock(&mdsc->caps_list_lock); list_for_each_entry(cw, &mdsc->cap_wait_list, list) { - seq_printf(s, "%-13d0x%-17lx%-17s%-17s\n", cw->tgid, cw->ino, + seq_printf(s, "%-13d0x%-17llx%-17s%-17s\n", cw->tgid, cw->ino, ceph_cap_string(cw->need), ceph_cap_string(cw->want)); } @@ -272,7 +262,7 @@ static int mds_sessions_show(struct seq_file *s, void *ptr) struct ceph_mds_client *mdsc = fsc->mdsc; struct ceph_auth_client 
*ac = fsc->client->monc.auth; struct ceph_options *opt = fsc->client->options; - int mds = -1; + int mds; mutex_lock(&mdsc->mutex); diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 39f5311404b0..d72e4a12bb69 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -259,9 +259,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx, dentry, dentry, d_inode(dentry)); ctx->pos = di->offset; if (!dir_emit(ctx, dentry->d_name.name, - dentry->d_name.len, - ceph_translate_ino(dentry->d_sb, - d_inode(dentry)->i_ino), + dentry->d_name.len, ceph_present_inode(d_inode(dentry)), d_inode(dentry)->i_mode >> 12)) { dput(dentry); err = 0; @@ -324,18 +322,21 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx) /* always start with . and .. */ if (ctx->pos == 0) { dout("readdir off 0 -> '.'\n"); - if (!dir_emit(ctx, ".", 1, - ceph_translate_ino(inode->i_sb, inode->i_ino), + if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode), inode->i_mode >> 12)) return 0; ctx->pos = 1; } if (ctx->pos == 1) { - ino_t ino = parent_ino(file->f_path.dentry); + u64 ino; + struct dentry *dentry = file->f_path.dentry; + + spin_lock(&dentry->d_lock); + ino = ceph_present_inode(dentry->d_parent->d_inode); + spin_unlock(&dentry->d_lock); + dout("readdir off 1 -> '..'\n"); - if (!dir_emit(ctx, "..", 2, - ceph_translate_ino(inode->i_sb, ino), - inode->i_mode >> 12)) + if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12)) return 0; ctx->pos = 2; } @@ -507,9 +508,6 @@ more: } for (; i < rinfo->dir_nr; i++) { struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i; - struct ceph_vino vino; - ino_t ino; - u32 ftype; BUG_ON(rde->offset < ctx->pos); @@ -519,13 +517,10 @@ more: rde->name_len, rde->name, &rde->inode.in); BUG_ON(!rde->inode.in); - ftype = le32_to_cpu(rde->inode.in->mode) >> 12; - vino.ino = le64_to_cpu(rde->inode.in->ino); - vino.snap = le64_to_cpu(rde->inode.in->snapid); - ino = ceph_vino_to_ino(vino); if (!dir_emit(ctx, rde->name, rde->name_len, - ceph_translate_ino(inode->i_sb, ino), ftype)) { + ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)), + le32_to_cpu(rde->inode.in->mode) >> 12)) { dout("filldir stopping us...\n"); return 0; } @@ -930,6 +925,10 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry, req->r_num_caps = 2; req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL; req->r_dentry_unless = CEPH_CAP_FILE_EXCL; + if (as_ctx.pagelist) { + req->r_pagelist = as_ctx.pagelist; + as_ctx.pagelist = NULL; + } err = ceph_mdsc_do_request(mdsc, dir, req); if (!err && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); @@ -1157,7 +1156,7 @@ retry: if (try_async && op == CEPH_MDS_OP_UNLINK && (req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) { - dout("async unlink on %lu/%.*s caps=%s", dir->i_ino, + dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir), dentry->d_name.len, dentry->d_name.name, ceph_cap_string(req->r_dir_caps)); set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags); @@ -1741,7 +1740,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags) case -ENOENT: if (d_really_is_negative(dentry)) valid = 1; - /* Fallthrough */ + fallthrough; default: break; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 160644ddaeed..3f4c993dfc6f 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -252,7 +252,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode) case S_IFREG: ceph_fscache_register_inode_cookie(inode); ceph_fscache_file_set_cookie(inode, file); - /* fall 
through */ + fallthrough; case S_IFDIR: ret = ceph_init_file_info(inode, file, fmode, S_ISDIR(inode->i_mode)); @@ -630,8 +630,8 @@ static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry, } else { struct dentry *dn; - dout("%s d_adding new inode 0x%llx to 0x%lx/%s\n", __func__, - vino.ino, dir->i_ino, dentry->d_name.name); + dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__, + vino.ino, ceph_ino(dir), dentry->d_name.name); ceph_dir_clear_ordered(dir); ceph_init_inode_acls(inode, as_ctx); if (inode->i_state & I_NEW) { @@ -1538,6 +1538,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to) struct inode *inode = file_inode(filp); struct ceph_inode_info *ci = ceph_inode(inode); struct page *pinned_page = NULL; + bool direct_lock = iocb->ki_flags & IOCB_DIRECT; ssize_t ret; int want, got = 0; int retry_op = 0, read = 0; @@ -1546,7 +1547,7 @@ again: dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode); - if (iocb->ki_flags & IOCB_DIRECT) + if (direct_lock) ceph_start_io_direct(inode); else ceph_start_io_read(inode); @@ -1603,7 +1604,7 @@ again: } ceph_put_cap_refs(ci, got); - if (iocb->ki_flags & IOCB_DIRECT) + if (direct_lock) ceph_end_io_direct(inode); else ceph_end_io_read(inode); @@ -2506,6 +2507,7 @@ const struct file_operations ceph_file_fops = { .mmap = ceph_mmap, .fsync = ceph_fsync, .lock = ceph_lock, + .setlease = simple_nosetlease, .flock = ceph_flock, .splice_read = generic_file_splice_read, .splice_write = iter_file_splice_write, diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 357c937699d5..d163fa96cb40 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -41,8 +41,10 @@ static void ceph_inode_work(struct work_struct *work); */ static int ceph_set_ino_cb(struct inode *inode, void *data) { - ceph_inode(inode)->i_vino = *(struct ceph_vino *)data; - inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data); + struct ceph_inode_info *ci = ceph_inode(inode); + + ci->i_vino = *(struct ceph_vino *)data; + inode->i_ino = ceph_vino_to_ino_t(ci->i_vino); inode_set_iversion_raw(inode, 0); return 0; } @@ -50,17 +52,14 @@ static int ceph_set_ino_cb(struct inode *inode, void *data) struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino) { struct inode *inode; - ino_t t = ceph_vino_to_ino(vino); - inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino); + inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare, + ceph_set_ino_cb, &vino); if (!inode) return ERR_PTR(-ENOMEM); - if (inode->i_state & I_NEW) - dout("get_inode created new inode %p %llx.%llx ino %llx\n", - inode, ceph_vinop(inode), (u64)inode->i_ino); - dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino, - vino.snap, inode); + dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode), + ceph_vinop(inode), inode, !!(inode->i_state & I_NEW)); return inode; } @@ -2378,7 +2377,7 @@ int ceph_getattr(const struct path *path, struct kstat *stat, } generic_fillattr(inode, stat); - stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino); + stat->ino = ceph_present_inode(inode); /* * btime on newly-allocated inodes is 0, so if this is still set to diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index a50497142e59..4a26862d7667 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1103,8 +1103,7 @@ static int __choose_mds(struct ceph_mds_client *mdsc, frag.frag, mds); if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= 
CEPH_MDS_STATE_ACTIVE) { - if (mode == USE_ANY_MDS && - !ceph_mdsmap_is_laggy(mdsc->mdsmap, + if (!ceph_mdsmap_is_laggy(mdsc->mdsmap, mds)) goto out; } @@ -1168,7 +1167,7 @@ static struct ceph_msg *create_session_msg(u32 op, u64 seq) static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED; #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8) -static void encode_supported_features(void **p, void *end) +static int encode_supported_features(void **p, void *end) { static const size_t count = ARRAY_SIZE(feature_bits); @@ -1176,16 +1175,64 @@ static void encode_supported_features(void **p, void *end) size_t i; size_t size = FEATURE_BYTES(count); - BUG_ON(*p + 4 + size > end); + if (WARN_ON_ONCE(*p + 4 + size > end)) + return -ERANGE; + ceph_encode_32(p, size); memset(*p, 0, size); for (i = 0; i < count; i++) ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8); *p += size; } else { - BUG_ON(*p + 4 > end); + if (WARN_ON_ONCE(*p + 4 > end)) + return -ERANGE; + ceph_encode_32(p, 0); } + + return 0; +} + +static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED; +#define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8) +static int encode_metric_spec(void **p, void *end) +{ + static const size_t count = ARRAY_SIZE(metric_bits); + + /* header */ + if (WARN_ON_ONCE(*p + 2 > end)) + return -ERANGE; + + ceph_encode_8(p, 1); /* version */ + ceph_encode_8(p, 1); /* compat */ + + if (count > 0) { + size_t i; + size_t size = METRIC_BYTES(count); + + if (WARN_ON_ONCE(*p + 4 + 4 + size > end)) + return -ERANGE; + + /* metric spec info length */ + ceph_encode_32(p, 4 + size); + + /* metric spec */ + ceph_encode_32(p, size); + memset(*p, 0, size); + for (i = 0; i < count; i++) + ((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8); + *p += size; + } else { + if (WARN_ON_ONCE(*p + 4 + 4 > end)) + return -ERANGE; + + /* metric spec info length */ + ceph_encode_32(p, 4); + /* metric spec */ + ceph_encode_32(p, 0); + } + + return 0; } /* @@ -1203,6 +1250,7 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 struct ceph_mount_options *fsopt = mdsc->fsc->mount_options; size_t size, count; void *p, *end; + int ret; const char* metadata[][2] = { {"hostname", mdsc->nodename}, @@ -1227,12 +1275,19 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 size = FEATURE_BYTES(count); extra_bytes += 4 + size; + /* metric spec */ + size = 0; + count = ARRAY_SIZE(metric_bits); + if (count > 0) + size = METRIC_BYTES(count); + extra_bytes += 2 + 4 + 4 + size; + /* Allocate the message */ msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes, GFP_NOFS, false); if (!msg) { pr_err("create_session_msg ENOMEM creating msg\n"); - return NULL; + return ERR_PTR(-ENOMEM); } p = msg->front.iov_base; end = p + msg->front.iov_len; @@ -1245,9 +1300,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 * Serialize client metadata into waiting buffer space, using * the format that userspace expects for map<string, string> * - * ClientSession messages with metadata are v3 + * ClientSession messages with metadata are v4 */ - msg->hdr.version = cpu_to_le16(3); + msg->hdr.version = cpu_to_le16(4); msg->hdr.compat_version = cpu_to_le16(1); /* The write pointer, following the session_head structure */ @@ -1269,7 +1324,20 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 p += val_len; } - 
encode_supported_features(&p, end); + ret = encode_supported_features(&p, end); + if (ret) { + pr_err("encode_supported_features failed!\n"); + ceph_msg_put(msg); + return ERR_PTR(ret); + } + + ret = encode_metric_spec(&p, end); + if (ret) { + pr_err("encode_metric_spec failed!\n"); + ceph_msg_put(msg); + return ERR_PTR(ret); + } + msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); @@ -1297,8 +1365,8 @@ static int __open_session(struct ceph_mds_client *mdsc, /* send connect message */ msg = create_session_open_msg(mdsc, session->s_seq); - if (!msg) - return -ENOMEM; + if (IS_ERR(msg)) + return PTR_ERR(msg); ceph_con_send(&session->s_con, msg); return 0; } @@ -1312,6 +1380,7 @@ static struct ceph_mds_session * __open_export_target_session(struct ceph_mds_client *mdsc, int target) { struct ceph_mds_session *session; + int ret; session = __ceph_lookup_mds_session(mdsc, target); if (!session) { @@ -1320,8 +1389,11 @@ __open_export_target_session(struct ceph_mds_client *mdsc, int target) return session; } if (session->s_state == CEPH_MDS_SESSION_NEW || - session->s_state == CEPH_MDS_SESSION_CLOSING) - __open_session(mdsc, session); + session->s_state == CEPH_MDS_SESSION_CLOSING) { + ret = __open_session(mdsc, session); + if (ret) + return ERR_PTR(ret); + } return session; } @@ -1485,6 +1557,7 @@ int ceph_iterate_session_caps(struct ceph_mds_session *session, cap->session = NULL; list_del_init(&cap->session_caps); session->s_nr_caps--; + atomic64_dec(&session->s_mdsc->metric.total_caps); if (cap->queue_release) __ceph_queue_cap_release(session, cap); else @@ -1785,8 +1858,7 @@ static void renewed_caps(struct ceph_mds_client *mdsc, /* * send a session close request */ -static int request_close_session(struct ceph_mds_client *mdsc, - struct ceph_mds_session *session) +static int request_close_session(struct ceph_mds_session *session) { struct ceph_msg *msg; @@ -1809,7 +1881,7 @@ static int __close_session(struct ceph_mds_client *mdsc, if (session->s_state >= CEPH_MDS_SESSION_CLOSING) return 0; session->s_state = CEPH_MDS_SESSION_CLOSING; - return request_close_session(mdsc, session); + return request_close_session(session); } static bool drop_negative_children(struct dentry *dentry) @@ -2520,7 +2592,12 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, ceph_encode_copy(&p, &ts, sizeof(ts)); } - BUG_ON(p > end); + if (WARN_ON_ONCE(p > end)) { + ceph_msg_put(msg); + msg = ERR_PTR(-ERANGE); + goto out_free2; + } + msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); @@ -2756,7 +2833,9 @@ static void __do_request(struct ceph_mds_client *mdsc, } if (session->s_state == CEPH_MDS_SESSION_NEW || session->s_state == CEPH_MDS_SESSION_CLOSING) { - __open_session(mdsc, session); + err = __open_session(mdsc, session); + if (err) + goto out_session; /* retry the same mds later */ if (random) req->r_resend_mds = mds; @@ -3279,8 +3358,10 @@ static void handle_session(struct ceph_mds_session *session, goto bad; /* version >= 3, feature bits */ ceph_decode_32_safe(&p, end, len, bad); - ceph_decode_64_safe(&p, end, features, bad); - p += len - sizeof(features); + if (len) { + ceph_decode_64_safe(&p, end, features, bad); + p += len - sizeof(features); + } } mutex_lock(&mdsc->mutex); @@ -3310,6 +3391,8 @@ static void handle_session(struct ceph_mds_session *session, session->s_state = CEPH_MDS_SESSION_OPEN; session->s_features = features; renewed_caps(mdsc, session, 0); + if 
(test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features)) + metric_schedule_delayed(&mdsc->metric); wake = 1; if (mdsc->stopping) __close_session(mdsc, session); @@ -4263,6 +4346,30 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc) ceph_force_reconnect(fsc->sb); } +bool check_session_state(struct ceph_mds_session *s) +{ + if (s->s_state == CEPH_MDS_SESSION_CLOSING) { + dout("resending session close request for mds%d\n", + s->s_mds); + request_close_session(s); + return false; + } + if (s->s_ttl && time_after(jiffies, s->s_ttl)) { + if (s->s_state == CEPH_MDS_SESSION_OPEN) { + s->s_state = CEPH_MDS_SESSION_HUNG; + pr_info("mds%d hung\n", s->s_mds); + } + } + if (s->s_state == CEPH_MDS_SESSION_NEW || + s->s_state == CEPH_MDS_SESSION_RESTARTING || + s->s_state == CEPH_MDS_SESSION_CLOSED || + s->s_state == CEPH_MDS_SESSION_REJECTED) + /* this mds is failed or recovering, just wait */ + return false; + + return true; +} + /* * delayed work -- periodically trim expired leases, renew caps with mds */ @@ -4283,6 +4390,9 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); + if (mdsc->stopping) + return; + mutex_lock(&mdsc->mutex); renew_interval = mdsc->mdsmap->m_session_timeout >> 2; renew_caps = time_after_eq(jiffies, HZ*renew_interval + @@ -4294,23 +4404,8 @@ static void delayed_work(struct work_struct *work) struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); if (!s) continue; - if (s->s_state == CEPH_MDS_SESSION_CLOSING) { - dout("resending session close request for mds%d\n", - s->s_mds); - request_close_session(mdsc, s); - ceph_put_mds_session(s); - continue; - } - if (s->s_ttl && time_after(jiffies, s->s_ttl)) { - if (s->s_state == CEPH_MDS_SESSION_OPEN) { - s->s_state = CEPH_MDS_SESSION_HUNG; - pr_info("mds%d hung\n", s->s_mds); - } - } - if (s->s_state == CEPH_MDS_SESSION_NEW || - s->s_state == CEPH_MDS_SESSION_RESTARTING || - s->s_state == CEPH_MDS_SESSION_REJECTED) { - /* this mds is failed or recovering, just wait */ + + if (!check_session_state(s)) { ceph_put_mds_session(s); continue; } @@ -4359,7 +4454,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) goto err_mdsc; } - fsc->mdsc = mdsc; init_completion(&mdsc->safe_umount_waiters); init_waitqueue_head(&mdsc->session_close_wq); INIT_LIST_HEAD(&mdsc->waiting_for_map); @@ -4414,6 +4508,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) strscpy(mdsc->nodename, utsname()->nodename, sizeof(mdsc->nodename)); + + fsc->mdsc = mdsc; return 0; err_mdsmap: @@ -4657,7 +4753,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc) static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) { dout("stop\n"); - cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ + /* + * Make sure the delayed work stopped before releasing + * the resources. + * + * Because the cancel_delayed_work_sync() will only + * guarantee that the work finishes executing. But the + * delayed work will re-arm itself again after that. 
+ */ + flush_delayed_work(&mdsc->delayed_work); + if (mdsc->mdsmap) ceph_mdsmap_destroy(mdsc->mdsmap); kfree(mdsc->sessions); @@ -4680,6 +4785,7 @@ void ceph_mdsc_destroy(struct ceph_fs_client *fsc) ceph_metric_destroy(&mdsc->metric); + flush_delayed_work(&mdsc->metric.delayed_work); fsc->mdsc = NULL; kfree(mdsc); dout("mdsc_destroy %p done\n", mdsc); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 5e0c4073a6be..658800605bfb 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -18,6 +18,7 @@ #include <linux/ceph/auth.h> #include "metric.h" +#include "super.h" /* The first 8 bits are reserved for old ceph releases */ enum ceph_feature_type { @@ -27,8 +28,9 @@ enum ceph_feature_type { CEPHFS_FEATURE_LAZY_CAP_WANTED, CEPHFS_FEATURE_MULTI_RECONNECT, CEPHFS_FEATURE_DELEG_INO, + CEPHFS_FEATURE_METRIC_COLLECT, - CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_DELEG_INO, + CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_METRIC_COLLECT, }; /* @@ -42,6 +44,7 @@ enum ceph_feature_type { CEPHFS_FEATURE_LAZY_CAP_WANTED, \ CEPHFS_FEATURE_MULTI_RECONNECT, \ CEPHFS_FEATURE_DELEG_INO, \ + CEPHFS_FEATURE_METRIC_COLLECT, \ \ CEPHFS_FEATURE_MAX, \ } @@ -369,7 +372,7 @@ struct ceph_quotarealm_inode { struct cap_wait { struct list_head list; - unsigned long ino; + u64 ino; pid_t tgid; int need; int want; @@ -476,6 +479,8 @@ struct ceph_mds_client { extern const char *ceph_mds_op_name(int op); +extern bool check_session_state(struct ceph_mds_session *s); + extern struct ceph_mds_session * __ceph_lookup_mds_session(struct ceph_mds_client *, int mds); diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 889627817e52..e4aba6c6d3b5 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -120,7 +120,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) const void *start = *p; int i, j, n; int err; - u8 mdsmap_v, mdsmap_cv; + u8 mdsmap_v; u16 mdsmap_ev; m = kzalloc(sizeof(*m), GFP_NOFS); @@ -129,7 +129,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) ceph_decode_need(p, end, 1 + 1, bad); mdsmap_v = ceph_decode_8(p); - mdsmap_cv = ceph_decode_8(p); + *p += sizeof(u8); /* mdsmap_cv */ if (mdsmap_v >= 4) { u32 mdsmap_len; ceph_decode_32_safe(p, end, mdsmap_len, bad); @@ -174,7 +174,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) u64 global_id; u32 namelen; s32 mds, inc, state; - u64 state_seq; u8 info_v; void *info_end = NULL; struct ceph_entity_addr addr; @@ -189,9 +188,8 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) info_v= ceph_decode_8(p); if (info_v >= 4) { u32 info_len; - u8 info_cv; ceph_decode_need(p, end, 1 + sizeof(u32), bad); - info_cv = ceph_decode_8(p); + *p += sizeof(u8); /* info_cv */ info_len = ceph_decode_32(p); info_end = *p + info_len; if (info_end > end) @@ -210,7 +208,7 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end) mds = ceph_decode_32(p); inc = ceph_decode_32(p); state = ceph_decode_32(p); - state_seq = ceph_decode_64(p); + *p += sizeof(u64); /* state_seq */ err = ceph_decode_entity_addr(p, end, &addr); if (err) goto corrupt; diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c index 9217f35bc2b9..2466b261fba2 100644 --- a/fs/ceph/metric.c +++ b/fs/ceph/metric.c @@ -1,10 +1,150 @@ /* SPDX-License-Identifier: GPL-2.0 */ +#include <linux/ceph/ceph_debug.h> #include <linux/types.h> #include <linux/percpu_counter.h> #include <linux/math64.h> #include "metric.h" +#include "mds_client.h" + +static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc, + struct ceph_mds_session *s) +{ + struct ceph_metric_head *head; + struct 
ceph_metric_cap *cap; + struct ceph_metric_read_latency *read; + struct ceph_metric_write_latency *write; + struct ceph_metric_metadata_latency *meta; + struct ceph_client_metric *m = &mdsc->metric; + u64 nr_caps = atomic64_read(&m->total_caps); + struct ceph_msg *msg; + struct timespec64 ts; + s64 sum; + s32 items = 0; + s32 len; + + len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write) + + sizeof(*meta); + + msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true); + if (!msg) { + pr_err("send metrics to mds%d, failed to allocate message\n", + s->s_mds); + return false; + } + + head = msg->front.iov_base; + + /* encode the cap metric */ + cap = (struct ceph_metric_cap *)(head + 1); + cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO); + cap->ver = 1; + cap->compat = 1; + cap->data_len = cpu_to_le32(sizeof(*cap) - 10); + cap->hit = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_hit)); + cap->mis = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_mis)); + cap->total = cpu_to_le64(nr_caps); + items++; + + /* encode the read latency metric */ + read = (struct ceph_metric_read_latency *)(cap + 1); + read->type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY); + read->ver = 1; + read->compat = 1; + read->data_len = cpu_to_le32(sizeof(*read) - 10); + sum = m->read_latency_sum; + jiffies_to_timespec64(sum, &ts); + read->sec = cpu_to_le32(ts.tv_sec); + read->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + /* encode the write latency metric */ + write = (struct ceph_metric_write_latency *)(read + 1); + write->type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY); + write->ver = 1; + write->compat = 1; + write->data_len = cpu_to_le32(sizeof(*write) - 10); + sum = m->write_latency_sum; + jiffies_to_timespec64(sum, &ts); + write->sec = cpu_to_le32(ts.tv_sec); + write->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + /* encode the metadata latency metric */ + meta = (struct ceph_metric_metadata_latency *)(write + 1); + meta->type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY); + meta->ver = 1; + meta->compat = 1; + meta->data_len = cpu_to_le32(sizeof(*meta) - 10); + sum = m->metadata_latency_sum; + jiffies_to_timespec64(sum, &ts); + meta->sec = cpu_to_le32(ts.tv_sec); + meta->nsec = cpu_to_le32(ts.tv_nsec); + items++; + + put_unaligned_le32(items, &head->num); + msg->front.iov_len = len; + msg->hdr.version = cpu_to_le16(1); + msg->hdr.compat_version = cpu_to_le16(1); + msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); + dout("client%llu send metrics to mds%d\n", + ceph_client_gid(mdsc->fsc->client), s->s_mds); + ceph_con_send(&s->s_con, msg); + + return true; +} + + +static void metric_get_session(struct ceph_mds_client *mdsc) +{ + struct ceph_mds_session *s; + int i; + + mutex_lock(&mdsc->mutex); + for (i = 0; i < mdsc->max_sessions; i++) { + s = __ceph_lookup_mds_session(mdsc, i); + if (!s) + continue; + + /* + * Skip it if MDS doesn't support the metric collection, + * or the MDS will close the session's socket connection + * directly when it get this message. 
+ */ + if (check_session_state(s) && + test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) { + mdsc->metric.session = s; + break; + } + + ceph_put_mds_session(s); + } + mutex_unlock(&mdsc->mutex); +} + +static void metric_delayed_work(struct work_struct *work) +{ + struct ceph_client_metric *m = + container_of(work, struct ceph_client_metric, delayed_work.work); + struct ceph_mds_client *mdsc = + container_of(m, struct ceph_mds_client, metric); + + if (mdsc->stopping) + return; + + if (!m->session || !check_session_state(m->session)) { + if (m->session) { + ceph_put_mds_session(m->session); + m->session = NULL; + } + metric_get_session(mdsc); + } + if (m->session) { + ceph_mdsc_send_metrics(mdsc, m->session); + metric_schedule_delayed(m); + } +} int ceph_metric_init(struct ceph_client_metric *m) { @@ -22,6 +162,7 @@ int ceph_metric_init(struct ceph_client_metric *m) if (ret) goto err_d_lease_mis; + atomic64_set(&m->total_caps, 0); ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL); if (ret) goto err_i_caps_hit; @@ -51,6 +192,9 @@ int ceph_metric_init(struct ceph_client_metric *m) m->total_metadatas = 0; m->metadata_latency_sum = 0; + m->session = NULL; + INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work); + return 0; err_i_caps_mis: @@ -72,6 +216,11 @@ void ceph_metric_destroy(struct ceph_client_metric *m) percpu_counter_destroy(&m->i_caps_hit); percpu_counter_destroy(&m->d_lease_mis); percpu_counter_destroy(&m->d_lease_hit); + + cancel_delayed_work_sync(&m->delayed_work); + + if (m->session) + ceph_put_mds_session(m->session); } static inline void __update_latency(ktime_t *totalp, ktime_t *lsump, diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h index ccd81285a450..1d0959d669d7 100644 --- a/fs/ceph/metric.h +++ b/fs/ceph/metric.h @@ -6,12 +6,91 @@ #include <linux/percpu_counter.h> #include <linux/ktime.h> +extern bool disable_send_metrics; + +enum ceph_metric_type { + CLIENT_METRIC_TYPE_CAP_INFO, + CLIENT_METRIC_TYPE_READ_LATENCY, + CLIENT_METRIC_TYPE_WRITE_LATENCY, + CLIENT_METRIC_TYPE_METADATA_LATENCY, + CLIENT_METRIC_TYPE_DENTRY_LEASE, + + CLIENT_METRIC_TYPE_MAX = CLIENT_METRIC_TYPE_DENTRY_LEASE, +}; + +/* + * This will always have the highest metric bit value + * as the last element of the array. 
+ */ +#define CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED { \ + CLIENT_METRIC_TYPE_CAP_INFO, \ + CLIENT_METRIC_TYPE_READ_LATENCY, \ + CLIENT_METRIC_TYPE_WRITE_LATENCY, \ + CLIENT_METRIC_TYPE_METADATA_LATENCY, \ + \ + CLIENT_METRIC_TYPE_MAX, \ +} + +/* metric caps header */ +struct ceph_metric_cap { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(hit + mis + total) */ + __le64 hit; + __le64 mis; + __le64 total; +} __packed; + +/* metric read latency header */ +struct ceph_metric_read_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +/* metric write latency header */ +struct ceph_metric_write_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +/* metric metadata latency header */ +struct ceph_metric_metadata_latency { + __le32 type; /* ceph metric type */ + + __u8 ver; + __u8 compat; + + __le32 data_len; /* length of sizeof(sec + nsec) */ + __le32 sec; + __le32 nsec; +} __packed; + +struct ceph_metric_head { + __le32 num; /* the number of metrics that will be sent */ +} __packed; + /* This is the global metrics */ struct ceph_client_metric { atomic64_t total_dentries; struct percpu_counter d_lease_hit; struct percpu_counter d_lease_mis; + atomic64_t total_caps; struct percpu_counter i_caps_hit; struct percpu_counter i_caps_mis; @@ -35,8 +114,20 @@ struct ceph_client_metric { ktime_t metadata_latency_sq_sum; ktime_t metadata_latency_min; ktime_t metadata_latency_max; + + struct ceph_mds_session *session; + struct delayed_work delayed_work; /* delayed work */ }; +static inline void metric_schedule_delayed(struct ceph_client_metric *m) +{ + if (disable_send_metrics) + return; + + /* per second */ + schedule_delayed_work(&m->delayed_work, round_jiffies_relative(HZ)); +} + extern int ceph_metric_init(struct ceph_client_metric *m); extern void ceph_metric_destroy(struct ceph_client_metric *m); diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index 198ddde5c1e6..cc2c4d40b022 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c @@ -23,12 +23,12 @@ static inline bool ceph_has_realms_with_quotas(struct inode *inode) { struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc; struct super_block *sb = mdsc->fsc->sb; + struct inode *root = d_inode(sb->s_root); if (atomic64_read(&mdsc->quotarealms_count) > 0) return true; /* if root is the real CephFS root, we don't have quota realms */ - if (sb->s_root->d_inode && - (sb->s_root->d_inode->i_ino == CEPH_INO_ROOT)) + if (root && ceph_ino(root) == CEPH_INO_ROOT) return false; /* otherwise, we can't know for sure */ return true; diff --git a/fs/ceph/super.c b/fs/ceph/super.c index c9784eb1159a..7ec0e6d03d10 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -27,6 +27,9 @@ #include <linux/ceph/auth.h> #include <linux/ceph/debugfs.h> +static DEFINE_SPINLOCK(ceph_fsc_lock); +static LIST_HEAD(ceph_fsc_list); + /* * Ceph superblock operations * @@ -634,8 +637,6 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, struct ceph_options *opt) { struct ceph_fs_client *fsc; - int page_count; - size_t size; int err; fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); @@ -683,18 +684,12 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, if (!fsc->cap_wq) goto fail_inode_wq; - /* set up mempools */ - err = 
-ENOMEM; - page_count = fsc->mount_options->wsize >> PAGE_SHIFT; - size = sizeof (struct page *) * (page_count ? page_count : 1); - fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); - if (!fsc->wb_pagevec_pool) - goto fail_cap_wq; + spin_lock(&ceph_fsc_lock); + list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list); + spin_unlock(&ceph_fsc_lock); return fsc; -fail_cap_wq: - destroy_workqueue(fsc->cap_wq); fail_inode_wq: destroy_workqueue(fsc->inode_wq); fail_client: @@ -717,12 +712,14 @@ static void destroy_fs_client(struct ceph_fs_client *fsc) { dout("destroy_fs_client %p\n", fsc); + spin_lock(&ceph_fsc_lock); + list_del(&fsc->metric_wakeup); + spin_unlock(&ceph_fsc_lock); + ceph_mdsc_destroy(fsc); destroy_workqueue(fsc->inode_wq); destroy_workqueue(fsc->cap_wq); - mempool_destroy(fsc->wb_pagevec_pool); - destroy_mount_options(fsc->mount_options); ceph_destroy_client(fsc->client); @@ -741,6 +738,7 @@ struct kmem_cache *ceph_dentry_cachep; struct kmem_cache *ceph_file_cachep; struct kmem_cache *ceph_dir_file_cachep; struct kmem_cache *ceph_mds_request_cachep; +mempool_t *ceph_wb_pagevec_pool; static void ceph_inode_init_once(void *foo) { @@ -785,6 +783,10 @@ static int __init init_caches(void) if (!ceph_mds_request_cachep) goto bad_mds_req; + ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT); + if (!ceph_wb_pagevec_pool) + goto bad_pagevec_pool; + error = ceph_fscache_register(); if (error) goto bad_fscache; @@ -793,6 +795,8 @@ static int __init init_caches(void) bad_fscache: kmem_cache_destroy(ceph_mds_request_cachep); +bad_pagevec_pool: + mempool_destroy(ceph_wb_pagevec_pool); bad_mds_req: kmem_cache_destroy(ceph_dir_file_cachep); bad_dir_file: @@ -823,12 +827,13 @@ static void destroy_caches(void) kmem_cache_destroy(ceph_file_cachep); kmem_cache_destroy(ceph_dir_file_cachep); kmem_cache_destroy(ceph_mds_request_cachep); + mempool_destroy(ceph_wb_pagevec_pool); ceph_fscache_unregister(); } /* - * ceph_umount_begin - initiate forced umount. Tear down down the + * ceph_umount_begin - initiate forced umount. Tear down the * mount, skipping steps that may hang while waiting for server(s). 
 */ static void ceph_umount_begin(struct super_block *sb) @@ -1282,6 +1287,37 @@ static void __exit exit_ceph(void) destroy_caches(); } +static int param_set_metrics(const char *val, const struct kernel_param *kp) +{ + struct ceph_fs_client *fsc; + int ret; + + ret = param_set_bool(val, kp); + if (ret) { + pr_err("Failed to parse sending metrics switch value '%s'\n", + val); + return ret; + } else if (!disable_send_metrics) { + // wake up all the mds clients + spin_lock(&ceph_fsc_lock); + list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) { + metric_schedule_delayed(&fsc->mdsc->metric); + } + spin_unlock(&ceph_fsc_lock); + } + + return 0; +} + +static const struct kernel_param_ops param_ops_metrics = { + .set = param_set_metrics, + .get = param_get_bool, +}; + +bool disable_send_metrics = false; +module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644); +MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)"); + module_init(init_ceph); module_exit(exit_ceph); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 5a6cdd39bc10..a3995ebe0623 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -101,6 +101,8 @@ struct ceph_mount_options { struct ceph_fs_client { struct super_block *sb; + struct list_head metric_wakeup; + struct ceph_mount_options *mount_options; struct ceph_client *client; @@ -116,8 +118,6 @@ struct ceph_fs_client { struct ceph_mds_client *mdsc; - /* writeback */ - mempool_t *wb_pagevec_pool; atomic_long_t writeback_count; struct workqueue_struct *inode_wq; @@ -353,7 +353,7 @@ struct ceph_inode_info { unsigned i_dirty_caps, i_flushing_caps; /* mask of dirtied fields */ /* - * Link to the the auth cap's session's s_cap_dirty list. s_cap_dirty + * Link to the auth cap's session's s_cap_dirty list. s_cap_dirty * is protected by the mdsc->cap_dirty_lock, but each individual item * is also protected by the inode's i_ceph_lock. Walking s_cap_dirty * requires the mdsc->cap_dirty_lock. List presence for an item can @@ -457,15 +457,7 @@ ceph_vino(const struct inode *inode) return ceph_inode(inode)->i_vino; } -/* - * ino_t is <64 bits on many architectures, blech. - * - * i_ino (kernel inode) st_ino (userspace) - * i386 32 32 - * x86_64+ino32 64 32 - * x86_64 64 64 - */ -static inline u32 ceph_ino_to_ino32(__u64 vino) +static inline u32 ceph_ino_to_ino32(u64 vino) { u32 ino = vino & 0xffffffff; ino ^= vino >> 32; @@ -475,35 +467,18 @@ static inline u32 ceph_ino_to_ino32(__u64 vino) } /* - * kernel i_ino value + * Inode numbers in cephfs are 64 bits, but inode->i_ino is 32-bits on + * some arches. We generally do not use this value inside the ceph driver, but + * we do want to set it to something, so that generic vfs code has an + * appropriate value for tracepoints and the like.
*/ -static inline ino_t ceph_vino_to_ino(struct ceph_vino vino) +static inline ino_t ceph_vino_to_ino_t(struct ceph_vino vino) { -#if BITS_PER_LONG == 32 - return ceph_ino_to_ino32(vino.ino); -#else + if (sizeof(ino_t) == sizeof(u32)) + return ceph_ino_to_ino32(vino.ino); return (ino_t)vino.ino; -#endif } -/* - * user-visible ino (stat, filldir) - */ -#if BITS_PER_LONG == 32 -static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino) -{ - return ino; -} -#else -static inline ino_t ceph_translate_ino(struct super_block *sb, ino_t ino) -{ - if (ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)) - ino = ceph_ino_to_ino32(ino); - return ino; -} -#endif - - /* for printf-style formatting */ #define ceph_vinop(i) ceph_inode(i)->i_vino.ino, ceph_inode(i)->i_vino.snap @@ -511,11 +486,34 @@ static inline u64 ceph_ino(struct inode *inode) { return ceph_inode(inode)->i_vino.ino; } + static inline u64 ceph_snap(struct inode *inode) { return ceph_inode(inode)->i_vino.snap; } +/** + * ceph_present_ino - format an inode number for presentation to userland + * @sb: superblock where the inode lives + * @ino: inode number to (possibly) convert + * + * If the user mounted with the ino32 option, then the 64-bit value needs + * to be converted to something that can fit inside 32 bits. Note that + * internal kernel code never uses this value, so this is entirely for + * userland consumption. + */ +static inline u64 ceph_present_ino(struct super_block *sb, u64 ino) +{ + if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32))) + return ceph_ino_to_ino32(ino); + return ino; +} + +static inline u64 ceph_present_inode(struct inode *inode) +{ + return ceph_present_ino(inode->i_sb, ceph_ino(inode)); +} + static inline int ceph_ino_compare(struct inode *inode, void *data) { struct ceph_vino *pvino = (struct ceph_vino *)data; @@ -524,11 +522,16 @@ static inline int ceph_ino_compare(struct inode *inode, void *data) ci->i_vino.snap == pvino->snap; } + static inline struct inode *ceph_find_inode(struct super_block *sb, struct ceph_vino vino) { - ino_t t = ceph_vino_to_ino(vino); - return ilookup5(sb, t, ceph_ino_compare, &vino); + /* + * NB: The hashval will be run through the fs/inode.c hash function + * anyway, so there is no need to squash the inode number down to + * 32-bits first. Just use low-order bits on arches with 32-bit long. 
+ */ + return ilookup5(sb, (unsigned long)vino.ino, ceph_ino_compare, &vino); } diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c index 71ee34d160c3..3a733ac33d9b 100644 --- a/fs/ceph/xattr.c +++ b/fs/ceph/xattr.c @@ -497,10 +497,10 @@ static int __set_xattr(struct ceph_inode_info *ci, kfree(*newxattr); *newxattr = NULL; if (xattr->should_free_val) - kfree((void *)xattr->val); + kfree(xattr->val); if (update_xattr) { - kfree((void *)name); + kfree(name); name = xattr->name; } ci->i_xattrs.names_size -= xattr->name_len; @@ -566,9 +566,9 @@ static void __free_xattr(struct ceph_inode_xattr *xattr) BUG_ON(!xattr); if (xattr->should_free_name) - kfree((void *)xattr->name); + kfree(xattr->name); if (xattr->should_free_val) - kfree((void *)xattr->val); + kfree(xattr->val); kfree(xattr); } @@ -582,9 +582,9 @@ static int __remove_xattr(struct ceph_inode_info *ci, rb_erase(&xattr->node, &ci->i_xattrs.index); if (xattr->should_free_name) - kfree((void *)xattr->name); + kfree(xattr->name); if (xattr->should_free_val) - kfree((void *)xattr->val); + kfree(xattr->val); ci->i_xattrs.names_size -= xattr->name_len; ci->i_xattrs.vals_size -= xattr->val_len; diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index 6025d7fc7bbf..fcff14ef1c70 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c @@ -49,7 +49,7 @@ static const struct cifs_sid sid_unix_groups = { 1, 1, {0, 0, 0, 0, 0, 22}, {cpu_to_le32(2), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }; /* - * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx + * See https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ /* S-1-5-88 MS NFS and Apple style UID/GID/mode */ @@ -825,7 +825,7 @@ unsigned int setup_authusers_ACE(struct cifs_ace *pntace) /* * Fill in the special SID based on the mode. See - * http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx + * https://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ unsigned int setup_special_mode_ACE(struct cifs_ace *pntace, __u64 nmode) { diff --git a/fs/cifs/cifsacl.h b/fs/cifs/cifsacl.h index 17562ea00e18..45665ff87b64 100644 --- a/fs/cifs/cifsacl.h +++ b/fs/cifs/cifsacl.h @@ -132,7 +132,7 @@ struct cifs_ace { /* * The current SMB3 form of security descriptor is similar to what was used for * cifs (see above) but some fields are split, and fields in the struct below - * matches names of fields to the the spec, MS-DTYP (see sections 2.4.5 and + * matches names of fields to the spec, MS-DTYP (see sections 2.4.5 and * 2.4.6). Note that "CamelCase" fields are used in this struct in order to * match the MS-DTYP and MS-SMB2 specs which define the wire format. 
*/ @@ -178,7 +178,7 @@ struct smb3_acl { /* * Used to store the special 'NFS SIDs' used to persist the POSIX uid and gid - * See See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx + * See http://technet.microsoft.com/en-us/library/hh509017(v=ws.10).aspx */ struct owner_sid { u8 Revision; diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 874a551f339c..9daa256f69d4 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c @@ -797,7 +797,7 @@ calc_seckey(struct cifs_ses *ses) ses->auth_key.len = CIFS_SESS_KEY_SIZE; memzero_explicit(sec_key, CIFS_SESS_KEY_SIZE); - kzfree(ctx_arc4); + kfree_sensitive(ctx_arc4); return 0; } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index fad37d61910a..b565d83ba89e 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -928,7 +928,7 @@ compare_mid(__u16 mid, const struct smb_hdr *smb) * * Citation: * - * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx + * https://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx */ #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024) #define CIFS_DEFAULT_NON_POSIX_WSIZE (65536) @@ -1466,7 +1466,7 @@ struct cifsInodeInfo { struct list_head llist; /* locks helb by this inode */ /* * NOTE: Some code paths call down_read(lock_sem) twice, so - * we must always use use cifs_down_write() instead of down_write() + * we must always use cifs_down_write() instead of down_write() * for this semaphore to avoid deadlocks. */ struct rw_semaphore lock_sem; /* protect the fields above */ @@ -2031,4 +2031,19 @@ static inline bool is_smb1_server(struct TCP_Server_Info *server) return strcmp(server->vals->version_string, SMB1_VERSION_STRING) == 0; } +static inline bool is_tcon_dfs(struct cifs_tcon *tcon) +{ + /* + * For SMB1, see MS-CIFS 2.4.55 SMB_COM_TREE_CONNECT_ANDX (0x75) and MS-CIFS 3.3.4.4 DFS + * Subsystem Notifies That a Share Is a DFS Share. + * + * For SMB2+, see MS-SMB2 2.2.10 SMB2 TREE_CONNECT Response and MS-SMB2 3.3.4.14 Server + * Application Updates a Share. + */ + if (!tcon || !tcon->ses || !tcon->ses->server) + return false; + return is_smb1_server(tcon->ses->server) ? 
tcon->Flags & SMB_SHARE_IS_IN_DFS : + tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT); +} + #endif /* _CIFS_GLOB_H */ diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 7a836ec0438e..bb68cbf81074 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h @@ -154,6 +154,7 @@ extern int decode_negTokenInit(unsigned char *security_blob, int length, extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); extern void cifs_set_port(struct sockaddr *addr, const unsigned short int port); extern int map_smb_to_linux_error(char *buf, bool logErr); +extern int map_and_check_smb_error(struct mid_q_entry *mid, bool logErr); extern void header_assemble(struct smb_hdr *, char /* command */ , const struct cifs_tcon *, int /* length of fixed section (word count) in two byte units */); @@ -271,6 +272,9 @@ extern void cifs_move_llist(struct list_head *source, struct list_head *dest); extern void cifs_free_llist(struct list_head *llist); extern void cifs_del_lock_waiters(struct cifsLockInfo *lock); +extern int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, + const struct nls_table *nlsc); + extern int cifs_negotiate_protocol(const unsigned int xid, struct cifs_ses *ses); extern int cifs_setup_session(const unsigned int xid, struct cifs_ses *ses, @@ -344,7 +348,7 @@ extern int CIFSSMBQFSPosixInfo(const unsigned int xid, struct cifs_tcon *tcon, extern int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, const struct nls_table *nls_codepage, - int remap_special_chars); + struct cifs_sb_info *cifs_sb); extern int CIFSSMBSetFileInfo(const unsigned int xid, struct cifs_tcon *tcon, const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener); @@ -613,8 +617,7 @@ int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov, struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server); void cifs_put_tcp_super(struct super_block *sb); -int update_super_prepath(struct cifs_tcon *tcon, const char *prefix, - size_t prefix_len); +int update_super_prepath(struct cifs_tcon *tcon, char *prefix); #ifdef CONFIG_CIFS_DFS_UPCALL static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses, diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index bf41ee048396..0496934feecb 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -124,116 +124,6 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon) */ } -#ifdef CONFIG_CIFS_DFS_UPCALL -static int __cifs_reconnect_tcon(const struct nls_table *nlsc, - struct cifs_tcon *tcon) -{ - int rc; - struct TCP_Server_Info *server = tcon->ses->server; - struct dfs_cache_tgt_list tl; - struct dfs_cache_tgt_iterator *it = NULL; - char *tree; - const char *tcp_host; - size_t tcp_host_len; - const char *dfs_host; - size_t dfs_host_len; - - tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); - if (!tree) - return -ENOMEM; - - if (!tcon->dfs_path) { - if (tcon->ipc) { - scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", - server->hostname); - rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); - } else { - rc = CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc); - } - goto out; - } - - rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); - if (rc) - goto out; - - extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len); - - for (it = dfs_cache_get_tgt_iterator(&tl); it; - it = dfs_cache_get_next_tgt(&tl, it)) { - const char *share, *prefix; - size_t share_len, prefix_len; - bool target_match; - - rc = 
dfs_cache_get_tgt_share(it, &share, &share_len, &prefix, - &prefix_len); - if (rc) { - cifs_dbg(VFS, "%s: failed to parse target share %d\n", - __func__, rc); - continue; - } - - extract_unc_hostname(share, &dfs_host, &dfs_host_len); - - if (dfs_host_len != tcp_host_len - || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) { - cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", - __func__, - (int)dfs_host_len, dfs_host, - (int)tcp_host_len, tcp_host); - - rc = match_target_ip(server, dfs_host, dfs_host_len, - &target_match); - if (rc) { - cifs_dbg(VFS, "%s: failed to match target ip: %d\n", - __func__, rc); - break; - } - - if (!target_match) { - cifs_dbg(FYI, "%s: skipping target\n", __func__); - continue; - } - } - - if (tcon->ipc) { - scnprintf(tree, MAX_TREE_SIZE, "\\\\%.*s\\IPC$", - (int)share_len, share); - rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); - } else { - scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len, - share); - rc = CIFSTCon(0, tcon->ses, tree, tcon, nlsc); - if (!rc) { - rc = update_super_prepath(tcon, prefix, - prefix_len); - break; - } - } - if (rc == -EREMOTE) - break; - } - - if (!rc) { - if (it) - rc = dfs_cache_noreq_update_tgthint(tcon->dfs_path + 1, - it); - else - rc = -ENOENT; - } - dfs_cache_free_tgts(&tl); -out: - kfree(tree); - return rc; -} -#else -static inline int __cifs_reconnect_tcon(const struct nls_table *nlsc, - struct cifs_tcon *tcon) -{ - return CIFSTCon(0, tcon->ses, tcon->treeName, tcon, nlsc); -} -#endif - /* reconnect the socket, tcon, and smb session if needed */ static int cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) @@ -338,7 +228,7 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) } cifs_mark_open_files_invalid(tcon); - rc = __cifs_reconnect_tcon(nls_codepage, tcon); + rc = cifs_tree_connect(0, tcon, nls_codepage); mutex_unlock(&ses->session_mutex); cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc); @@ -691,7 +581,7 @@ should_set_ext_sec_flag(enum securityEnum sectype) if (global_secflags & (CIFSSEC_MAY_KRB5 | CIFSSEC_MAY_NTLMSSP)) return true; - /* Fallthrough */ + fallthrough; default: return false; } @@ -5913,10 +5803,42 @@ CIFSSMBSetFileDisposition(const unsigned int xid, struct cifs_tcon *tcon, return rc; } +static int +CIFSSMBSetPathInfoFB(const unsigned int xid, struct cifs_tcon *tcon, + const char *fileName, const FILE_BASIC_INFO *data, + const struct nls_table *nls_codepage, + struct cifs_sb_info *cifs_sb) +{ + int oplock = 0; + struct cifs_open_parms oparms; + struct cifs_fid fid; + int rc; + + oparms.tcon = tcon; + oparms.cifs_sb = cifs_sb; + oparms.desired_access = GENERIC_WRITE; + oparms.create_options = cifs_create_options(cifs_sb, 0); + oparms.disposition = FILE_OPEN; + oparms.path = fileName; + oparms.fid = &fid; + oparms.reconnect = false; + + rc = CIFS_open(xid, &oparms, &oplock, NULL); + if (rc) + goto out; + + rc = CIFSSMBSetFileInfo(xid, tcon, data, fid.netfid, current->tgid); + CIFSSMBClose(xid, tcon, fid.netfid); +out: + + return rc; +} + int CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, const char *fileName, const FILE_BASIC_INFO *data, - const struct nls_table *nls_codepage, int remap) + const struct nls_table *nls_codepage, + struct cifs_sb_info *cifs_sb) { TRANSACTION2_SPI_REQ *pSMB = NULL; TRANSACTION2_SPI_RSP *pSMBr = NULL; @@ -5925,6 +5847,7 @@ CIFSSMBSetPathInfo(const unsigned int xid, struct cifs_tcon *tcon, int bytes_returned = 0; char *data_offset; __u16 params, param_offset, offset, byte_count, count; + int remap = cifs_remap(cifs_sb); cifs_dbg(FYI, 
"In SetTimes\n"); @@ -5987,6 +5910,10 @@ SetTimesRetry: if (rc == -EAGAIN) goto SetTimesRetry; + if (rc == -EOPNOTSUPP) + return CIFSSMBSetPathInfoFB(xid, tcon, fileName, data, + nls_codepage, cifs_sb); + return rc; } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a61abde09ffe..a5731dd6e656 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -393,15 +393,14 @@ static inline int reconn_set_ipaddr(struct TCP_Server_Info *server) #ifdef CONFIG_CIFS_DFS_UPCALL /* These functions must be called with server->srv_mutex held */ -static void reconn_inval_dfs_target(struct TCP_Server_Info *server, - struct cifs_sb_info *cifs_sb, - struct dfs_cache_tgt_list *tgt_list, - struct dfs_cache_tgt_iterator **tgt_it) +static void reconn_set_next_dfs_target(struct TCP_Server_Info *server, + struct cifs_sb_info *cifs_sb, + struct dfs_cache_tgt_list *tgt_list, + struct dfs_cache_tgt_iterator **tgt_it) { const char *name; - if (!cifs_sb || !cifs_sb->origin_fullpath || !tgt_list || - !server->nr_targets) + if (!cifs_sb || !cifs_sb->origin_fullpath) return; if (!*tgt_it) { @@ -471,11 +470,13 @@ cifs_reconnect(struct TCP_Server_Info *server) sb = NULL; } else { cifs_sb = CIFS_SB(sb); - rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list); - if (rc && (rc != -EOPNOTSUPP)) { - cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n", - __func__); + if (rc) { + cifs_sb = NULL; + if (rc != -EOPNOTSUPP) { + cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n", + __func__); + } } else { server->nr_targets = dfs_cache_get_nr_tgts(&tgt_list); } @@ -578,7 +579,7 @@ cifs_reconnect(struct TCP_Server_Info *server) * feature is disabled, then we will retry last server we * connected to before. */ - reconn_inval_dfs_target(server, cifs_sb, &tgt_list, &tgt_it); + reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it); #endif rc = reconn_set_ipaddr(server); if (rc) { @@ -1377,25 +1378,25 @@ static int cifs_parse_security_flavors(char *value, return 1; case Opt_sec_krb5i: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_krb5: vol->sectype = Kerberos; break; case Opt_sec_ntlmsspi: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_ntlmssp: vol->sectype = RawNTLMSSP; break; case Opt_sec_ntlmi: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_ntlm: vol->sectype = NTLM; break; case Opt_sec_ntlmv2i: vol->sign = true; - /* Fallthrough */ + fallthrough; case Opt_sec_ntlmv2: vol->sectype = NTLMv2; break; @@ -2182,11 +2183,11 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, tmp_end++; if (!(tmp_end < end && tmp_end[1] == delim)) { /* No it is not. 
Set the password to NULL */ - kzfree(vol->password); + kfree_sensitive(vol->password); vol->password = NULL; break; } - /* Fallthrough - to Opt_pass below.*/ + fallthrough; /* to Opt_pass below */ case Opt_pass: /* Obtain the value string */ value = strchr(data, '='); @@ -2220,7 +2221,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, options = end; } - kzfree(vol->password); + kfree_sensitive(vol->password); /* Now build new password string */ temp_len = strlen(value); vol->password = kzalloc(temp_len+1, GFP_KERNEL); @@ -3198,7 +3199,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses) rc = -ENOMEM; kfree(vol->username); vol->username = NULL; - kzfree(vol->password); + kfree_sensitive(vol->password); vol->password = NULL; goto out_key_put; } @@ -4219,7 +4220,7 @@ void cifs_cleanup_volume_info_contents(struct smb_vol *volume_info) { kfree(volume_info->username); - kzfree(volume_info->password); + kfree_sensitive(volume_info->password); kfree(volume_info->UNC); kfree(volume_info->domainname); kfree(volume_info->iocharset); @@ -4422,11 +4423,11 @@ build_unc_path_to_root(const struct smb_vol *vol, static int expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses, struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb, - int check_prefix) + char *ref_path) { int rc; struct dfs_info3_param referral = {0}; - char *full_path = NULL, *ref_path = NULL, *mdata = NULL; + char *full_path = NULL, *mdata = NULL; if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) return -EREMOTE; @@ -4435,9 +4436,6 @@ expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses, if (IS_ERR(full_path)) return PTR_ERR(full_path); - /* For DFS paths, skip the first '\' of the UNC */ - ref_path = check_prefix ? full_path + 1 : volume_info->UNC + 1; - rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ref_path, &referral, NULL); if (!rc) { @@ -4500,13 +4498,10 @@ static int update_vol_info(const struct dfs_cache_tgt_iterator *tgt_it, return 0; } -static int setup_dfs_tgt_conn(const char *path, +static int setup_dfs_tgt_conn(const char *path, const char *full_path, const struct dfs_cache_tgt_iterator *tgt_it, - struct cifs_sb_info *cifs_sb, - struct smb_vol *vol, - unsigned int *xid, - struct TCP_Server_Info **server, - struct cifs_ses **ses, + struct cifs_sb_info *cifs_sb, struct smb_vol *vol, unsigned int *xid, + struct TCP_Server_Info **server, struct cifs_ses **ses, struct cifs_tcon **tcon) { int rc; @@ -4520,8 +4515,7 @@ static int setup_dfs_tgt_conn(const char *path, if (rc) return rc; - mdata = cifs_compose_mount_options(cifs_sb->mountdata, path, &ref, - &fake_devname); + mdata = cifs_compose_mount_options(cifs_sb->mountdata, full_path + 1, &ref, &fake_devname); free_dfs_info_param(&ref); if (IS_ERR(mdata)) { @@ -4544,7 +4538,7 @@ static int setup_dfs_tgt_conn(const char *path, mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon); rc = mount_get_conns(&fake_vol, cifs_sb, xid, server, ses, tcon); - if (!rc) { + if (!rc || (*server && *ses)) { /* * We were able to connect to new target server. * Update current volume info with new target server. 
@@ -4556,14 +4550,10 @@ static int setup_dfs_tgt_conn(const char *path, return rc; } -static int mount_do_dfs_failover(const char *path, - struct cifs_sb_info *cifs_sb, - struct smb_vol *vol, - struct cifs_ses *root_ses, - unsigned int *xid, - struct TCP_Server_Info **server, - struct cifs_ses **ses, - struct cifs_tcon **tcon) +static int do_dfs_failover(const char *path, const char *full_path, struct cifs_sb_info *cifs_sb, + struct smb_vol *vol, struct cifs_ses *root_ses, unsigned int *xid, + struct TCP_Server_Info **server, struct cifs_ses **ses, + struct cifs_tcon **tcon) { int rc; struct dfs_cache_tgt_list tgt_list; @@ -4582,9 +4572,9 @@ static int mount_do_dfs_failover(const char *path, if (rc) break; /* Connect to next DFS target */ - rc = setup_dfs_tgt_conn(path, tgt_it, cifs_sb, vol, xid, server, - ses, tcon); - if (!rc || rc == -EACCES || rc == -EOPNOTSUPP) + rc = setup_dfs_tgt_conn(path, full_path, tgt_it, cifs_sb, vol, xid, server, ses, + tcon); + if (!rc || (*server && *ses)) break; } if (!rc) { @@ -4754,207 +4744,211 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb_vol *vol, } #ifdef CONFIG_CIFS_DFS_UPCALL -static inline void set_root_tcon(struct cifs_sb_info *cifs_sb, - struct cifs_tcon *tcon, - struct cifs_tcon **root) +static void set_root_ses(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses, + struct cifs_ses **root_ses) { - spin_lock(&cifs_tcp_ses_lock); - tcon->tc_count++; - tcon->remap = cifs_remap(cifs_sb); - spin_unlock(&cifs_tcp_ses_lock); - *root = tcon; + if (ses) { + spin_lock(&cifs_tcp_ses_lock); + ses->ses_count++; + ses->tcon_ipc->remap = cifs_remap(cifs_sb); + spin_unlock(&cifs_tcp_ses_lock); + } + *root_ses = ses; +} + +static void put_root_ses(struct cifs_ses *ses) +{ + if (ses) + cifs_put_smb_ses(ses); +} + +/* Check if a path component is remote and then update @dfs_path accordingly */ +static int check_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb_vol *vol, + const unsigned int xid, struct TCP_Server_Info *server, + struct cifs_tcon *tcon, char **dfs_path) +{ + char *path, *s; + char sep = CIFS_DIR_SEP(cifs_sb), tmp; + char *npath; + int rc = 0; + int added_treename = tcon->Flags & SMB_SHARE_IS_IN_DFS; + int skip = added_treename; + + path = cifs_build_path_to_root(vol, cifs_sb, tcon, added_treename); + if (!path) + return -ENOMEM; + + /* + * Walk through the path components in @path and check if they're accessible. In case any of + * the components is -EREMOTE, then update @dfs_path with the next DFS referral request path + * (NOT including the remaining components). 
+ */ + s = path; + do { + /* skip separators */ + while (*s && *s == sep) + s++; + if (!*s) + break; + /* next separator */ + while (*s && *s != sep) + s++; + /* + * if the treename is added, we then have to skip the first + * part within the separators + */ + if (skip) { + skip = 0; + continue; + } + tmp = *s; + *s = 0; + rc = server->ops->is_path_accessible(xid, tcon, cifs_sb, path); + if (rc && rc == -EREMOTE) { + struct smb_vol v = {NULL}; + /* if @path contains a tree name, skip it in the prefix path */ + if (added_treename) { + rc = cifs_parse_devname(path, &v); + if (rc) + break; + rc = -EREMOTE; + npath = build_unc_path_to_root(&v, cifs_sb, true); + cifs_cleanup_volume_info_contents(&v); + } else { + v.UNC = vol->UNC; + v.prepath = path + 1; + npath = build_unc_path_to_root(&v, cifs_sb, true); + } + if (IS_ERR(npath)) { + rc = PTR_ERR(npath); + break; + } + kfree(*dfs_path); + *dfs_path = npath; + } + *s = tmp; + } while (rc == 0); + + kfree(path); + return rc; } int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *vol) { int rc = 0; unsigned int xid; - struct cifs_ses *ses; - struct cifs_tcon *root_tcon = NULL; + struct TCP_Server_Info *server = NULL; + struct cifs_ses *ses = NULL, *root_ses = NULL; struct cifs_tcon *tcon = NULL; - struct TCP_Server_Info *server; - char *root_path = NULL, *full_path = NULL; - char *old_mountdata, *origin_mountdata = NULL; - int count; + int count = 0; + char *ref_path = NULL, *full_path = NULL; + char *oldmnt = NULL; + char *mntdata = NULL; rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses, &tcon); - if (!rc && tcon) { - /* If not a standalone DFS root, then check if path is remote */ - rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, - cifs_remap(cifs_sb), vol->UNC + 1, NULL, - NULL); - if (rc) { - rc = is_path_remote(cifs_sb, vol, xid, server, tcon); - if (!rc) - goto out; - if (rc != -EREMOTE) - goto error; - } - } /* - * If first DFS target server went offline and we failed to connect it, - * server and ses pointers are NULL at this point, though we still have - * chance to get a cached DFS referral in expand_dfs_referral() and - * retry next target available in it. + * Unconditionally try to get an DFS referral (even cached) to determine whether it is an + * DFS mount. * - * If a NULL ses ptr is passed to dfs_cache_find(), a lookup will be - * performed against DFS path and *no* requests will be sent to server - * for any new DFS referrals. Hence it's safe to skip checking whether - * server or ses ptr is NULL. + * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem + * to respond with PATH_NOT_COVERED to requests that include the prefix. */ - if (rc == -EACCES || rc == -EOPNOTSUPP) - goto error; - - root_path = build_unc_path_to_root(vol, cifs_sb, false); - if (IS_ERR(root_path)) { - rc = PTR_ERR(root_path); - root_path = NULL; - goto error; - } - - full_path = build_unc_path_to_root(vol, cifs_sb, true); - if (IS_ERR(full_path)) { - rc = PTR_ERR(full_path); - full_path = NULL; - goto error; - } - /* - * Perform an unconditional check for whether there are DFS - * referrals for this path without prefix, to provide support - * for DFS referrals from w2k8 servers which don't seem to respond - * with PATH_NOT_COVERED to requests that include the prefix. - * Chase the referral if found, otherwise continue normally. 
- */ - old_mountdata = cifs_sb->mountdata; - (void)expand_dfs_referral(xid, ses, vol, cifs_sb, false); - - if (cifs_sb->mountdata == NULL) { - rc = -ENOENT; - goto error; + if (dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), vol->UNC + 1, NULL, + NULL)) { + /* No DFS referral was returned. Looks like a regular share. */ + if (rc) + goto error; + /* Check if it is fully accessible and then mount it */ + rc = is_path_remote(cifs_sb, vol, xid, server, tcon); + if (!rc) + goto out; + if (rc != -EREMOTE) + goto error; } - - /* Save DFS root volume information for DFS refresh worker */ - origin_mountdata = kstrndup(cifs_sb->mountdata, - strlen(cifs_sb->mountdata), GFP_KERNEL); - if (!origin_mountdata) { + /* Save mount options */ + mntdata = kstrndup(cifs_sb->mountdata, strlen(cifs_sb->mountdata), GFP_KERNEL); + if (!mntdata) { rc = -ENOMEM; goto error; } - - if (cifs_sb->mountdata != old_mountdata) { - /* If we were redirected, reconnect to new target server */ - mount_put_conns(cifs_sb, xid, server, ses, tcon); - rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses, &tcon); - } - if (rc) { - if (rc == -EACCES || rc == -EOPNOTSUPP) - goto error; - /* Perform DFS failover to any other DFS targets */ - rc = mount_do_dfs_failover(root_path + 1, cifs_sb, vol, NULL, - &xid, &server, &ses, &tcon); - if (rc) - goto error; - } - - kfree(root_path); - root_path = build_unc_path_to_root(vol, cifs_sb, false); - if (IS_ERR(root_path)) { - rc = PTR_ERR(root_path); - root_path = NULL; + /* Get path of DFS root */ + ref_path = build_unc_path_to_root(vol, cifs_sb, false); + if (IS_ERR(ref_path)) { + rc = PTR_ERR(ref_path); + ref_path = NULL; goto error; } - /* Cache out resolved root server */ - (void)dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), - root_path + 1, NULL, NULL); - kfree(root_path); - root_path = NULL; - - set_root_tcon(cifs_sb, tcon, &root_tcon); - - for (count = 1; ;) { - if (!rc && tcon) { - rc = is_path_remote(cifs_sb, vol, xid, server, tcon); - if (!rc || rc != -EREMOTE) - break; - } - /* - * BB: when we implement proper loop detection, - * we will remove this check. But now we need it - * to prevent an indefinite loop if 'DFS tree' is - * misconfigured (i.e. has loops). - */ - if (count++ > MAX_NESTED_LINKS) { - rc = -ELOOP; - break; - } + set_root_ses(cifs_sb, ses, &root_ses); + do { + /* Save full path of last DFS path we used to resolve final target server */ kfree(full_path); - full_path = build_unc_path_to_root(vol, cifs_sb, true); + full_path = build_unc_path_to_root(vol, cifs_sb, !!count); if (IS_ERR(full_path)) { rc = PTR_ERR(full_path); full_path = NULL; break; } - - old_mountdata = cifs_sb->mountdata; - rc = expand_dfs_referral(xid, root_tcon->ses, vol, cifs_sb, - true); + /* Chase referral */ + oldmnt = cifs_sb->mountdata; + rc = expand_dfs_referral(xid, root_ses, vol, cifs_sb, ref_path + 1); if (rc) break; - - if (cifs_sb->mountdata != old_mountdata) { + /* Connect to new DFS target only if we were redirected */ + if (oldmnt != cifs_sb->mountdata) { mount_put_conns(cifs_sb, xid, server, ses, tcon); - rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses, - &tcon); - /* - * Ensure that DFS referrals go through new root server. 
- */ - if (!rc && tcon && - (tcon->share_flags & (SHI1005_FLAGS_DFS | - SHI1005_FLAGS_DFS_ROOT))) { - cifs_put_tcon(root_tcon); - set_root_tcon(cifs_sb, tcon, &root_tcon); - } + rc = mount_get_conns(vol, cifs_sb, &xid, &server, &ses, &tcon); } - if (rc) { - if (rc == -EACCES || rc == -EOPNOTSUPP) - break; - /* Perform DFS failover to any other DFS targets */ - rc = mount_do_dfs_failover(full_path + 1, cifs_sb, vol, - root_tcon->ses, &xid, - &server, &ses, &tcon); - if (rc == -EACCES || rc == -EOPNOTSUPP || !server || - !ses) - goto error; + if (rc && !server && !ses) { + /* Failed to connect. Try to connect to other targets in the referral. */ + rc = do_dfs_failover(ref_path + 1, full_path, cifs_sb, vol, root_ses, &xid, + &server, &ses, &tcon); } - } - cifs_put_tcon(root_tcon); + if (rc == -EACCES || rc == -EOPNOTSUPP || !server || !ses) + break; + if (!tcon) + continue; + /* Make sure that requests go through new root servers */ + if (is_tcon_dfs(tcon)) { + put_root_ses(root_ses); + set_root_ses(cifs_sb, ses, &root_ses); + } + /* Check for remaining path components and then continue chasing them (-EREMOTE) */ + rc = check_dfs_prepath(cifs_sb, vol, xid, server, tcon, &ref_path); + /* Prevent recursion on broken link referrals */ + if (rc == -EREMOTE && ++count > MAX_NESTED_LINKS) + rc = -ELOOP; + } while (rc == -EREMOTE); if (rc) goto error; - - spin_lock(&cifs_tcp_ses_lock); - if (!tcon->dfs_path) { - /* Save full path in new tcon to do failover when reconnecting tcons */ - tcon->dfs_path = full_path; - full_path = NULL; - tcon->remap = cifs_remap(cifs_sb); - } - cifs_sb->origin_fullpath = kstrndup(tcon->dfs_path, - strlen(tcon->dfs_path), - GFP_ATOMIC); + put_root_ses(root_ses); + root_ses = NULL; + kfree(ref_path); + ref_path = NULL; + /* + * Store DFS full path in both superblock and tree connect structures. + * + * For DFS root mounts, the prefix path (cifs_sb->prepath) is preserved during reconnect so + * only the root path is set in cifs_sb->origin_fullpath and tcon->dfs_path. And for DFS + * links, the prefix path is included in both and may be changed during reconnect. See + * cifs_tree_connect(). + */ + cifs_sb->origin_fullpath = kstrndup(full_path, strlen(full_path), GFP_KERNEL); if (!cifs_sb->origin_fullpath) { - spin_unlock(&cifs_tcp_ses_lock); rc = -ENOMEM; goto error; } + spin_lock(&cifs_tcp_ses_lock); + tcon->dfs_path = full_path; + full_path = NULL; + tcon->remap = cifs_remap(cifs_sb); spin_unlock(&cifs_tcp_ses_lock); - rc = dfs_cache_add_vol(origin_mountdata, vol, cifs_sb->origin_fullpath); - if (rc) { - kfree(cifs_sb->origin_fullpath); + /* Add original volume information for DFS cache to be used when refreshing referrals */ + rc = dfs_cache_add_vol(mntdata, vol, cifs_sb->origin_fullpath); + if (rc) goto error; - } /* * After reconnecting to a different server, unique ids won't * match anymore, so we disable serverino. 
This prevents @@ -4976,9 +4970,11 @@ out: return mount_setup_tlink(cifs_sb, ses, tcon); error: + kfree(ref_path); kfree(full_path); - kfree(root_path); - kfree(origin_mountdata); + kfree(mntdata); + kfree(cifs_sb->origin_fullpath); + put_root_ses(root_ses); mount_put_conns(cifs_sb, xid, server, ses, tcon); return rc; } @@ -5114,8 +5110,7 @@ CIFSTCon(const unsigned int xid, struct cifs_ses *ses, bcc_ptr += strlen("?????"); bcc_ptr += 1; count = bcc_ptr - &pSMB->Password[0]; - pSMB->hdr.smb_buf_length = cpu_to_be32(be32_to_cpu( - pSMB->hdr.smb_buf_length) + count); + be32_add_cpu(&pSMB->hdr.smb_buf_length, count); pSMB->ByteCount = cpu_to_le16(count); rc = SendReceive(xid, ses, smb_buffer, smb_buffer_response, &length, @@ -5345,7 +5340,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, kuid_t fsuid) out: kfree(vol_info->username); - kzfree(vol_info->password); + kfree_sensitive(vol_info->password); kfree(vol_info); return tcon; @@ -5533,3 +5528,115 @@ cifs_prune_tlinks(struct work_struct *work) queue_delayed_work(cifsiod_wq, &cifs_sb->prune_tlinks, TLINK_IDLE_EXPIRE); } + +#ifdef CONFIG_CIFS_DFS_UPCALL +int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc) +{ + int rc; + struct TCP_Server_Info *server = tcon->ses->server; + const struct smb_version_operations *ops = server->ops; + struct dfs_cache_tgt_list tl; + struct dfs_cache_tgt_iterator *it = NULL; + char *tree; + const char *tcp_host; + size_t tcp_host_len; + const char *dfs_host; + size_t dfs_host_len; + char *share = NULL, *prefix = NULL; + struct dfs_info3_param ref = {0}; + bool isroot; + + tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); + if (!tree) + return -ENOMEM; + + if (!tcon->dfs_path) { + if (tcon->ipc) { + scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname); + rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc); + } else { + rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc); + } + goto out; + } + + rc = dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl); + if (rc) + goto out; + isroot = ref.server_type == DFS_TYPE_ROOT; + free_dfs_info_param(&ref); + + extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len); + + for (it = dfs_cache_get_tgt_iterator(&tl); it; it = dfs_cache_get_next_tgt(&tl, it)) { + bool target_match; + + kfree(share); + kfree(prefix); + share = NULL; + prefix = NULL; + + rc = dfs_cache_get_tgt_share(tcon->dfs_path + 1, it, &share, &prefix); + if (rc) { + cifs_dbg(VFS, "%s: failed to parse target share %d\n", + __func__, rc); + continue; + } + + extract_unc_hostname(share, &dfs_host, &dfs_host_len); + + if (dfs_host_len != tcp_host_len + || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) { + cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len, + dfs_host, (int)tcp_host_len, tcp_host); + + rc = match_target_ip(server, dfs_host, dfs_host_len, &target_match); + if (rc) { + cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc); + break; + } + + if (!target_match) { + cifs_dbg(FYI, "%s: skipping target\n", __func__); + continue; + } + } + + if (tcon->ipc) { + scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", share); + rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc); + } else { + scnprintf(tree, MAX_TREE_SIZE, "\\%s", share); + rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc); + /* Only handle prefix paths of DFS link targets */ + if (!rc && !isroot) { + rc = update_super_prepath(tcon, prefix); + break; + } + } + if (rc == -EREMOTE) + break; + } + + kfree(share); + 
kfree(prefix); + + if (!rc) { + if (it) + rc = dfs_cache_noreq_update_tgthint(tcon->dfs_path + 1, it); + else + rc = -ENOENT; + } + dfs_cache_free_tgts(&tl); +out: + kfree(tree); + return rc; +} +#else +int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc) +{ + const struct smb_version_operations *ops = tcon->ses->server->ops; + + return ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc); +} +#endif diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index df81c718d2fa..6ee849698962 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -29,6 +29,7 @@ struct cache_dfs_tgt { char *name; + int path_consumed; struct list_head list; }; @@ -350,7 +351,7 @@ static inline struct timespec64 get_expire_time(int ttl) } /* Allocate a new DFS target */ -static struct cache_dfs_tgt *alloc_target(const char *name) +static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed) { struct cache_dfs_tgt *t; @@ -362,6 +363,7 @@ static struct cache_dfs_tgt *alloc_target(const char *name) kfree(t); return ERR_PTR(-ENOMEM); } + t->path_consumed = path_consumed; INIT_LIST_HEAD(&t->list); return t; } @@ -384,7 +386,7 @@ static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs, for (i = 0; i < numrefs; i++) { struct cache_dfs_tgt *t; - t = alloc_target(refs[i].node_name); + t = alloc_target(refs[i].node_name, refs[i].path_consumed); if (IS_ERR(t)) { free_tgts(ce); return PTR_ERR(t); @@ -490,16 +492,7 @@ static int add_cache_entry(const char *path, unsigned int hash, return 0; } -/* - * Find a DFS cache entry in hash table and optionally check prefix path against - * @path. - * Use whole path components in the match. - * Must be called with htable_rw_lock held. - * - * Return ERR_PTR(-ENOENT) if the entry is not found. - */ -static struct cache_entry *lookup_cache_entry(const char *path, - unsigned int *hash) +static struct cache_entry *__lookup_cache_entry(const char *path) { struct cache_entry *ce; unsigned int h; @@ -517,9 +510,75 @@ static struct cache_entry *lookup_cache_entry(const char *path, if (!found) ce = ERR_PTR(-ENOENT); + return ce; +} + +/* + * Find a DFS cache entry in hash table and optionally check prefix path against + * @path. + * Use whole path components in the match. + * Must be called with htable_rw_lock held. + * + * Return ERR_PTR(-ENOENT) if the entry is not found. + */ +static struct cache_entry *lookup_cache_entry(const char *path, unsigned int *hash) +{ + struct cache_entry *ce = ERR_PTR(-ENOENT); + unsigned int h; + int cnt = 0; + char *npath; + char *s, *e; + char sep; + + npath = kstrndup(path, strlen(path), GFP_KERNEL); + if (!npath) + return ERR_PTR(-ENOMEM); + + s = npath; + sep = *npath; + while ((s = strchr(s, sep)) && ++cnt < 3) + s++; + + if (cnt < 3) { + h = cache_entry_hash(path, strlen(path)); + ce = __lookup_cache_entry(path); + goto out; + } + /* + * Handle paths that have more than two path components and are a complete prefix of the DFS + * referral request path (@path). + * + * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request". 
+ */ + h = cache_entry_hash(npath, strlen(npath)); + e = npath + strlen(npath) - 1; + while (e > s) { + char tmp; + + /* skip separators */ + while (e > s && *e == sep) + e--; + if (e == s) + goto out; + + tmp = *(e+1); + *(e+1) = 0; + + ce = __lookup_cache_entry(npath); + if (!IS_ERR(ce)) { + h = cache_entry_hash(npath, strlen(npath)); + break; + } + + *(e+1) = tmp; + /* backward until separator */ + while (e > s && *e != sep) + e--; + } +out: if (hash) *hash = h; - + kfree(npath); return ce; } @@ -773,6 +832,7 @@ static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl) rc = -ENOMEM; goto err_free_it; } + it->it_path_consumed = t->path_consumed; if (ce->tgthint == t) list_add(&it->it_list, head); @@ -1131,7 +1191,7 @@ err_free_domainname: err_free_unc: kfree(new->UNC); err_free_password: - kzfree(new->password); + kfree_sensitive(new->password); err_free_username: kfree(new->username); kfree(new); @@ -1263,23 +1323,26 @@ void dfs_cache_del_vol(const char *fullpath) /** * dfs_cache_get_tgt_share - parse a DFS target * + * @path: DFS full path * @it: DFS target iterator. * @share: tree name. - * @share_len: length of tree name. * @prefix: prefix path. - * @prefix_len: length of prefix path. * * Return zero if target was parsed correctly, otherwise non-zero. */ -int dfs_cache_get_tgt_share(const struct dfs_cache_tgt_iterator *it, - const char **share, size_t *share_len, - const char **prefix, size_t *prefix_len) +int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, + char **share, char **prefix) { - char *s, sep; + char *s, sep, *p; + size_t len; + size_t plen1, plen2; - if (!it || !share || !share_len || !prefix || !prefix_len) + if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed) return -EINVAL; + *share = NULL; + *prefix = NULL; + sep = it->it_name[0]; if (sep != '\\' && sep != '/') return -EINVAL; @@ -1288,13 +1351,38 @@ int dfs_cache_get_tgt_share(const struct dfs_cache_tgt_iterator *it, if (!s) return -EINVAL; + /* point to prefix in target node */ s = strchrnul(s + 1, sep); - *share = it->it_name; - *share_len = s - it->it_name; - *prefix = *s ? 
s + 1 : s; - *prefix_len = &it->it_name[strlen(it->it_name)] - *prefix; + /* extract target share */ + *share = kstrndup(it->it_name, s - it->it_name, GFP_KERNEL); + if (!*share) + return -ENOMEM; + /* skip separator */ + if (*s) + s++; + /* point to prefix in DFS path */ + p = path + it->it_path_consumed; + if (*p == sep) + p++; + + /* merge prefix paths from DFS path and target node */ + plen1 = it->it_name + strlen(it->it_name) - s; + plen2 = path + strlen(path) - p; + if (plen1 || plen2) { + len = plen1 + plen2 + 2; + *prefix = kmalloc(len, GFP_KERNEL); + if (!*prefix) { + kfree(*share); + *share = NULL; + return -ENOMEM; + } + if (plen1) + scnprintf(*prefix, len, "%.*s%c%.*s", (int)plen1, s, sep, (int)plen2, p); + else + strscpy(*prefix, p, len); + } return 0; } diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h index bf94d08cfb5a..3d7c05194536 100644 --- a/fs/cifs/dfs_cache.h +++ b/fs/cifs/dfs_cache.h @@ -19,6 +19,7 @@ struct dfs_cache_tgt_list { struct dfs_cache_tgt_iterator { char *it_name; + int it_path_consumed; struct list_head it_list; }; @@ -48,10 +49,8 @@ extern int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, extern int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server); extern void dfs_cache_del_vol(const char *fullpath); - -extern int dfs_cache_get_tgt_share(const struct dfs_cache_tgt_iterator *it, - const char **share, size_t *share_len, - const char **prefix, size_t *prefix_len); +extern int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, + char **share, char **prefix); static inline struct dfs_cache_tgt_iterator * dfs_cache_get_next_tgt(struct dfs_cache_tgt_list *tl, diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ce95801e9b66..1f75b25e559a 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -1017,6 +1017,8 @@ handle_mnt_opt: if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID) { rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, true, full_path, fid); + if (rc == -EREMOTE) + rc = 0; if (rc) { cifs_dbg(FYI, "%s: Get mode from SID failed. rc=%d\n", __func__, rc); @@ -1025,6 +1027,8 @@ handle_mnt_opt: } else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { rc = cifs_acl_to_fattr(cifs_sb, &fattr, *inode, false, full_path, fid); + if (rc == -EREMOTE) + rc = 0; if (rc) { cifs_dbg(FYI, "%s: Getting ACL failed with error: %d\n", __func__, rc); @@ -1086,7 +1090,6 @@ smb311_posix_get_inode_info(struct inode **inode, struct super_block *sb, unsigned int xid) { struct cifs_tcon *tcon; - struct TCP_Server_Info *server; struct tcon_link *tlink; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); bool adjust_tz = false; @@ -1100,7 +1103,6 @@ smb311_posix_get_inode_info(struct inode **inode, if (IS_ERR(tlink)) return PTR_ERR(tlink); tcon = tlink_tcon(tlink); - server = tcon->ses->server; /* * 1. 
Fetch file metadata diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index e44d049142d0..1c14cf01dbef 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -103,12 +103,12 @@ sesInfoFree(struct cifs_ses *buf_to_free) kfree(buf_to_free->serverOS); kfree(buf_to_free->serverDomain); kfree(buf_to_free->serverNOS); - kzfree(buf_to_free->password); + kfree_sensitive(buf_to_free->password); kfree(buf_to_free->user_name); kfree(buf_to_free->domainName); - kzfree(buf_to_free->auth_key.response); + kfree_sensitive(buf_to_free->auth_key.response); kfree(buf_to_free->iface_list); - kzfree(buf_to_free); + kfree_sensitive(buf_to_free); } struct cifs_tcon * @@ -148,7 +148,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free) } atomic_dec(&tconInfoAllocCount); kfree(buf_to_free->nativeFileSystem); - kzfree(buf_to_free->password); + kfree_sensitive(buf_to_free->password); kfree(buf_to_free->crfid.fid); #ifdef CONFIG_CIFS_DFS_UPCALL kfree(buf_to_free->dfs_path); @@ -1164,8 +1164,7 @@ static inline void cifs_put_tcon_super(struct super_block *sb) } #endif -int update_super_prepath(struct cifs_tcon *tcon, const char *prefix, - size_t prefix_len) +int update_super_prepath(struct cifs_tcon *tcon, char *prefix) { struct super_block *sb; struct cifs_sb_info *cifs_sb; @@ -1179,8 +1178,8 @@ int update_super_prepath(struct cifs_tcon *tcon, const char *prefix, kfree(cifs_sb->prepath); - if (*prefix && prefix_len) { - cifs_sb->prepath = kstrndup(prefix, prefix_len, GFP_ATOMIC); + if (prefix && *prefix) { + cifs_sb->prepath = kstrndup(prefix, strlen(prefix), GFP_ATOMIC); if (!cifs_sb->prepath) { rc = -ENOMEM; goto out; diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index b7ca4960d4ca..0e728aac67e9 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c @@ -881,6 +881,33 @@ map_smb_to_linux_error(char *buf, bool logErr) return rc; } +int +map_and_check_smb_error(struct mid_q_entry *mid, bool logErr) +{ + int rc; + struct smb_hdr *smb = (struct smb_hdr *)mid->resp_buf; + + rc = map_smb_to_linux_error((char *)smb, logErr); + if (rc == -EACCES && !(smb->Flags2 & SMBFLG2_ERR_STATUS)) { + /* possible ERRBaduid */ + __u8 class = smb->Status.DosError.ErrorClass; + __u16 code = le16_to_cpu(smb->Status.DosError.Error); + + /* switch can be used to handle different errors */ + if (class == ERRSRV && code == ERRbaduid) { + cifs_dbg(FYI, "Server returned 0x%x, reconnecting session...\n", + code); + spin_lock(&GlobalMid_Lock); + if (mid->server->tcpStatus != CifsExiting) + mid->server->tcpStatus = CifsNeedReconnect; + spin_unlock(&GlobalMid_Lock); + } + } + + return rc; +} + + /* * calculate the size of the SMB message based on the fixed header * portion, the number of word parameters and the data portion of the message diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 5d05bd2822d2..de564368a887 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c @@ -798,7 +798,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; - /* Fallthrough */ + fallthrough; default: return Unspecified; } @@ -815,7 +815,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) default: break; } - /* Fallthrough - to attempt LANMAN authentication next */ + fallthrough; /* to attempt LANMAN authentication next */ case CIFS_NEGFLAVOR_LANMAN: switch (requested) { case LANMAN: @@ -823,7 +823,7 @@ cifs_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) case Unspecified: if 
(global_secflags & CIFSSEC_MAY_LANMAN) return LANMAN; - /* Fallthrough */ + fallthrough; default: return Unspecified; } @@ -938,8 +938,7 @@ sess_sendreceive(struct sess_data *sess_data) struct kvec rsp_iov = { NULL, 0 }; count = sess_data->iov[1].iov_len + sess_data->iov[2].iov_len; - smb_buf->smb_buf_length = - cpu_to_be32(be32_to_cpu(smb_buf->smb_buf_length) + count); + be32_add_cpu(&smb_buf->smb_buf_length, count); put_bcc(count, smb_buf); rc = SendReceive2(sess_data->xid, sess_data->ses, @@ -1705,7 +1704,6 @@ static int select_sec(struct cifs_ses *ses, struct sess_data *sess_data) #else cifs_dbg(VFS, "Kerberos negotiated but upcall support disabled!\n"); return -ENOSYS; - break; #endif /* CONFIG_CIFS_UPCALL */ case RawNTLMSSP: sess_data->func = sess_auth_rawntlmssp_negotiate; diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 197ed455e657..80287c26cfac 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c @@ -688,7 +688,7 @@ cifs_mkdir_setinfo(struct inode *inode, const char *full_path, dosattrs = cifsInode->cifsAttrs|ATTR_READONLY; info.Attributes = cpu_to_le32(dosattrs); rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls, - cifs_remap(cifs_sb)); + cifs_sb); if (rc == 0) cifsInode->cifsAttrs = dosattrs; } @@ -783,7 +783,7 @@ smb_set_file_info(struct inode *inode, const char *full_path, tcon = tlink_tcon(tlink); rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls, - cifs_remap(cifs_sb)); + cifs_sb); if (rc == 0) { cinode->cifsAttrs = le32_to_cpu(buf->Attributes); goto out; diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index b9db73687eaa..eba01d0908dd 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -115,6 +115,7 @@ smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, vars->oparms.fid = &fid; vars->oparms.reconnect = false; vars->oparms.mode = mode; + vars->oparms.cifs_sb = cifs_sb; rqst[num_rqst].rq_iov = &vars->open_iov[0]; rqst[num_rqst].rq_nvec = SMB2_CREATE_IOV_SIZE; diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c index 157992864ce7..d88e2683626e 100644 --- a/fs/cifs/smb2misc.c +++ b/fs/cifs/smb2misc.c @@ -508,15 +508,31 @@ cifs_ses_oplock_break(struct work_struct *work) kfree(lw); } +static void +smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key, + __le32 new_lease_state) +{ + struct smb2_lease_break_work *lw; + + lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); + if (!lw) { + cifs_put_tlink(tlink); + return; + } + + INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); + lw->tlink = tlink; + lw->lease_state = new_lease_state; + memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE); + queue_work(cifsiod_wq, &lw->lease_break); +} + static bool -smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, - struct smb2_lease_break_work *lw) +smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp) { - bool found; __u8 lease_state; struct list_head *tmp; struct cifsFileInfo *cfile; - struct cifs_pending_open *open; struct cifsInodeInfo *cinode; int ack_req = le32_to_cpu(rsp->Flags & SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); @@ -546,22 +562,29 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp, cfile->oplock_level = lease_state; cifs_queue_oplock_break(cfile); - kfree(lw); return true; } - found = false; + return false; +} + +static struct cifs_pending_open * +smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon, + struct smb2_lease_break *rsp) +{ + __u8 lease_state = le32_to_cpu(rsp->NewLeaseState); + int 
ack_req = le32_to_cpu(rsp->Flags & + SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED); + struct cifs_pending_open *open; + struct cifs_pending_open *found = NULL; + list_for_each_entry(open, &tcon->pending_opens, olist) { if (memcmp(open->lease_key, rsp->LeaseKey, SMB2_LEASE_KEY_SIZE)) continue; if (!found && ack_req) { - found = true; - memcpy(lw->lease_key, open->lease_key, - SMB2_LEASE_KEY_SIZE); - lw->tlink = cifs_get_tlink(open->tlink); - queue_work(cifsiod_wq, &lw->lease_break); + found = open; } cifs_dbg(FYI, "found in the pending open list\n"); @@ -582,14 +605,7 @@ smb2_is_valid_lease_break(char *buffer) struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; - struct smb2_lease_break_work *lw; - - lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL); - if (!lw) - return false; - - INIT_WORK(&lw->lease_break, cifs_ses_oplock_break); - lw->lease_state = rsp->NewLeaseState; + struct cifs_pending_open *open; cifs_dbg(FYI, "Checking for lease break\n"); @@ -607,11 +623,27 @@ smb2_is_valid_lease_break(char *buffer) spin_lock(&tcon->open_file_lock); cifs_stats_inc( &tcon->stats.cifs_stats.num_oplock_brks); - if (smb2_tcon_has_lease(tcon, rsp, lw)) { + if (smb2_tcon_has_lease(tcon, rsp)) { spin_unlock(&tcon->open_file_lock); spin_unlock(&cifs_tcp_ses_lock); return true; } + open = smb2_tcon_find_pending_open_lease(tcon, + rsp); + if (open) { + __u8 lease_key[SMB2_LEASE_KEY_SIZE]; + struct tcon_link *tlink; + + tlink = cifs_get_tlink(open->tlink); + memcpy(lease_key, open->lease_key, + SMB2_LEASE_KEY_SIZE); + spin_unlock(&tcon->open_file_lock); + spin_unlock(&cifs_tcp_ses_lock); + smb2_queue_pending_open_break(tlink, + lease_key, + rsp->NewLeaseState); + return true; + } spin_unlock(&tcon->open_file_lock); if (tcon->crfid.is_valid && @@ -629,7 +661,6 @@ smb2_is_valid_lease_break(char *buffer) } } spin_unlock(&cifs_tcp_ses_lock); - kfree(lw); cifs_dbg(FYI, "Can not process lease break - no lease matched\n"); return false; } diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 2f4cdd290c46..96c172d94fba 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -152,117 +152,6 @@ out: return; } -#ifdef CONFIG_CIFS_DFS_UPCALL -static int __smb2_reconnect(const struct nls_table *nlsc, - struct cifs_tcon *tcon) -{ - int rc; - struct TCP_Server_Info *server = tcon->ses->server; - struct dfs_cache_tgt_list tl; - struct dfs_cache_tgt_iterator *it = NULL; - char *tree; - const char *tcp_host; - size_t tcp_host_len; - const char *dfs_host; - size_t dfs_host_len; - - tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); - if (!tree) - return -ENOMEM; - - if (!tcon->dfs_path) { - if (tcon->ipc) { - scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", - server->hostname); - rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); - } else { - rc = SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, - nlsc); - } - goto out; - } - - rc = dfs_cache_noreq_find(tcon->dfs_path + 1, NULL, &tl); - if (rc) - goto out; - - extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len); - - for (it = dfs_cache_get_tgt_iterator(&tl); it; - it = dfs_cache_get_next_tgt(&tl, it)) { - const char *share, *prefix; - size_t share_len, prefix_len; - bool target_match; - - rc = dfs_cache_get_tgt_share(it, &share, &share_len, &prefix, - &prefix_len); - if (rc) { - cifs_dbg(VFS, "%s: failed to parse target share %d\n", - __func__, rc); - continue; - } - - extract_unc_hostname(share, &dfs_host, &dfs_host_len); - - if (dfs_host_len != tcp_host_len - || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) { - cifs_dbg(FYI, "%s: 
%.*s doesn't match %.*s\n", - __func__, - (int)dfs_host_len, dfs_host, - (int)tcp_host_len, tcp_host); - - rc = match_target_ip(server, dfs_host, dfs_host_len, - &target_match); - if (rc) { - cifs_dbg(VFS, "%s: failed to match target ip: %d\n", - __func__, rc); - break; - } - - if (!target_match) { - cifs_dbg(FYI, "%s: skipping target\n", __func__); - continue; - } - } - - if (tcon->ipc) { - scnprintf(tree, MAX_TREE_SIZE, "\\\\%.*s\\IPC$", - (int)share_len, share); - rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); - } else { - scnprintf(tree, MAX_TREE_SIZE, "\\%.*s", (int)share_len, - share); - rc = SMB2_tcon(0, tcon->ses, tree, tcon, nlsc); - if (!rc) { - rc = update_super_prepath(tcon, prefix, - prefix_len); - break; - } - } - if (rc == -EREMOTE) - break; - } - - if (!rc) { - if (it) - rc = dfs_cache_noreq_update_tgthint(tcon->dfs_path + 1, - it); - else - rc = -ENOENT; - } - dfs_cache_free_tgts(&tl); -out: - kfree(tree); - return rc; -} -#else -static inline int __smb2_reconnect(const struct nls_table *nlsc, - struct cifs_tcon *tcon) -{ - return SMB2_tcon(0, tcon->ses, tcon->treeName, tcon, nlsc); -} -#endif - static int smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, struct TCP_Server_Info *server) @@ -409,7 +298,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon, if (tcon->use_persistent) tcon->need_reopen_files = true; - rc = __smb2_reconnect(nls_codepage, tcon); + rc = cifs_tree_connect(0, tcon, nls_codepage); mutex_unlock(&tcon->ses->session_mutex); cifs_dbg(FYI, "reconnect tcon rc = %d\n", rc); @@ -1212,7 +1101,7 @@ smb2_select_sectype(struct TCP_Server_Info *server, enum securityEnum requested) if ((server->sec_kerberos || server->sec_mskerberos) && (global_secflags & CIFSSEC_MAY_KRB5)) return Kerberos; - /* Fallthrough */ + fallthrough; default: return Unspecified; } @@ -1387,6 +1276,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data) spnego_key = cifs_get_spnego_key(ses); if (IS_ERR(spnego_key)) { rc = PTR_ERR(spnego_key); + if (rc == -ENOKEY) + cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n"); spnego_key = NULL; goto out; } @@ -4022,7 +3913,7 @@ smb2_readv_callback(struct mid_q_entry *mid) case MID_RESPONSE_MALFORMED: credits.value = le16_to_cpu(shdr->CreditRequest); credits.instance = server->reconnect_instance; - /* fall through */ + fallthrough; default: rdata->result = -EIO; } @@ -4255,7 +4146,7 @@ smb2_writev_callback(struct mid_q_entry *mid) case MID_RESPONSE_MALFORMED: credits.value = le16_to_cpu(rsp->sync_hdr.CreditRequest); credits.instance = server->reconnect_instance; - /* fall through */ + fallthrough; default: wdata->result = -EIO; break; diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 236814d61248..c3f1baf5bde2 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h @@ -31,7 +31,7 @@ * Note that, due to trying to use names similar to the protocol specifications, * there are many mixed case field names in the structures below. Although * this does not match typical Linux kernel style, it is necessary to be - * be able to match against the protocol specfication. + * able to match against the protocol specfication. * * SMB2 commands * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index 84433d0653f9..ac7632482736 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -936,7 +936,7 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, } /* BB special case reconnect tid and uid here? 
*/ - return map_smb_to_linux_error(mid->resp_buf, log_error); + return map_and_check_smb_error(mid, log_error); } struct mid_q_entry * diff --git a/fs/cifs/winucase.c b/fs/cifs/winucase.c index 1a23a1d2ebf9..59b6c577aa0a 100644 --- a/fs/cifs/winucase.c +++ b/fs/cifs/winucase.c @@ -9,7 +9,7 @@ * * 3.1.5.3 Mapping UTF-16 Strings to Upper Case: * - * http://msdn.microsoft.com/en-us/library/hh877830.aspx + * https://msdn.microsoft.com/en-us/library/hh877830.aspx * http://www.microsoft.com/en-us/download/details.aspx?displaylang=en&id=10921 * * In particular, the table in "Windows 8 Upper Case Mapping Table.txt" was diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index cb733652ecca..ca2273727225 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -1688,11 +1688,11 @@ static loff_t configfs_dir_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case 1: offset += file->f_pos; - /* fall through */ + fallthrough; case 0: if (offset >= 0) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/fs/coredump.c b/fs/coredump.c index 7237f07ff6be..76e7c10edfc0 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -153,10 +153,10 @@ int cn_esc_printf(struct core_name *cn, const char *fmt, ...) return ret; } -static int cn_print_exe_file(struct core_name *cn) +static int cn_print_exe_file(struct core_name *cn, bool name_only) { struct file *exe_file; - char *pathbuf, *path; + char *pathbuf, *path, *ptr; int ret; exe_file = get_mm_exe_file(current->mm); @@ -175,6 +175,11 @@ static int cn_print_exe_file(struct core_name *cn) goto free_buf; } + if (name_only) { + ptr = strrchr(path, '/'); + if (ptr) + path = ptr + 1; + } ret = cn_esc_printf(cn, "%s", path); free_buf: @@ -301,12 +306,16 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm, utsname()->nodename); up_read(&uts_sem); break; - /* executable */ + /* executable, could be changed by prctl PR_SET_NAME etc */ case 'e': err = cn_esc_printf(cn, "%s", current->comm); break; + /* file name of executable */ + case 'f': + err = cn_print_exe_file(cn, true); + break; case 'E': - err = cn_print_exe_file(cn); + err = cn_print_exe_file(cn, false); break; /* core limit size */ case 'c': diff --git a/fs/crypto/Kconfig b/fs/crypto/Kconfig index 8046d7c7a3e9..a5f5c30368a2 100644 --- a/fs/crypto/Kconfig +++ b/fs/crypto/Kconfig @@ -4,6 +4,7 @@ config FS_ENCRYPTION select CRYPTO select CRYPTO_HASH select CRYPTO_SKCIPHER + select CRYPTO_LIB_SHA256 select KEYS help Enable encryption of files and directories. This @@ -21,6 +22,11 @@ config FS_ENCRYPTION_ALGS select CRYPTO_CTS select CRYPTO_ECB select CRYPTO_HMAC - select CRYPTO_SHA256 select CRYPTO_SHA512 select CRYPTO_XTS + +config FS_ENCRYPTION_INLINE_CRYPT + bool "Enable fscrypt to use inline crypto" + depends on FS_ENCRYPTION && BLK_INLINE_ENCRYPTION + help + Enable fscrypt to use inline encryption hardware if available. 
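The new FS_ENCRYPTION_INLINE_CRYPT option gates fs/crypto/inline_crypt.c (added further down in this diff), whose exported helpers fscrypt_set_bio_crypt_ctx() and fscrypt_mergeable_bio() are what a filesystem calls while building bios for contents I/O. The following is only a sketch of the intended calling pattern, mirroring fscrypt_zeroout_range_inline_crypt() in fs/crypto/bio.c; every name prefixed example_ is a hypothetical placeholder and not part of this series.

	/*
	 * Sketch only: bio construction with the inline-crypto hooks.
	 * fscrypt_set_bio_crypt_ctx() and fscrypt_mergeable_bio() are the
	 * real helpers added in fs/crypto/inline_crypt.c; the example_*
	 * names are assumptions made for illustration.
	 */
	#include <linux/bio.h>
	#include <linux/fs.h>
	#include <linux/fscrypt.h>

	static struct bio *example_alloc_write_bio(struct inode *inode,
						   sector_t first_sector,
						   u64 first_lblk)
	{
		struct bio *bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES);

		/* Must be done before any pages are added to the bio. */
		fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, GFP_NOFS);
		bio_set_dev(bio, inode->i_sb->s_bdev);
		bio->bi_iter.bi_sector = first_sector;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
		return bio;
	}

	static bool example_block_fits(struct bio *bio, struct inode *inode,
				       u64 next_lblk)
	{
		/*
		 * Only blocks whose data unit numbers continue the bio's DUN
		 * range may be added; otherwise the caller submits this bio
		 * and starts a new one.
		 */
		return fscrypt_mergeable_bio(bio, inode, next_lblk);
	}

When fscrypt_mergeable_bio() returns false, the caller submits the current bio and allocates a fresh one, which is exactly what the new zeroout path in fs/crypto/bio.c does once it hits a DUN discontinuity or fills the bio.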
diff --git a/fs/crypto/Makefile b/fs/crypto/Makefile index 232e2bb5a337..652c7180ec6d 100644 --- a/fs/crypto/Makefile +++ b/fs/crypto/Makefile @@ -11,3 +11,4 @@ fscrypto-y := crypto.o \ policy.o fscrypto-$(CONFIG_BLOCK) += bio.o +fscrypto-$(CONFIG_FS_ENCRYPTION_INLINE_CRYPT) += inline_crypt.o diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c index 4fa18fff9c4e..b048a0e38516 100644 --- a/fs/crypto/bio.c +++ b/fs/crypto/bio.c @@ -41,6 +41,53 @@ void fscrypt_decrypt_bio(struct bio *bio) } EXPORT_SYMBOL(fscrypt_decrypt_bio); +static int fscrypt_zeroout_range_inline_crypt(const struct inode *inode, + pgoff_t lblk, sector_t pblk, + unsigned int len) +{ + const unsigned int blockbits = inode->i_blkbits; + const unsigned int blocks_per_page = 1 << (PAGE_SHIFT - blockbits); + struct bio *bio; + int ret, err = 0; + int num_pages = 0; + + /* This always succeeds since __GFP_DIRECT_RECLAIM is set. */ + bio = bio_alloc(GFP_NOFS, BIO_MAX_PAGES); + + while (len) { + unsigned int blocks_this_page = min(len, blocks_per_page); + unsigned int bytes_this_page = blocks_this_page << blockbits; + + if (num_pages == 0) { + fscrypt_set_bio_crypt_ctx(bio, inode, lblk, GFP_NOFS); + bio_set_dev(bio, inode->i_sb->s_bdev); + bio->bi_iter.bi_sector = + pblk << (blockbits - SECTOR_SHIFT); + bio_set_op_attrs(bio, REQ_OP_WRITE, 0); + } + ret = bio_add_page(bio, ZERO_PAGE(0), bytes_this_page, 0); + if (WARN_ON(ret != bytes_this_page)) { + err = -EIO; + goto out; + } + num_pages++; + len -= blocks_this_page; + lblk += blocks_this_page; + pblk += blocks_this_page; + if (num_pages == BIO_MAX_PAGES || !len || + !fscrypt_mergeable_bio(bio, inode, lblk)) { + err = submit_bio_wait(bio); + if (err) + goto out; + bio_reset(bio); + num_pages = 0; + } + } +out: + bio_put(bio); + return err; +} + /** * fscrypt_zeroout_range() - zero out a range of blocks in an encrypted file * @inode: the file's inode @@ -75,6 +122,10 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, if (len == 0) return 0; + if (fscrypt_inode_uses_inline_crypto(inode)) + return fscrypt_zeroout_range_inline_crypt(inode, lblk, pblk, + len); + BUILD_BUG_ON(ARRAY_SIZE(pages) > BIO_MAX_PAGES); nr_pages = min_t(unsigned int, ARRAY_SIZE(pages), (len + blocks_per_page - 1) >> blocks_per_page_bits); diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c index ed015cb66c7c..9212325763b0 100644 --- a/fs/crypto/crypto.c +++ b/fs/crypto/crypto.c @@ -84,7 +84,7 @@ void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, WARN_ON_ONCE(lblk_num > U32_MAX); lblk_num = (u32)(ci->ci_hashed_ino + lblk_num); } else if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) { - memcpy(iv->nonce, ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE); + memcpy(iv->nonce, ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE); } iv->lblk_num = cpu_to_le64(lblk_num); } @@ -100,7 +100,7 @@ int fscrypt_crypt_block(const struct inode *inode, fscrypt_direction_t rw, DECLARE_CRYPTO_WAIT(wait); struct scatterlist dst, src; struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; int res = 0; if (WARN_ON_ONCE(len <= 0)) diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 83ca5f1e7934..011830f84d8d 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c @@ -61,30 +61,13 @@ struct fscrypt_nokey_name { */ #define FSCRYPT_NOKEY_NAME_MAX offsetofend(struct fscrypt_nokey_name, sha256) -static struct crypto_shash *sha256_hash_tfm; - -static int fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result) +static void 
fscrypt_do_sha256(const u8 *data, unsigned int data_len, u8 *result) { - struct crypto_shash *tfm = READ_ONCE(sha256_hash_tfm); - - if (unlikely(!tfm)) { - struct crypto_shash *prev_tfm; - - tfm = crypto_alloc_shash("sha256", 0, 0); - if (IS_ERR(tfm)) { - fscrypt_err(NULL, - "Error allocating SHA-256 transform: %ld", - PTR_ERR(tfm)); - return PTR_ERR(tfm); - } - prev_tfm = cmpxchg(&sha256_hash_tfm, NULL, tfm); - if (prev_tfm) { - crypto_free_shash(tfm); - tfm = prev_tfm; - } - } + struct sha256_state sctx; - return crypto_shash_tfm_digest(tfm, data, data_len, result); + sha256_init(&sctx); + sha256_update(&sctx, data, data_len); + sha256_final(&sctx, result); } static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) @@ -115,7 +98,7 @@ int fscrypt_fname_encrypt(const struct inode *inode, const struct qstr *iname, struct skcipher_request *req = NULL; DECLARE_CRYPTO_WAIT(wait); const struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; union fscrypt_iv iv; struct scatterlist sg; int res; @@ -171,7 +154,7 @@ static int fname_decrypt(const struct inode *inode, DECLARE_CRYPTO_WAIT(wait); struct scatterlist src_sg, dst_sg; const struct fscrypt_info *ci = inode->i_crypt_info; - struct crypto_skcipher *tfm = ci->ci_ctfm; + struct crypto_skcipher *tfm = ci->ci_enc_key.tfm; union fscrypt_iv iv; int res; @@ -349,7 +332,6 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode, const struct qstr qname = FSTR_TO_QSTR(iname); struct fscrypt_nokey_name nokey_name; u32 size; /* size of the unencoded no-key name */ - int err; if (fscrypt_is_dot_dotdot(&qname)) { oname->name[0] = '.'; @@ -387,11 +369,9 @@ int fscrypt_fname_disk_to_usr(const struct inode *inode, } else { memcpy(nokey_name.bytes, iname->name, sizeof(nokey_name.bytes)); /* Compute strong hash of remaining part of name. 
*/ - err = fscrypt_do_sha256(&iname->name[sizeof(nokey_name.bytes)], - iname->len - sizeof(nokey_name.bytes), - nokey_name.sha256); - if (err) - return err; + fscrypt_do_sha256(&iname->name[sizeof(nokey_name.bytes)], + iname->len - sizeof(nokey_name.bytes), + nokey_name.sha256); size = FSCRYPT_NOKEY_NAME_MAX; } oname->len = base64_encode((const u8 *)&nokey_name, size, oname->name); @@ -530,9 +510,8 @@ bool fscrypt_match_name(const struct fscrypt_name *fname, return false; if (memcmp(de_name, nokey_name->bytes, sizeof(nokey_name->bytes))) return false; - if (fscrypt_do_sha256(&de_name[sizeof(nokey_name->bytes)], - de_name_len - sizeof(nokey_name->bytes), sha256)) - return false; + fscrypt_do_sha256(&de_name[sizeof(nokey_name->bytes)], + de_name_len - sizeof(nokey_name->bytes), sha256); return !memcmp(sha256, nokey_name->sha256, sizeof(sha256)); } EXPORT_SYMBOL_GPL(fscrypt_match_name); diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h index eb7fcd2b7fb8..8117a61b6f55 100644 --- a/fs/crypto/fscrypt_private.h +++ b/fs/crypto/fscrypt_private.h @@ -14,12 +14,13 @@ #include <linux/fscrypt.h> #include <linux/siphash.h> #include <crypto/hash.h> +#include <linux/blk-crypto.h> #define CONST_STRLEN(str) (sizeof(str) - 1) -#define FS_KEY_DERIVATION_NONCE_SIZE 16 +#define FSCRYPT_FILE_NONCE_SIZE 16 -#define FSCRYPT_MIN_KEY_SIZE 16 +#define FSCRYPT_MIN_KEY_SIZE 16 #define FSCRYPT_CONTEXT_V1 1 #define FSCRYPT_CONTEXT_V2 2 @@ -30,7 +31,7 @@ struct fscrypt_context_v1 { u8 filenames_encryption_mode; u8 flags; u8 master_key_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; }; struct fscrypt_context_v2 { @@ -40,7 +41,7 @@ struct fscrypt_context_v2 { u8 flags; u8 __reserved[4]; u8 master_key_identifier[FSCRYPT_KEY_IDENTIFIER_SIZE]; - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; }; /* @@ -166,6 +167,20 @@ struct fscrypt_symlink_data { char encrypted_path[1]; } __packed; +/** + * struct fscrypt_prepared_key - a key prepared for actual encryption/decryption + * @tfm: crypto API transform object + * @blk_key: key for blk-crypto + * + * Normally only one of the fields will be non-NULL. + */ +struct fscrypt_prepared_key { + struct crypto_skcipher *tfm; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + struct fscrypt_blk_crypto_key *blk_key; +#endif +}; + /* * fscrypt_info - the "encryption key" for an inode * @@ -175,12 +190,20 @@ struct fscrypt_symlink_data { */ struct fscrypt_info { - /* The actual crypto transform used for encryption and decryption */ - struct crypto_skcipher *ci_ctfm; + /* The key in a form prepared for actual encryption/decryption */ + struct fscrypt_prepared_key ci_enc_key; - /* True if the key should be freed when this fscrypt_info is freed */ + /* True if ci_enc_key should be freed when this fscrypt_info is freed */ bool ci_owns_key; +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + /* + * True if this inode will use inline encryption (blk-crypto) instead of + * the traditional filesystem-layer encryption. + */ + bool ci_inlinecrypt; +#endif + /* * Encryption mode used for this inode. It corresponds to either the * contents or filenames encryption mode, depending on the inode type. @@ -205,7 +228,7 @@ struct fscrypt_info { /* * If non-NULL, then encryption is done using the master key directly - * and ci_ctfm will equal ci_direct_key->dk_ctfm. + * and ci_enc_key will equal ci_direct_key->dk_key. 
*/ struct fscrypt_direct_key *ci_direct_key; @@ -221,7 +244,7 @@ struct fscrypt_info { union fscrypt_policy ci_policy; /* This inode's nonce, copied from the fscrypt_context */ - u8 ci_nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 ci_nonce[FSCRYPT_FILE_NONCE_SIZE]; /* Hashed inode number. Only set for IV_INO_LBLK_32 */ u32 ci_hashed_ino; @@ -257,9 +280,10 @@ union fscrypt_iv { __le64 lblk_num; /* per-file nonce; only set in DIRECT_KEY mode */ - u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE]; + u8 nonce[FSCRYPT_FILE_NONCE_SIZE]; }; u8 raw[FSCRYPT_MAX_IV_SIZE]; + __le64 dun[FSCRYPT_MAX_IV_SIZE / sizeof(__le64)]; }; void fscrypt_generate_iv(union fscrypt_iv *iv, u64 lblk_num, @@ -288,13 +312,13 @@ int fscrypt_init_hkdf(struct fscrypt_hkdf *hkdf, const u8 *master_key, * outputs are unique and cryptographically isolated, i.e. knowledge of one * output doesn't reveal another. */ -#define HKDF_CONTEXT_KEY_IDENTIFIER 1 -#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 -#define HKDF_CONTEXT_DIRECT_KEY 3 -#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 -#define HKDF_CONTEXT_DIRHASH_KEY 5 -#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 -#define HKDF_CONTEXT_INODE_HASH_KEY 7 +#define HKDF_CONTEXT_KEY_IDENTIFIER 1 /* info=<empty> */ +#define HKDF_CONTEXT_PER_FILE_ENC_KEY 2 /* info=file_nonce */ +#define HKDF_CONTEXT_DIRECT_KEY 3 /* info=mode_num */ +#define HKDF_CONTEXT_IV_INO_LBLK_64_KEY 4 /* info=mode_num||fs_uuid */ +#define HKDF_CONTEXT_DIRHASH_KEY 5 /* info=file_nonce */ +#define HKDF_CONTEXT_IV_INO_LBLK_32_KEY 6 /* info=mode_num||fs_uuid */ +#define HKDF_CONTEXT_INODE_HASH_KEY 7 /* info=<empty> */ int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, const u8 *info, unsigned int infolen, @@ -302,6 +326,78 @@ int fscrypt_hkdf_expand(const struct fscrypt_hkdf *hkdf, u8 context, void fscrypt_destroy_hkdf(struct fscrypt_hkdf *hkdf); +/* inline_crypt.c */ +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT +int fscrypt_select_encryption_impl(struct fscrypt_info *ci); + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return ci->ci_inlinecrypt; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci); + +void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key); + +/* + * Check whether the crypto transform or blk-crypto key has been allocated in + * @prep_key, depending on which encryption implementation the file will use. + */ +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + /* + * The two smp_load_acquire()'s here pair with the smp_store_release()'s + * in fscrypt_prepare_inline_crypt_key() and fscrypt_prepare_key(). + * I.e., in some cases (namely, if this prep_key is a per-mode + * encryption key) another task can publish blk_key or tfm concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. 
+ */ + if (fscrypt_using_inline_encryption(ci)) + return smp_load_acquire(&prep_key->blk_key) != NULL; + return smp_load_acquire(&prep_key->tfm) != NULL; +} + +#else /* CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + +static inline int fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + return 0; +} + +static inline bool +fscrypt_using_inline_encryption(const struct fscrypt_info *ci) +{ + return false; +} + +static inline int +fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci) +{ + WARN_ON(1); + return -EOPNOTSUPP; +} + +static inline void +fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) +{ +} + +static inline bool +fscrypt_is_key_prepared(struct fscrypt_prepared_key *prep_key, + const struct fscrypt_info *ci) +{ + return smp_load_acquire(&prep_key->tfm) != NULL; +} +#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */ + /* keyring.c */ /* @@ -395,9 +491,9 @@ struct fscrypt_master_key { * Per-mode encryption keys for the various types of encryption policies * that use them. Allocated and derived on-demand. */ - struct crypto_skcipher *mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; - struct crypto_skcipher *mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; - struct crypto_skcipher *mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_direct_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_64_keys[__FSCRYPT_MODE_MAX + 1]; + struct fscrypt_prepared_key mk_iv_ino_lblk_32_keys[__FSCRYPT_MODE_MAX + 1]; /* Hash key for inode numbers. Initialized only when needed. */ siphash_key_t mk_ino_hash_key; @@ -461,13 +557,15 @@ struct fscrypt_mode { int keysize; int ivsize; int logged_impl_name; + enum blk_crypto_mode_num blk_crypto_mode; }; extern struct fscrypt_mode fscrypt_modes[]; -struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, - const u8 *raw_key, - const struct inode *inode); +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, const struct fscrypt_info *ci); + +void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key); int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key); diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c new file mode 100644 index 000000000000..faa25541ccb6 --- /dev/null +++ b/fs/crypto/inline_crypt.c @@ -0,0 +1,368 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Inline encryption support for fscrypt + * + * Copyright 2019 Google LLC + */ + +/* + * With "inline encryption", the block layer handles the decryption/encryption + * as part of the bio, instead of the filesystem doing the crypto itself via + * crypto API. See Documentation/block/inline-encryption.rst. fscrypt still + * provides the key and IV to use. 
+ */ + +#include <linux/blk-crypto.h> +#include <linux/blkdev.h> +#include <linux/buffer_head.h> +#include <linux/sched/mm.h> +#include <linux/slab.h> + +#include "fscrypt_private.h" + +struct fscrypt_blk_crypto_key { + struct blk_crypto_key base; + int num_devs; + struct request_queue *devs[]; +}; + +static int fscrypt_get_num_devices(struct super_block *sb) +{ + if (sb->s_cop->get_num_devices) + return sb->s_cop->get_num_devices(sb); + return 1; +} + +static void fscrypt_get_devices(struct super_block *sb, int num_devs, + struct request_queue **devs) +{ + if (num_devs == 1) + devs[0] = bdev_get_queue(sb->s_bdev); + else + sb->s_cop->get_devices(sb, devs); +} + +static unsigned int fscrypt_get_dun_bytes(const struct fscrypt_info *ci) +{ + struct super_block *sb = ci->ci_inode->i_sb; + unsigned int flags = fscrypt_policy_flags(&ci->ci_policy); + int ino_bits = 64, lblk_bits = 64; + + if (flags & FSCRYPT_POLICY_FLAG_DIRECT_KEY) + return offsetofend(union fscrypt_iv, nonce); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64) + return sizeof(__le64); + + if (flags & FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) + return sizeof(__le32); + + /* Default case: IVs are just the file logical block number */ + if (sb->s_cop->get_ino_and_lblk_bits) + sb->s_cop->get_ino_and_lblk_bits(sb, &ino_bits, &lblk_bits); + return DIV_ROUND_UP(lblk_bits, 8); +} + +/* Enable inline encryption for this file if supported. */ +int fscrypt_select_encryption_impl(struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + struct blk_crypto_config crypto_cfg; + int num_devs; + struct request_queue **devs; + int i; + + /* The file must need contents encryption, not filenames encryption */ + if (!fscrypt_needs_contents_encryption(inode)) + return 0; + + /* The crypto mode must have a blk-crypto counterpart */ + if (ci->ci_mode->blk_crypto_mode == BLK_ENCRYPTION_MODE_INVALID) + return 0; + + /* The filesystem must be mounted with -o inlinecrypt */ + if (!(sb->s_flags & SB_INLINECRYPT)) + return 0; + + /* + * When a page contains multiple logically contiguous filesystem blocks, + * some filesystem code only calls fscrypt_mergeable_bio() for the first + * block in the page. This is fine for most of fscrypt's IV generation + * strategies, where contiguous blocks imply contiguous IVs. But it + * doesn't work with IV_INO_LBLK_32. For now, simply exclude + * IV_INO_LBLK_32 with blocksize != PAGE_SIZE from inline encryption. + */ + if ((fscrypt_policy_flags(&ci->ci_policy) & + FSCRYPT_POLICY_FLAG_IV_INO_LBLK_32) && + sb->s_blocksize != PAGE_SIZE) + return 0; + + /* + * On all the filesystem's devices, blk-crypto must support the crypto + * configuration that the file would use. 
+ */ + crypto_cfg.crypto_mode = ci->ci_mode->blk_crypto_mode; + crypto_cfg.data_unit_size = sb->s_blocksize; + crypto_cfg.dun_bytes = fscrypt_get_dun_bytes(ci); + num_devs = fscrypt_get_num_devices(sb); + devs = kmalloc_array(num_devs, sizeof(*devs), GFP_NOFS); + if (!devs) + return -ENOMEM; + fscrypt_get_devices(sb, num_devs, devs); + + for (i = 0; i < num_devs; i++) { + if (!blk_crypto_config_supported(devs[i], &crypto_cfg)) + goto out_free_devs; + } + + ci->ci_inlinecrypt = true; +out_free_devs: + kfree(devs); + + return 0; +} + +int fscrypt_prepare_inline_crypt_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, + const struct fscrypt_info *ci) +{ + const struct inode *inode = ci->ci_inode; + struct super_block *sb = inode->i_sb; + enum blk_crypto_mode_num crypto_mode = ci->ci_mode->blk_crypto_mode; + int num_devs = fscrypt_get_num_devices(sb); + int queue_refs = 0; + struct fscrypt_blk_crypto_key *blk_key; + int err; + int i; + unsigned int flags; + + blk_key = kzalloc(struct_size(blk_key, devs, num_devs), GFP_NOFS); + if (!blk_key) + return -ENOMEM; + + blk_key->num_devs = num_devs; + fscrypt_get_devices(sb, num_devs, blk_key->devs); + + err = blk_crypto_init_key(&blk_key->base, raw_key, crypto_mode, + fscrypt_get_dun_bytes(ci), sb->s_blocksize); + if (err) { + fscrypt_err(inode, "error %d initializing blk-crypto key", err); + goto fail; + } + + /* + * We have to start using blk-crypto on all the filesystem's devices. + * We also have to save all the request_queue's for later so that the + * key can be evicted from them. This is needed because some keys + * aren't destroyed until after the filesystem was already unmounted + * (namely, the per-mode keys in struct fscrypt_master_key). + */ + for (i = 0; i < num_devs; i++) { + if (!blk_get_queue(blk_key->devs[i])) { + fscrypt_err(inode, "couldn't get request_queue"); + err = -EAGAIN; + goto fail; + } + queue_refs++; + + flags = memalloc_nofs_save(); + err = blk_crypto_start_using_key(&blk_key->base, + blk_key->devs[i]); + memalloc_nofs_restore(flags); + if (err) { + fscrypt_err(inode, + "error %d starting to use blk-crypto", err); + goto fail; + } + } + /* + * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared(). + * I.e., here we publish ->blk_key with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. Note that this concurrency is only + * possible for per-mode keys, not for per-file keys. 
+ */ + smp_store_release(&prep_key->blk_key, blk_key); + return 0; + +fail: + for (i = 0; i < queue_refs; i++) + blk_put_queue(blk_key->devs[i]); + kfree_sensitive(blk_key); + return err; +} + +void fscrypt_destroy_inline_crypt_key(struct fscrypt_prepared_key *prep_key) +{ + struct fscrypt_blk_crypto_key *blk_key = prep_key->blk_key; + int i; + + if (blk_key) { + for (i = 0; i < blk_key->num_devs; i++) { + blk_crypto_evict_key(blk_key->devs[i], &blk_key->base); + blk_put_queue(blk_key->devs[i]); + } + kfree_sensitive(blk_key); + } +} + +bool __fscrypt_inode_uses_inline_crypto(const struct inode *inode) +{ + return inode->i_crypt_info->ci_inlinecrypt; +} +EXPORT_SYMBOL_GPL(__fscrypt_inode_uses_inline_crypto); + +static void fscrypt_generate_dun(const struct fscrypt_info *ci, u64 lblk_num, + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]) +{ + union fscrypt_iv iv; + int i; + + fscrypt_generate_iv(&iv, lblk_num, ci); + + BUILD_BUG_ON(FSCRYPT_MAX_IV_SIZE > BLK_CRYPTO_MAX_IV_SIZE); + memset(dun, 0, BLK_CRYPTO_MAX_IV_SIZE); + for (i = 0; i < ci->ci_mode->ivsize/sizeof(dun[0]); i++) + dun[i] = le64_to_cpu(iv.dun[i]); +} + +/** + * fscrypt_set_bio_crypt_ctx() - prepare a file contents bio for inline crypto + * @bio: a bio which will eventually be submitted to the file + * @inode: the file's inode + * @first_lblk: the first file logical block number in the I/O + * @gfp_mask: memory allocation flags - these must be a waiting mask so that + * bio_crypt_set_ctx can't fail. + * + * If the contents of the file should be encrypted (or decrypted) with inline + * encryption, then assign the appropriate encryption context to the bio. + * + * Normally the bio should be newly allocated (i.e. no pages added yet), as + * otherwise fscrypt_mergeable_bio() won't work as intended. + * + * The encryption context will be freed automatically when the bio is freed. + */ +void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, + u64 first_lblk, gfp_t gfp_mask) +{ + const struct fscrypt_info *ci; + u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!fscrypt_inode_uses_inline_crypto(inode)) + return; + ci = inode->i_crypt_info; + + fscrypt_generate_dun(ci, first_lblk, dun); + bio_crypt_set_ctx(bio, &ci->ci_enc_key.blk_key->base, dun, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx); + +/* Extract the inode and logical block number from a buffer_head. */ +static bool bh_get_inode_and_lblk_num(const struct buffer_head *bh, + const struct inode **inode_ret, + u64 *lblk_num_ret) +{ + struct page *page = bh->b_page; + const struct address_space *mapping; + const struct inode *inode; + + /* + * The ext4 journal (jbd2) can submit a buffer_head it directly created + * for a non-pagecache page. fscrypt doesn't care about these. + */ + mapping = page_mapping(page); + if (!mapping) + return false; + inode = mapping->host; + + *inode_ret = inode; + *lblk_num_ret = ((u64)page->index << (PAGE_SHIFT - inode->i_blkbits)) + + (bh_offset(bh) >> inode->i_blkbits); + return true; +} + +/** + * fscrypt_set_bio_crypt_ctx_bh() - prepare a file contents bio for inline + * crypto + * @bio: a bio which will eventually be submitted to the file + * @first_bh: the first buffer_head for which I/O will be submitted + * @gfp_mask: memory allocation flags + * + * Same as fscrypt_set_bio_crypt_ctx(), except this takes a buffer_head instead + * of an inode and block number directly. 
+ */ +void fscrypt_set_bio_crypt_ctx_bh(struct bio *bio, + const struct buffer_head *first_bh, + gfp_t gfp_mask) +{ + const struct inode *inode; + u64 first_lblk; + + if (bh_get_inode_and_lblk_num(first_bh, &inode, &first_lblk)) + fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask); +} +EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh); + +/** + * fscrypt_mergeable_bio() - test whether data can be added to a bio + * @bio: the bio being built up + * @inode: the inode for the next part of the I/O + * @next_lblk: the next file logical block number in the I/O + * + * When building a bio which may contain data which should undergo inline + * encryption (or decryption) via fscrypt, filesystems should call this function + * to ensure that the resulting bio contains only contiguous data unit numbers. + * This will return false if the next part of the I/O cannot be merged with the + * bio because either the encryption key would be different or the encryption + * data unit numbers would be discontiguous. + * + * fscrypt_set_bio_crypt_ctx() must have already been called on the bio. + * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode, + u64 next_lblk) +{ + const struct bio_crypt_ctx *bc = bio->bi_crypt_context; + u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE]; + + if (!!bc != fscrypt_inode_uses_inline_crypto(inode)) + return false; + if (!bc) + return true; + + /* + * Comparing the key pointers is good enough, as all I/O for each key + * uses the same pointer. I.e., there's currently no need to support + * merging requests where the keys are the same but the pointers differ. + */ + if (bc->bc_key != &inode->i_crypt_info->ci_enc_key.blk_key->base) + return false; + + fscrypt_generate_dun(inode->i_crypt_info, next_lblk, next_dun); + return bio_crypt_dun_is_contiguous(bc, bio->bi_iter.bi_size, next_dun); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio); + +/** + * fscrypt_mergeable_bio_bh() - test whether data can be added to a bio + * @bio: the bio being built up + * @next_bh: the next buffer_head for which I/O will be submitted + * + * Same as fscrypt_mergeable_bio(), except this takes a buffer_head instead of + * an inode and block number directly. 
+ * + * Return: true iff the I/O is mergeable + */ +bool fscrypt_mergeable_bio_bh(struct bio *bio, + const struct buffer_head *next_bh) +{ + const struct inode *inode; + u64 next_lblk; + + if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk)) + return !bio->bi_crypt_context; + + return fscrypt_mergeable_bio(bio, inode, next_lblk); +} +EXPORT_SYMBOL_GPL(fscrypt_mergeable_bio_bh); diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c index e24eb48bfbe1..e74f239c4428 100644 --- a/fs/crypto/keyring.c +++ b/fs/crypto/keyring.c @@ -45,13 +45,13 @@ static void free_master_key(struct fscrypt_master_key *mk) wipe_master_key_secret(&mk->mk_secret); for (i = 0; i <= __FSCRYPT_MODE_MAX; i++) { - crypto_free_skcipher(mk->mk_direct_keys[i]); - crypto_free_skcipher(mk->mk_iv_ino_lblk_64_keys[i]); - crypto_free_skcipher(mk->mk_iv_ino_lblk_32_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_direct_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_64_keys[i]); + fscrypt_destroy_prepared_key(&mk->mk_iv_ino_lblk_32_keys[i]); } key_put(mk->mk_users); - kzfree(mk); + kfree_sensitive(mk); } static inline bool valid_key_spec(const struct fscrypt_key_specifier *spec) @@ -213,7 +213,11 @@ static int allocate_filesystem_keyring(struct super_block *sb) if (IS_ERR(keyring)) return PTR_ERR(keyring); - /* Pairs with READ_ONCE() in fscrypt_find_master_key() */ + /* + * Pairs with the smp_load_acquire() in fscrypt_find_master_key(). + * I.e., here we publish ->s_master_keys with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. + */ smp_store_release(&sb->s_master_keys, keyring); return 0; } @@ -234,8 +238,13 @@ struct key *fscrypt_find_master_key(struct super_block *sb, struct key *keyring; char description[FSCRYPT_MK_DESCRIPTION_SIZE]; - /* pairs with smp_store_release() in allocate_filesystem_keyring() */ - keyring = READ_ONCE(sb->s_master_keys); + /* + * Pairs with the smp_store_release() in allocate_filesystem_keyring(). + * I.e., another task can publish ->s_master_keys concurrently, + * executing a RELEASE barrier. We need to use smp_load_acquire() here + * to safely ACQUIRE the memory the other task published. + */ + keyring = smp_load_acquire(&sb->s_master_keys); if (keyring == NULL) return ERR_PTR(-ENOKEY); /* No keyring yet, so no keys yet. 
*/ @@ -522,7 +531,7 @@ static int fscrypt_provisioning_key_preparse(struct key_preparsed_payload *prep) static void fscrypt_provisioning_key_free_preparse( struct key_preparsed_payload *prep) { - kzfree(prep->payload.data[0]); + kfree_sensitive(prep->payload.data[0]); } static void fscrypt_provisioning_key_describe(const struct key *key, @@ -539,7 +548,7 @@ static void fscrypt_provisioning_key_describe(const struct key *key, static void fscrypt_provisioning_key_destroy(struct key *key) { - kzfree(key->payload.data[0]); + kfree_sensitive(key->payload.data[0]); } static struct key_type key_type_fscrypt_provisioning = { diff --git a/fs/crypto/keysetup.c b/fs/crypto/keysetup.c index 1129adfa097d..fea6226afc2b 100644 --- a/fs/crypto/keysetup.c +++ b/fs/crypto/keysetup.c @@ -19,6 +19,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "xts(aes)", .keysize = 64, .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_256_XTS, }, [FSCRYPT_MODE_AES_256_CTS] = { .friendly_name = "AES-256-CTS-CBC", @@ -31,6 +32,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "essiv(cbc(aes),sha256)", .keysize = 16, .ivsize = 16, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV, }, [FSCRYPT_MODE_AES_128_CTS] = { .friendly_name = "AES-128-CTS-CBC", @@ -43,6 +45,7 @@ struct fscrypt_mode fscrypt_modes[] = { .cipher_str = "adiantum(xchacha12,aes)", .keysize = 32, .ivsize = 32, + .blk_crypto_mode = BLK_ENCRYPTION_MODE_ADIANTUM, }, }; @@ -64,9 +67,9 @@ select_encryption_mode(const union fscrypt_policy *policy, } /* Create a symmetric cipher object for the given encryption mode and key */ -struct crypto_skcipher *fscrypt_allocate_skcipher(struct fscrypt_mode *mode, - const u8 *raw_key, - const struct inode *inode) +static struct crypto_skcipher * +fscrypt_allocate_skcipher(struct fscrypt_mode *mode, const u8 *raw_key, + const struct inode *inode) { struct crypto_skcipher *tfm; int err; @@ -109,30 +112,56 @@ err_free_tfm: return ERR_PTR(err); } -/* Given a per-file encryption key, set up the file's crypto transform object */ -int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) +/* + * Prepare the crypto transform object or blk-crypto key in @prep_key, given the + * raw key, encryption mode, and flag indicating which encryption implementation + * (fs-layer or blk-crypto) will be used. + */ +int fscrypt_prepare_key(struct fscrypt_prepared_key *prep_key, + const u8 *raw_key, const struct fscrypt_info *ci) { struct crypto_skcipher *tfm; + if (fscrypt_using_inline_encryption(ci)) + return fscrypt_prepare_inline_crypt_key(prep_key, raw_key, ci); + tfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, ci->ci_inode); if (IS_ERR(tfm)) return PTR_ERR(tfm); + /* + * Pairs with the smp_load_acquire() in fscrypt_is_key_prepared(). + * I.e., here we publish ->tfm with a RELEASE barrier so that + * concurrent tasks can ACQUIRE it. Note that this concurrency is only + * possible for per-mode keys, not for per-file keys. + */ + smp_store_release(&prep_key->tfm, tfm); + return 0; +} - ci->ci_ctfm = tfm; +/* Destroy a crypto transform object and/or blk-crypto key. 
*/ +void fscrypt_destroy_prepared_key(struct fscrypt_prepared_key *prep_key) +{ + crypto_free_skcipher(prep_key->tfm); + fscrypt_destroy_inline_crypt_key(prep_key); +} + +/* Given a per-file encryption key, set up the file's crypto transform object */ +int fscrypt_set_per_file_enc_key(struct fscrypt_info *ci, const u8 *raw_key) +{ ci->ci_owns_key = true; - return 0; + return fscrypt_prepare_key(&ci->ci_enc_key, raw_key, ci); } static int setup_per_mode_enc_key(struct fscrypt_info *ci, struct fscrypt_master_key *mk, - struct crypto_skcipher **tfms, + struct fscrypt_prepared_key *keys, u8 hkdf_context, bool include_fs_uuid) { const struct inode *inode = ci->ci_inode; const struct super_block *sb = inode->i_sb; struct fscrypt_mode *mode = ci->ci_mode; const u8 mode_num = mode - fscrypt_modes; - struct crypto_skcipher *tfm; + struct fscrypt_prepared_key *prep_key; u8 mode_key[FSCRYPT_MAX_KEY_SIZE]; u8 hkdf_info[sizeof(mode_num) + sizeof(sb->s_uuid)]; unsigned int hkdf_infolen = 0; @@ -141,16 +170,15 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, if (WARN_ON(mode_num > __FSCRYPT_MODE_MAX)) return -EINVAL; - /* pairs with smp_store_release() below */ - tfm = READ_ONCE(tfms[mode_num]); - if (likely(tfm != NULL)) { - ci->ci_ctfm = tfm; + prep_key = &keys[mode_num]; + if (fscrypt_is_key_prepared(prep_key, ci)) { + ci->ci_enc_key = *prep_key; return 0; } mutex_lock(&fscrypt_mode_key_setup_mutex); - if (tfms[mode_num]) + if (fscrypt_is_key_prepared(prep_key, ci)) goto done_unlock; BUILD_BUG_ON(sizeof(mode_num) != 1); @@ -167,16 +195,12 @@ static int setup_per_mode_enc_key(struct fscrypt_info *ci, mode_key, mode->keysize); if (err) goto out_unlock; - tfm = fscrypt_allocate_skcipher(mode, mode_key, inode); + err = fscrypt_prepare_key(prep_key, mode_key, ci); memzero_explicit(mode_key, mode->keysize); - if (IS_ERR(tfm)) { - err = PTR_ERR(tfm); + if (err) goto out_unlock; - } - /* pairs with READ_ONCE() above */ - smp_store_release(&tfms[mode_num], tfm); done_unlock: - ci->ci_ctfm = tfm; + ci->ci_enc_key = *prep_key; err = 0; out_unlock: mutex_unlock(&fscrypt_mode_key_setup_mutex); @@ -189,7 +213,7 @@ int fscrypt_derive_dirhash_key(struct fscrypt_info *ci, int err; err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_DIRHASH_KEY, - ci->ci_nonce, FS_KEY_DERIVATION_NONCE_SIZE, + ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE, (u8 *)&ci->ci_dirhash_key, sizeof(ci->ci_dirhash_key)); if (err) @@ -270,8 +294,7 @@ static int fscrypt_setup_v2_file_key(struct fscrypt_info *ci, err = fscrypt_hkdf_expand(&mk->mk_secret.hkdf, HKDF_CONTEXT_PER_FILE_ENC_KEY, - ci->ci_nonce, - FS_KEY_DERIVATION_NONCE_SIZE, + ci->ci_nonce, FSCRYPT_FILE_NONCE_SIZE, derived_key, ci->ci_mode->keysize); if (err) return err; @@ -310,6 +333,10 @@ static int setup_file_encryption_key(struct fscrypt_info *ci, struct fscrypt_key_specifier mk_spec; int err; + err = fscrypt_select_encryption_impl(ci); + if (err) + return err; + switch (ci->ci_policy.version) { case FSCRYPT_POLICY_V1: mk_spec.type = FSCRYPT_KEY_SPEC_TYPE_DESCRIPTOR; @@ -402,7 +429,7 @@ static void put_crypt_info(struct fscrypt_info *ci) if (ci->ci_direct_key) fscrypt_put_direct_key(ci->ci_direct_key); else if (ci->ci_owns_key) - crypto_free_skcipher(ci->ci_ctfm); + fscrypt_destroy_prepared_key(&ci->ci_enc_key); key = ci->ci_master_key; if (key) { @@ -472,7 +499,7 @@ int fscrypt_get_encryption_info(struct inode *inode) } memcpy(crypt_info->ci_nonce, fscrypt_context_nonce(&ctx), - FS_KEY_DERIVATION_NONCE_SIZE); + FSCRYPT_FILE_NONCE_SIZE); if 
(!fscrypt_supported_policy(&crypt_info->ci_policy, inode)) { res = -EINVAL; @@ -491,7 +518,17 @@ int fscrypt_get_encryption_info(struct inode *inode) if (res) goto out; + /* + * Multiple tasks may race to set ->i_crypt_info, so use + * cmpxchg_release(). This pairs with the smp_load_acquire() in + * fscrypt_get_info(). I.e., here we publish ->i_crypt_info with a + * RELEASE barrier so that other tasks can ACQUIRE it. + */ if (cmpxchg_release(&inode->i_crypt_info, NULL, crypt_info) == NULL) { + /* + * We won the race and set ->i_crypt_info to our crypt_info. + * Now link it into the master key's inode list. + */ if (master_key) { struct fscrypt_master_key *mk = master_key->payload.data[0]; @@ -562,7 +599,7 @@ EXPORT_SYMBOL(fscrypt_free_inode); */ int fscrypt_drop_inode(struct inode *inode) { - const struct fscrypt_info *ci = READ_ONCE(inode->i_crypt_info); + const struct fscrypt_info *ci = fscrypt_get_info(inode); const struct fscrypt_master_key *mk; /* diff --git a/fs/crypto/keysetup_v1.c b/fs/crypto/keysetup_v1.c index 801b48c0cd7f..a3cb52572b05 100644 --- a/fs/crypto/keysetup_v1.c +++ b/fs/crypto/keysetup_v1.c @@ -45,7 +45,7 @@ static DEFINE_SPINLOCK(fscrypt_direct_keys_lock); * key is longer, then only the first 'derived_keysize' bytes are used. */ static int derive_key_aes(const u8 *master_key, - const u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE], + const u8 nonce[FSCRYPT_FILE_NONCE_SIZE], u8 *derived_key, unsigned int derived_keysize) { int res = 0; @@ -68,7 +68,7 @@ static int derive_key_aes(const u8 *master_key, skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, crypto_req_done, &wait); - res = crypto_skcipher_setkey(tfm, nonce, FS_KEY_DERIVATION_NONCE_SIZE); + res = crypto_skcipher_setkey(tfm, nonce, FSCRYPT_FILE_NONCE_SIZE); if (res < 0) goto out; @@ -146,7 +146,7 @@ struct fscrypt_direct_key { struct hlist_node dk_node; refcount_t dk_refcount; const struct fscrypt_mode *dk_mode; - struct crypto_skcipher *dk_ctfm; + struct fscrypt_prepared_key dk_key; u8 dk_descriptor[FSCRYPT_KEY_DESCRIPTOR_SIZE]; u8 dk_raw[FSCRYPT_MAX_KEY_SIZE]; }; @@ -154,8 +154,8 @@ struct fscrypt_direct_key { static void free_direct_key(struct fscrypt_direct_key *dk) { if (dk) { - crypto_free_skcipher(dk->dk_ctfm); - kzfree(dk); + fscrypt_destroy_prepared_key(&dk->dk_key); + kfree_sensitive(dk); } } @@ -199,6 +199,8 @@ find_or_insert_direct_key(struct fscrypt_direct_key *to_insert, continue; if (ci->ci_mode != dk->dk_mode) continue; + if (!fscrypt_is_key_prepared(&dk->dk_key, ci)) + continue; if (crypto_memneq(raw_key, dk->dk_raw, ci->ci_mode->keysize)) continue; /* using existing tfm with same (descriptor, mode, raw_key) */ @@ -231,13 +233,9 @@ fscrypt_get_direct_key(const struct fscrypt_info *ci, const u8 *raw_key) return ERR_PTR(-ENOMEM); refcount_set(&dk->dk_refcount, 1); dk->dk_mode = ci->ci_mode; - dk->dk_ctfm = fscrypt_allocate_skcipher(ci->ci_mode, raw_key, - ci->ci_inode); - if (IS_ERR(dk->dk_ctfm)) { - err = PTR_ERR(dk->dk_ctfm); - dk->dk_ctfm = NULL; + err = fscrypt_prepare_key(&dk->dk_key, raw_key, ci); + if (err) goto err_free_dk; - } memcpy(dk->dk_descriptor, ci->ci_policy.v1.master_key_descriptor, FSCRYPT_KEY_DESCRIPTOR_SIZE); memcpy(dk->dk_raw, raw_key, ci->ci_mode->keysize); @@ -259,7 +257,7 @@ static int setup_v1_file_key_direct(struct fscrypt_info *ci, if (IS_ERR(dk)) return PTR_ERR(dk); ci->ci_direct_key = dk; - ci->ci_ctfm = dk->dk_ctfm; + ci->ci_enc_key = dk->dk_key; return 0; } @@ -285,7 +283,7 @@ static int setup_v1_file_key_derived(struct fscrypt_info 
*ci, err = fscrypt_set_per_file_enc_key(ci, derived_key); out: - kzfree(derived_key); + kfree_sensitive(derived_key); return err; } diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c index d23ff162c78b..2d73fd39ad96 100644 --- a/fs/crypto/policy.c +++ b/fs/crypto/policy.c @@ -78,6 +78,20 @@ static bool supported_iv_ino_lblk_policy(const struct fscrypt_policy_v2 *policy, int ino_bits = 64, lblk_bits = 64; /* + * IV_INO_LBLK_* exist only because of hardware limitations, and + * currently the only known use case for them involves AES-256-XTS. + * That's also all we test currently. For these reasons, for now only + * allow AES-256-XTS here. This can be relaxed later if a use case for + * IV_INO_LBLK_* with other encryption modes arises. + */ + if (policy->contents_encryption_mode != FSCRYPT_MODE_AES_256_XTS) { + fscrypt_warn(inode, + "Can't use %s policy with contents mode other than AES-256-XTS", + type); + return false; + } + + /* * It's unsafe to include inode numbers in the IVs if the filesystem can * potentially renumber inodes, e.g. via filesystem shrinking. */ @@ -338,7 +352,7 @@ static int fscrypt_get_policy(struct inode *inode, union fscrypt_policy *policy) union fscrypt_context ctx; int ret; - ci = READ_ONCE(inode->i_crypt_info); + ci = fscrypt_get_info(inode); if (ci) { /* key available, use the cached policy */ *policy = ci->ci_policy; @@ -529,7 +543,7 @@ int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg) if (!fscrypt_context_is_valid(&ctx, ret)) return -EINVAL; if (copy_to_user(arg, fscrypt_context_nonce(&ctx), - FS_KEY_DERIVATION_NONCE_SIZE)) + FSCRYPT_FILE_NONCE_SIZE)) return -EFAULT; return 0; } @@ -627,7 +641,7 @@ int fscrypt_inherit_context(struct inode *parent, struct inode *child, if (res < 0) return res; - ci = READ_ONCE(parent->i_crypt_info); + ci = fscrypt_get_info(parent); if (ci == NULL) return -ENOKEY; @@ -488,7 +488,7 @@ retry: if (dax_is_conflict(entry)) goto fallback; if (!xa_is_value(entry)) { - xas_set_err(xas, EIO); + xas_set_err(xas, -EIO); goto out_unlock; } @@ -680,21 +680,20 @@ int dax_invalidate_mapping_entry_sync(struct address_space *mapping, return __dax_invalidate_entry(mapping, index, false); } -static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev, - sector_t sector, size_t size, struct page *to, - unsigned long vaddr) +static int copy_cow_page_dax(struct block_device *bdev, struct dax_device *dax_dev, + sector_t sector, struct page *to, unsigned long vaddr) { void *vto, *kaddr; pgoff_t pgoff; long rc; int id; - rc = bdev_dax_pgoff(bdev, sector, size, &pgoff); + rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff); if (rc) return rc; id = dax_read_lock(); - rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL); + rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(PAGE_SIZE), &kaddr, NULL); if (rc < 0) { dax_read_unlock(id); return rc; @@ -1305,8 +1304,8 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, clear_user_highpage(vmf->cow_page, vaddr); break; case IOMAP_MAPPED: - error = copy_user_dax(iomap.bdev, iomap.dax_dev, - sector, PAGE_SIZE, vmf->cow_page, vaddr); + error = copy_cow_page_dax(iomap.bdev, iomap.dax_dev, + sector, vmf->cow_page, vaddr); break; default: WARN_ON_ONCE(1); @@ -1368,7 +1367,7 @@ static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp, ret = dax_load_hole(&xas, mapping, &entry, vmf); goto finish_iomap; } - /*FALLTHRU*/ + fallthrough; default: WARN_ON_ONCE(1); error = -EIO; diff --git a/fs/dcache.c b/fs/dcache.c index 
361ea7ab30ea..ea0485861d93 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1746,7 +1746,7 @@ static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name) dentry->d_lockref.count = 1; dentry->d_flags = 0; spin_lock_init(&dentry->d_lock); - seqcount_init(&dentry->d_seq); + seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); dentry->d_inode = NULL; dentry->d_parent = dentry; dentry->d_sb = sb; diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c index ae49a55bda00..a768a09430c3 100644 --- a/fs/debugfs/file.c +++ b/fs/debugfs/file.c @@ -177,7 +177,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp) goto out; if (!fops_get(real_fops)) { -#ifdef MODULE +#ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) goto out; @@ -273,7 +273,7 @@ static int full_proxy_release(struct inode *inode, struct file *filp) r = real_fops->release(inode, filp); replace_fops(filp, d_inode(dentry)->i_fop); - kfree((void *)proxy_fops); + kfree(proxy_fops); fops_put(real_fops); return r; } @@ -312,7 +312,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp) goto out; if (!fops_get(real_fops)) { -#ifdef MODULE +#ifdef CONFIG_MODULES if (real_fops->owner && real_fops->owner->state == MODULE_STATE_GOING) goto out; @@ -918,11 +918,6 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode, } EXPORT_SYMBOL_GPL(debugfs_create_blob); -struct array_data { - void *array; - u32 elements; -}; - static size_t u32_format_array(char *buf, size_t bufsize, u32 *array, int array_size) { @@ -943,8 +938,8 @@ static size_t u32_format_array(char *buf, size_t bufsize, static int u32_array_open(struct inode *inode, struct file *file) { - struct array_data *data = inode->i_private; - int size, elements = data->elements; + struct debugfs_u32_array *data = inode->i_private; + int size, elements = data->n_elements; char *buf; /* @@ -959,7 +954,7 @@ static int u32_array_open(struct inode *inode, struct file *file) buf[size] = 0; file->private_data = buf; - u32_format_array(buf, size, data->array, data->elements); + u32_format_array(buf, size, data->array, data->n_elements); return nonseekable_open(inode, file); } @@ -996,8 +991,7 @@ static const struct file_operations u32_array_fops = { * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is %NULL, then the * file will be created in the root of the debugfs filesystem. - * @array: u32 array that provides data. - * @elements: total number of elements in the array. + * @array: wrapper struct containing data pointer and size of the array. * * This function creates a file in debugfs with the given name that exports * @array as data. If the @mode variable is so set it can be read from. @@ -1005,17 +999,10 @@ static const struct file_operations u32_array_fops = { * Once array is created its size can not be changed. 
*/ void debugfs_create_u32_array(const char *name, umode_t mode, - struct dentry *parent, u32 *array, u32 elements) + struct dentry *parent, + struct debugfs_u32_array *array) { - struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL); - - if (data == NULL) - return; - - data->array = array; - data->elements = elements; - - debugfs_create_file_unsafe(name, mode, parent, data, &u32_array_fops); + debugfs_create_file_unsafe(name, mode, parent, array, &u32_array_fops); } EXPORT_SYMBOL_GPL(debugfs_create_u32_array); diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index b7f2e971ecbc..2fcf66473436 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -35,6 +35,7 @@ static struct vfsmount *debugfs_mount; static int debugfs_mount_count; static bool debugfs_registered; +static unsigned int debugfs_allow = DEFAULT_DEBUGFS_ALLOW_BITS; /* * Don't allow access attributes to be changed whilst the kernel is locked down @@ -266,6 +267,9 @@ static struct dentry *debug_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { + if (!(debugfs_allow & DEBUGFS_ALLOW_API)) + return ERR_PTR(-EPERM); + return mount_single(fs_type, flags, data, debug_fill_super); } @@ -311,6 +315,9 @@ static struct dentry *start_creating(const char *name, struct dentry *parent) struct dentry *dentry; int error; + if (!(debugfs_allow & DEBUGFS_ALLOW_API)) + return ERR_PTR(-EPERM); + pr_debug("creating file '%s'\n", name); if (IS_ERR(parent)) @@ -385,6 +392,11 @@ static struct dentry *__debugfs_create_file(const char *name, umode_t mode, if (IS_ERR(dentry)) return dentry; + if (!(debugfs_allow & DEBUGFS_ALLOW_API)) { + failed_creating(dentry); + return ERR_PTR(-EPERM); + } + inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) { pr_err("out of free dentries, can not create file '%s'\n", @@ -541,6 +553,11 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent) if (IS_ERR(dentry)) return dentry; + if (!(debugfs_allow & DEBUGFS_ALLOW_API)) { + failed_creating(dentry); + return ERR_PTR(-EPERM); + } + inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) { pr_err("out of free dentries, can not create directory '%s'\n", @@ -583,6 +600,11 @@ struct dentry *debugfs_create_automount(const char *name, if (IS_ERR(dentry)) return dentry; + if (!(debugfs_allow & DEBUGFS_ALLOW_API)) { + failed_creating(dentry); + return ERR_PTR(-EPERM); + } + inode = debugfs_get_inode(dentry->d_sb); if (unlikely(!inode)) { pr_err("out of free dentries, can not create automount '%s'\n", @@ -786,10 +808,27 @@ bool debugfs_initialized(void) } EXPORT_SYMBOL_GPL(debugfs_initialized); +static int __init debugfs_kernel(char *str) +{ + if (str) { + if (!strcmp(str, "on")) + debugfs_allow = DEBUGFS_ALLOW_API | DEBUGFS_ALLOW_MOUNT; + else if (!strcmp(str, "no-mount")) + debugfs_allow = DEBUGFS_ALLOW_API; + else if (!strcmp(str, "off")) + debugfs_allow = 0; + } + + return 0; +} +early_param("debugfs", debugfs_kernel); static int __init debugfs_init(void) { int retval; + if (!(debugfs_allow & DEBUGFS_ALLOW_MOUNT)) + return -EPERM; + retval = sysfs_create_mount_point(kernel_kobj, "debug"); if (retval) return retval; diff --git a/fs/debugfs/internal.h b/fs/debugfs/internal.h index 034e6973cead..92af8ae31313 100644 --- a/fs/debugfs/internal.h +++ b/fs/debugfs/internal.h @@ -29,4 +29,18 @@ struct debugfs_fsdata { */ #define DEBUGFS_FSDATA_IS_REAL_FOPS_BIT BIT(0) +/* Access BITS */ +#define DEBUGFS_ALLOW_API BIT(0) +#define DEBUGFS_ALLOW_MOUNT BIT(1) + +#ifdef CONFIG_DEBUG_FS_ALLOW_ALL +#define 
DEFAULT_DEBUGFS_ALLOW_BITS (DEBUGFS_ALLOW_MOUNT | DEBUGFS_ALLOW_API) +#endif +#ifdef CONFIG_DEBUG_FS_DISALLOW_MOUNT +#define DEFAULT_DEBUGFS_ALLOW_BITS (DEBUGFS_ALLOW_API) +#endif +#ifdef CONFIG_DEBUG_FS_ALLOW_NONE +#define DEFAULT_DEBUGFS_ALLOW_BITS (0) +#endif + #endif /* _DEBUGFS_INTERNAL_H_ */ diff --git a/fs/direct-io.c b/fs/direct-io.c index 6d5370eac2a8..183299892465 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -1387,8 +1387,8 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, * Attempt to prefetch the pieces we likely need later. */ prefetch(&bdev->bd_disk->part_tbl); - prefetch(bdev->bd_queue); - prefetch((char *)bdev->bd_queue + SMP_CACHE_BYTES); + prefetch(bdev->bd_disk->queue); + prefetch((char *)bdev->bd_disk->queue + SMP_CACHE_BYTES); return do_blockdev_direct_IO(iocb, inode, bdev, iter, get_block, end_io, submit_io, flags); diff --git a/fs/dlm/config.c b/fs/dlm/config.c index 3b21082e1b55..47f0b98b707f 100644 --- a/fs/dlm/config.c +++ b/fs/dlm/config.c @@ -73,6 +73,7 @@ struct dlm_cluster { unsigned int cl_log_debug; unsigned int cl_log_info; unsigned int cl_protocol; + unsigned int cl_mark; unsigned int cl_timewarn_cs; unsigned int cl_waitwarn_us; unsigned int cl_new_rsb_count; @@ -96,6 +97,7 @@ enum { CLUSTER_ATTR_LOG_DEBUG, CLUSTER_ATTR_LOG_INFO, CLUSTER_ATTR_PROTOCOL, + CLUSTER_ATTR_MARK, CLUSTER_ATTR_TIMEWARN_CS, CLUSTER_ATTR_WAITWARN_US, CLUSTER_ATTR_NEW_RSB_COUNT, @@ -168,6 +170,7 @@ CLUSTER_ATTR(scan_secs, 1); CLUSTER_ATTR(log_debug, 0); CLUSTER_ATTR(log_info, 0); CLUSTER_ATTR(protocol, 0); +CLUSTER_ATTR(mark, 0); CLUSTER_ATTR(timewarn_cs, 1); CLUSTER_ATTR(waitwarn_us, 0); CLUSTER_ATTR(new_rsb_count, 0); @@ -183,6 +186,7 @@ static struct configfs_attribute *cluster_attrs[] = { [CLUSTER_ATTR_LOG_DEBUG] = &cluster_attr_log_debug, [CLUSTER_ATTR_LOG_INFO] = &cluster_attr_log_info, [CLUSTER_ATTR_PROTOCOL] = &cluster_attr_protocol, + [CLUSTER_ATTR_MARK] = &cluster_attr_mark, [CLUSTER_ATTR_TIMEWARN_CS] = &cluster_attr_timewarn_cs, [CLUSTER_ATTR_WAITWARN_US] = &cluster_attr_waitwarn_us, [CLUSTER_ATTR_NEW_RSB_COUNT] = &cluster_attr_new_rsb_count, @@ -196,6 +200,7 @@ enum { COMM_ATTR_LOCAL, COMM_ATTR_ADDR, COMM_ATTR_ADDR_LIST, + COMM_ATTR_MARK, }; enum { @@ -228,6 +233,7 @@ struct dlm_comm { int nodeid; int local; int addr_count; + unsigned int mark; struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT]; }; @@ -465,6 +471,7 @@ static struct config_item *make_comm(struct config_group *g, const char *name) cm->nodeid = -1; cm->local = 0; cm->addr_count = 0; + cm->mark = 0; return &cm->item; } @@ -660,8 +667,28 @@ static ssize_t comm_addr_list_show(struct config_item *item, char *buf) return 4096 - allowance; } +static ssize_t comm_mark_show(struct config_item *item, char *buf) +{ + return sprintf(buf, "%u\n", config_item_to_comm(item)->mark); +} + +static ssize_t comm_mark_store(struct config_item *item, const char *buf, + size_t len) +{ + unsigned int mark; + int rc; + + rc = kstrtouint(buf, 0, &mark); + if (rc) + return rc; + + config_item_to_comm(item)->mark = mark; + return len; +} + CONFIGFS_ATTR(comm_, nodeid); CONFIGFS_ATTR(comm_, local); +CONFIGFS_ATTR(comm_, mark); CONFIGFS_ATTR_WO(comm_, addr); CONFIGFS_ATTR_RO(comm_, addr_list); @@ -670,6 +697,7 @@ static struct configfs_attribute *comm_attrs[] = { [COMM_ATTR_LOCAL] = &comm_attr_local, [COMM_ATTR_ADDR] = &comm_attr_addr, [COMM_ATTR_ADDR_LIST] = &comm_attr_addr_list, + [COMM_ATTR_MARK] = &comm_attr_mark, NULL, }; @@ -829,6 +857,20 @@ int dlm_comm_seq(int nodeid, uint32_t *seq) return 0; } +int 
dlm_comm_mark(int nodeid, unsigned int *mark) +{ + struct dlm_comm *cm; + + cm = get_comm(nodeid); + if (!cm) + return -ENOENT; + + *mark = cm->mark; + put_comm(cm); + + return 0; +} + int dlm_our_nodeid(void) { return local_comm ? local_comm->nodeid : 0; @@ -855,6 +897,7 @@ int dlm_our_addr(struct sockaddr_storage *addr, int num) #define DEFAULT_LOG_DEBUG 0 #define DEFAULT_LOG_INFO 1 #define DEFAULT_PROTOCOL 0 +#define DEFAULT_MARK 0 #define DEFAULT_TIMEWARN_CS 500 /* 5 sec = 500 centiseconds */ #define DEFAULT_WAITWARN_US 0 #define DEFAULT_NEW_RSB_COUNT 128 @@ -871,6 +914,7 @@ struct dlm_config_info dlm_config = { .ci_log_debug = DEFAULT_LOG_DEBUG, .ci_log_info = DEFAULT_LOG_INFO, .ci_protocol = DEFAULT_PROTOCOL, + .ci_mark = DEFAULT_MARK, .ci_timewarn_cs = DEFAULT_TIMEWARN_CS, .ci_waitwarn_us = DEFAULT_WAITWARN_US, .ci_new_rsb_count = DEFAULT_NEW_RSB_COUNT, diff --git a/fs/dlm/config.h b/fs/dlm/config.h index 2b471aae4e61..f62996cad561 100644 --- a/fs/dlm/config.h +++ b/fs/dlm/config.h @@ -31,6 +31,7 @@ struct dlm_config_info { int ci_log_debug; int ci_log_info; int ci_protocol; + int ci_mark; int ci_timewarn_cs; int ci_waitwarn_us; int ci_new_rsb_count; @@ -45,6 +46,7 @@ void dlm_config_exit(void); int dlm_config_nodes(char *lsname, struct dlm_config_node **nodes_out, int *count_out); int dlm_comm_seq(int nodeid, uint32_t *seq); +int dlm_comm_mark(int nodeid, unsigned int *mark); int dlm_our_nodeid(void); int dlm_our_addr(struct sockaddr_storage *addr, int num); diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 18d81599522f..002123efc6b0 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -5817,7 +5817,7 @@ int dlm_user_request(struct dlm_ls *ls, struct dlm_user_args *ua, break; case -EAGAIN: error = 0; - /* fall through */ + fallthrough; default: __put_lkb(ls, lkb); goto out; diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c index e93670ecfae5..624617c12250 100644 --- a/fs/dlm/lockspace.c +++ b/fs/dlm/lockspace.c @@ -622,6 +622,9 @@ static int new_lockspace(const char *name, const char *cluster, wait_event(ls->ls_recover_lock_wait, test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags)); + /* let kobject handle freeing of ls if there's an error */ + do_unreg = 1; + ls->ls_kobj.kset = dlm_kset; error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL, "%s", ls->ls_name); @@ -629,9 +632,6 @@ static int new_lockspace(const char *name, const char *cluster, goto out_recoverd; kobject_uevent(&ls->ls_kobj, KOBJ_ADD); - /* let kobject handle freeing of ls if there's an error */ - do_unreg = 1; - /* This uevent triggers dlm_controld in userspace to add us to the group of nodes that are members of this lockspace (managed by the cluster infrastructure.) 
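The per-comm "mark" attribute added above is consumed by the lowcomms connect paths below: dlm_comm_mark() returns the value written through configfs and sock_set_mark() stamps it on the peer socket, while listen sockets fall back to the cluster-wide ci_mark default. A minimal sketch of that consumption pattern, assuming dlm's config.h and <net/sock.h> are in scope; the helper name dlm_apply_mark() is illustrative and not part of the patch:

static int dlm_apply_mark(struct socket *sock, int nodeid)
{
	unsigned int mark;
	int err;

	/* value written to <configfs>/dlm/cluster/comms/<nodeid>/mark */
	err = dlm_comm_mark(nodeid, &mark);
	if (err < 0)
		return err;	/* no comm entry configured for this node */

	/* tag every skb sent on this socket so ip rule / nft can match it */
	sock_set_mark(sock->sk, mark);
	return 0;
}

From userspace the knob is plain configfs, e.g. "echo 0x42 > /sys/kernel/config/dlm/cluster/comms/<nodeid>/mark" (hex or decimal both work, since comm_mark_store() parses with kstrtouint(buf, 0, ...)); the path assumes configfs is mounted at the usual /sys/kernel/config.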
Once it's done that, it tells us who the diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c index 3543a8fec907..5050fe05769b 100644 --- a/fs/dlm/lowcomms.c +++ b/fs/dlm/lowcomms.c @@ -63,6 +63,7 @@ /* Number of messages to send before rescheduling */ #define MAX_SEND_MSG_COUNT 25 +#define DLM_SHUTDOWN_WAIT_TIMEOUT msecs_to_jiffies(10000) struct cbuf { unsigned int base; @@ -110,10 +111,12 @@ struct connection { #define CF_CLOSE 6 #define CF_APP_LIMITED 7 #define CF_CLOSING 8 +#define CF_SHUTDOWN 9 struct list_head writequeue; /* List of outgoing writequeue_entries */ spinlock_t writequeue_lock; int (*rx_action) (struct connection *); /* What to do when active */ void (*connect_action) (struct connection *); /* What to do to connect */ + void (*shutdown_action)(struct connection *con); /* What to do to shutdown */ struct page *rx_page; struct cbuf cb; int retries; @@ -122,6 +125,7 @@ struct connection { struct connection *othercon; struct work_struct rwork; /* Receive workqueue */ struct work_struct swork; /* Send workqueue */ + wait_queue_head_t shutdown_wait; /* wait for graceful shutdown */ }; #define sock2con(x) ((struct connection *)(x)->sk_user_data) @@ -218,6 +222,7 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc) spin_lock_init(&con->writequeue_lock); INIT_WORK(&con->swork, process_send_sockets); INIT_WORK(&con->rwork, process_recv_sockets); + init_waitqueue_head(&con->shutdown_wait); /* Setup action pointers for child sockets */ if (con->nodeid) { @@ -619,6 +624,54 @@ static void close_connection(struct connection *con, bool and_other, clear_bit(CF_CLOSING, &con->flags); } +static void shutdown_connection(struct connection *con) +{ + int ret; + + if (cancel_work_sync(&con->swork)) { + log_print("canceled swork for node %d", con->nodeid); + clear_bit(CF_WRITE_PENDING, &con->flags); + } + + mutex_lock(&con->sock_mutex); + /* nothing to shutdown */ + if (!con->sock) { + mutex_unlock(&con->sock_mutex); + return; + } + + set_bit(CF_SHUTDOWN, &con->flags); + ret = kernel_sock_shutdown(con->sock, SHUT_WR); + mutex_unlock(&con->sock_mutex); + if (ret) { + log_print("Connection %p failed to shutdown: %d will force close", + con, ret); + goto force_close; + } else { + ret = wait_event_timeout(con->shutdown_wait, + !test_bit(CF_SHUTDOWN, &con->flags), + DLM_SHUTDOWN_WAIT_TIMEOUT); + if (ret == 0) { + log_print("Connection %p shutdown timed out, will force close", + con); + goto force_close; + } + } + + return; + +force_close: + clear_bit(CF_SHUTDOWN, &con->flags); + close_connection(con, false, true, true); +} + +static void dlm_tcp_shutdown(struct connection *con) +{ + if (con->othercon) + shutdown_connection(con->othercon); + shutdown_connection(con); +} + /* Data received from remote end */ static int receive_from_sock(struct connection *con) { @@ -685,14 +738,14 @@ static int receive_from_sock(struct connection *con) page_address(con->rx_page), con->cb.base, con->cb.len, PAGE_SIZE); - if (ret == -EBADMSG) { - log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d", - page_address(con->rx_page), con->cb.base, + if (ret < 0) { + log_print("lowcomms err %d: addr=%p, base=%u, len=%u, read=%d", + ret, page_address(con->rx_page), con->cb.base, con->cb.len, r); + cbuf_eat(&con->cb, r); + } else { + cbuf_eat(&con->cb, ret); } - if (ret < 0) - goto out_close; - cbuf_eat(&con->cb, ret); if (cbuf_empty(&con->cb) && !call_again_soon) { __free_page(con->rx_page); @@ -713,13 +766,18 @@ out_resched: out_close: mutex_unlock(&con->sock_mutex); if (ret != -EAGAIN) { - 
close_connection(con, true, true, false); /* Reconnect when there is something to send */ + close_connection(con, false, true, false); + if (ret == 0) { + log_print("connection %p got EOF from %d", + con, con->nodeid); + /* handling for tcp shutdown */ + clear_bit(CF_SHUTDOWN, &con->flags); + wake_up(&con->shutdown_wait); + /* signal to breaking receive worker */ + ret = -1; + } } - /* Don't return success if we really got EOF */ - if (ret == 0) - ret = -EAGAIN; - return ret; } @@ -803,22 +861,18 @@ static int accept_from_sock(struct connection *con) spin_lock_init(&othercon->writequeue_lock); INIT_WORK(&othercon->swork, process_send_sockets); INIT_WORK(&othercon->rwork, process_recv_sockets); + init_waitqueue_head(&othercon->shutdown_wait); set_bit(CF_IS_OTHERCON, &othercon->flags); + } else { + /* close other sock con if we have something new */ + close_connection(othercon, false, true, false); } + mutex_lock_nested(&othercon->sock_mutex, 2); - if (!othercon->sock) { - newcon->othercon = othercon; - add_sock(newsock, othercon); - addcon = othercon; - mutex_unlock(&othercon->sock_mutex); - } - else { - printk("Extra connection from node %d attempted\n", nodeid); - result = -EAGAIN; - mutex_unlock(&othercon->sock_mutex); - mutex_unlock(&newcon->sock_mutex); - goto accept_err; - } + newcon->othercon = othercon; + add_sock(newsock, othercon); + addcon = othercon; + mutex_unlock(&othercon->sock_mutex); } else { newcon->rx_action = receive_from_sock; @@ -914,6 +968,7 @@ static void sctp_connect_to_sock(struct connection *con) int result; int addr_len; struct socket *sock; + unsigned int mark; if (con->nodeid == 0) { log_print("attempt to connect sock 0 foiled"); @@ -944,6 +999,13 @@ static void sctp_connect_to_sock(struct connection *con) if (result < 0) goto socket_err; + /* set skb mark */ + result = dlm_comm_mark(con->nodeid, &mark); + if (result < 0) + goto bind_err; + + sock_set_mark(sock->sk, mark); + con->rx_action = receive_from_sock; con->connect_action = sctp_connect_to_sock; add_sock(sock, con); @@ -1006,6 +1068,7 @@ static void tcp_connect_to_sock(struct connection *con) struct sockaddr_storage saddr, src_addr; int addr_len; struct socket *sock = NULL; + unsigned int mark; int result; if (con->nodeid == 0) { @@ -1027,6 +1090,13 @@ static void tcp_connect_to_sock(struct connection *con) if (result < 0) goto out_err; + /* set skb mark */ + result = dlm_comm_mark(con->nodeid, &mark); + if (result < 0) + goto out_err; + + sock_set_mark(sock->sk, mark); + memset(&saddr, 0, sizeof(saddr)); result = nodeid_to_addr(con->nodeid, &saddr, NULL, false); if (result < 0) { @@ -1036,6 +1106,7 @@ static void tcp_connect_to_sock(struct connection *con) con->rx_action = receive_from_sock; con->connect_action = tcp_connect_to_sock; + con->shutdown_action = dlm_tcp_shutdown; add_sock(sock, con); /* Bind to our cluster-known address connecting to avoid @@ -1111,6 +1182,8 @@ static struct socket *tcp_create_listen_sock(struct connection *con, goto create_out; } + sock_set_mark(sock->sk, dlm_config.ci_mark); + /* Turn off Nagle's algorithm */ tcp_sock_set_nodelay(sock->sk); @@ -1185,6 +1258,7 @@ static int sctp_listen_for_all(void) } sock_set_rcvbuf(sock->sk, NEEDED_RMEM); + sock_set_mark(sock->sk, dlm_config.ci_mark); sctp_sock_set_nodelay(sock->sk); write_lock_bh(&sock->sk->sk_callback_lock); @@ -1396,7 +1470,7 @@ out: send_error: mutex_unlock(&con->sock_mutex); - close_connection(con, true, false, true); + close_connection(con, false, false, true); /* Requeue the send work. 
When the work daemon runs again, it will try a new connection, then call this function again. */ queue_work(send_workqueue, &con->swork); @@ -1528,6 +1602,12 @@ static void stop_conn(struct connection *con) _stop_conn(con, true); } +static void shutdown_conn(struct connection *con) +{ + if (con->shutdown_action) + con->shutdown_action(con); +} + static void free_conn(struct connection *con) { close_connection(con, true, true, true); @@ -1579,6 +1659,7 @@ void dlm_lowcomms_stop(void) mutex_lock(&connections_lock); dlm_allow_conn = 0; mutex_unlock(&connections_lock); + foreach_conn(shutdown_conn); work_flush(); clean_writequeues(); foreach_conn(free_conn); diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c index e7f550327d5d..e338c407cb75 100644 --- a/fs/dlm/netlink.c +++ b/fs/dlm/netlink.c @@ -113,7 +113,7 @@ static void fill_data(struct dlm_lock_data *data, struct dlm_lkb *lkb) void dlm_timeout_warn(struct dlm_lkb *lkb) { - struct sk_buff *uninitialized_var(send_skb); + struct sk_buff *send_skb; struct dlm_lock_data *data; size_t size; int rv; diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index af3eb02bbca1..f6a17d259db7 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c @@ -838,7 +838,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, out_release_free_unlock: crypto_free_shash(s->hash_tfm); out_free_unlock: - kzfree(s->block_aligned_filename); + kfree_sensitive(s->block_aligned_filename); out_unlock: mutex_unlock(s->tfm_mutex); out: @@ -847,7 +847,7 @@ out: key_put(auth_tok_key); } skcipher_request_free(s->skcipher_req); - kzfree(s->hash_desc); + kfree_sensitive(s->hash_desc); kfree(s); return rc; } diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index 8646ba76def3..c0dfd9647627 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c @@ -175,7 +175,7 @@ int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) } hlist_del(&daemon->euid_chain); mutex_unlock(&daemon->mux); - kzfree(daemon); + kfree_sensitive(daemon); out: return rc; } diff --git a/fs/efs/super.c b/fs/efs/super.c index 4a6ebff2af76..a4a945d0ac6a 100644 --- a/fs/efs/super.c +++ b/fs/efs/super.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/vfs.h> +#include <linux/blkdev.h> #include "efs.h" #include <linux/efs_vh.h> diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 07d279fd5d67..3d452443c545 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2019 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #ifndef __EROFS_FS_COMPRESS_H diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 64b56c7df023..459ecb42cbd3 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. 
- * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include "internal.h" @@ -265,7 +265,7 @@ submit_bio_out: */ static int erofs_raw_access_readpage(struct file *file, struct page *page) { - erofs_off_t uninitialized_var(last_block); + erofs_off_t last_block; struct bio *bio; trace_erofs_readpage(page, true); @@ -282,7 +282,7 @@ static int erofs_raw_access_readpage(struct file *file, struct page *page) static void erofs_raw_access_readahead(struct readahead_control *rac) { - erofs_off_t uninitialized_var(last_block); + erofs_off_t last_block; struct bio *bio = NULL; struct page *page; diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 7628816f2453..cbadbf55c6c2 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2019 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include "compress.h" diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c index d28c623dfef9..2776bb832127 100644 --- a/fs/erofs/dir.c +++ b/fs/erofs/dir.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include "internal.h" diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h index 385fa49c7749..9ad1615f4474 100644 --- a/fs/erofs/erofs_fs.h +++ b/fs/erofs/erofs_fs.h @@ -3,7 +3,7 @@ * EROFS (Enhanced ROM File System) on-disk format definition * * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #ifndef __EROFS_FS_H diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index 7dd4bbe9674f..139d0bed42f8 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -1,38 +1,87 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include "xattr.h" #include <trace/events/erofs.h> -/* no locking */ -static int erofs_read_inode(struct inode *inode, void *data) +/* + * if inode is successfully read, return its inode page (or sometimes + * the inode payload page if it's an extended inode) in order to fill + * inline data if possible. 
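The rewritten erofs_read_inode() in the hunk below has to cope with an extended (64-byte) on-disk inode that straddles a block. As a worked example with 4 KiB blocks (the numbers are illustrative only): an inode starting at offset 4064 leaves gotten = 4096 - 4064 = 32 bytes in the first meta page; those bytes are copied into the temporary "copied" buffer, the next meta page (blkaddr + 1) is read, the remaining 64 - 32 = 32 bytes are appended, and *ofs ends up as 32 so the xattr body and any inline data are then read from the second page.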
+ */ +static struct page *erofs_read_inode(struct inode *inode, + unsigned int *ofs) { + struct super_block *sb = inode->i_sb; + struct erofs_sb_info *sbi = EROFS_SB(sb); struct erofs_inode *vi = EROFS_I(inode); - struct erofs_inode_compact *dic = data; - struct erofs_inode_extended *die; + const erofs_off_t inode_loc = iloc(sbi, vi->nid); + + erofs_blk_t blkaddr, nblks = 0; + struct page *page; + struct erofs_inode_compact *dic; + struct erofs_inode_extended *die, *copied = NULL; + unsigned int ifmt; + int err; - const unsigned int ifmt = le16_to_cpu(dic->i_format); - struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb); - erofs_blk_t nblks = 0; + blkaddr = erofs_blknr(inode_loc); + *ofs = erofs_blkoff(inode_loc); - vi->datalayout = erofs_inode_datalayout(ifmt); + erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u", + __func__, vi->nid, *ofs, blkaddr); + + page = erofs_get_meta_page(sb, blkaddr); + if (IS_ERR(page)) { + erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", + vi->nid, PTR_ERR(page)); + return page; + } + dic = page_address(page) + *ofs; + ifmt = le16_to_cpu(dic->i_format); + + vi->datalayout = erofs_inode_datalayout(ifmt); if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } switch (erofs_inode_version(ifmt)) { case EROFS_INODE_LAYOUT_EXTENDED: - die = data; - vi->inode_isize = sizeof(struct erofs_inode_extended); + /* check if the inode acrosses page boundary */ + if (*ofs + vi->inode_isize <= PAGE_SIZE) { + *ofs += vi->inode_isize; + die = (struct erofs_inode_extended *)dic; + } else { + const unsigned int gotten = PAGE_SIZE - *ofs; + + copied = kmalloc(vi->inode_isize, GFP_NOFS); + if (!copied) { + err = -ENOMEM; + goto err_out; + } + memcpy(copied, dic, gotten); + unlock_page(page); + put_page(page); + + page = erofs_get_meta_page(sb, blkaddr + 1); + if (IS_ERR(page)) { + erofs_err(sb, "failed to get inode payload page (nid: %llu), err %ld", + vi->nid, PTR_ERR(page)); + kfree(copied); + return page; + } + *ofs = vi->inode_isize - gotten; + memcpy((u8 *)copied + gotten, page_address(page), *ofs); + die = copied; + } vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); @@ -69,9 +118,12 @@ static int erofs_read_inode(struct inode *inode, void *data) /* total blocks for compressed files */ if (erofs_inode_is_data_compressed(vi->datalayout)) nblks = le32_to_cpu(die->i_u.compressed_blocks); + + kfree(copied); break; case EROFS_INODE_LAYOUT_COMPACT: vi->inode_isize = sizeof(struct erofs_inode_compact); + *ofs += vi->inode_isize; vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); @@ -111,8 +163,8 @@ static int erofs_read_inode(struct inode *inode, void *data) erofs_err(inode->i_sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); - DBG_BUGON(1); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_out; } if (!nblks) @@ -120,13 +172,18 @@ static int erofs_read_inode(struct inode *inode, void *data) inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9; else inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK; - return 0; + return page; bogusimode: erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode, vi->nid); + err = -EFSCORRUPTED; +err_out: DBG_BUGON(1); - return -EFSCORRUPTED; + kfree(copied); + unlock_page(page); + put_page(page); + 
return ERR_PTR(err); } static int erofs_fill_symlink(struct inode *inode, void *data, @@ -146,7 +203,7 @@ static int erofs_fill_symlink(struct inode *inode, void *data, if (!lnk) return -ENOMEM; - m_pofs += vi->inode_isize + vi->xattr_isize; + m_pofs += vi->xattr_isize; /* inline symlink data shouldn't cross page boundary as well */ if (m_pofs + inode->i_size > PAGE_SIZE) { kfree(lnk); @@ -167,37 +224,17 @@ static int erofs_fill_symlink(struct inode *inode, void *data, static int erofs_fill_inode(struct inode *inode, int isdir) { - struct super_block *sb = inode->i_sb; struct erofs_inode *vi = EROFS_I(inode); struct page *page; - void *data; - int err; - erofs_blk_t blkaddr; unsigned int ofs; - erofs_off_t inode_loc; + int err = 0; trace_erofs_fill_inode(inode, isdir); - inode_loc = iloc(EROFS_SB(sb), vi->nid); - blkaddr = erofs_blknr(inode_loc); - ofs = erofs_blkoff(inode_loc); - - erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u", - __func__, vi->nid, ofs, blkaddr); - page = erofs_get_meta_page(sb, blkaddr); - - if (IS_ERR(page)) { - erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld", - vi->nid, PTR_ERR(page)); + /* read inode base data from disk */ + page = erofs_read_inode(inode, &ofs); + if (IS_ERR(page)) return PTR_ERR(page); - } - - DBG_BUGON(!PageUptodate(page)); - data = page_address(page); - - err = erofs_read_inode(inode, data + ofs); - if (err) - goto out_unlock; /* setup the new inode */ switch (inode->i_mode & S_IFMT) { @@ -210,7 +247,7 @@ static int erofs_fill_inode(struct inode *inode, int isdir) inode->i_fop = &erofs_dir_fops; break; case S_IFLNK: - err = erofs_fill_symlink(inode, data, ofs); + err = erofs_fill_symlink(inode, page_address(page), ofs); if (err) goto out_unlock; inode_nohighmem(inode); diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 1c077b7bb43d..67a7ec945686 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #ifndef __EROFS_INTERNAL_H diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c index 52f201e03c62..5f8cc7346c69 100644 --- a/fs/erofs/namei.c +++ b/fs/erofs/namei.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include "xattr.h" diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 7a13ffb07c23..ddaa516c008a 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include <linux/module.h> diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index 52d0be10f1aa..de9986d2f82f 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2018 HUAWEI, Inc. 
- * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include "internal.h" @@ -127,12 +127,6 @@ int erofs_workgroup_put(struct erofs_workgroup *grp) return count; } -static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp) -{ - erofs_workgroup_unfreeze(grp, 0); - __erofs_workgroup_free(grp); -} - static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, struct erofs_workgroup *grp) { @@ -162,11 +156,9 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi, */ DBG_BUGON(xa_erase(&sbi->managed_pslots, grp->index) != grp); - /* - * If managed cache is on, last refcount should indicate - * the related workstation. - */ - erofs_workgroup_unfreeze_final(grp); + /* last refcount should be connected with its managed pslot. */ + erofs_workgroup_unfreeze(grp, 0); + __erofs_workgroup_free(grp); return true; } diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c index 87e437e7b34f..c8c381eadcd6 100644 --- a/fs/erofs/xattr.c +++ b/fs/erofs/xattr.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include <linux/security.h> diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h index e4e5093f012c..815304bd335f 100644 --- a/fs/erofs/xattr.h +++ b/fs/erofs/xattr.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2017-2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #ifndef __EROFS_XATTR_H diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index be50a4d9d273..6c939def00f9 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include "zdata.h" @@ -43,13 +43,13 @@ void z_erofs_exit_zip_subsystem(void) static inline int z_erofs_init_workqueue(void) { const unsigned int onlinecpus = num_possible_cpus(); - const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE; /* * no need to spawn too many threads, limiting threads could minimum * scheduling overhead, perhaps per-CPU threads should be better? */ - z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags, + z_erofs_workqueue = alloc_workqueue("erofs_unzipd", + WQ_UNBOUND | WQ_HIGHPRI, onlinecpus + onlinecpus / 4); return z_erofs_workqueue ? 0 : -ENOMEM; } @@ -1161,7 +1161,7 @@ static void z_erofs_submit_queue(struct super_block *sb, struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; void *bi_private; /* since bio will be NULL, no need to initialize last_index */ - pgoff_t uninitialized_var(last_index); + pgoff_t last_index; unsigned int nr_bios = 0; struct bio *bio = NULL; diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h index 9b66c28b3ae9..68c9b29fc0ca 100644 --- a/fs/erofs/zdata.h +++ b/fs/erofs/zdata.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #ifndef __EROFS_FS_ZDATA_H diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 736db3a4cdef..ae325541884e 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2018-2019 HUAWEI, Inc. 
- * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #include "internal.h" @@ -359,7 +359,7 @@ static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m, return z_erofs_extent_lookback(m, m->delta[0]); case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: map->m_la = (lcn << lclusterbits) | m->clusterofs; break; @@ -416,7 +416,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN: if (endoff >= m.clusterofs) map->m_flags &= ~EROFS_MAP_ZIPPED; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_HEAD: if (endoff >= m.clusterofs) { map->m_la = (m.lcn << lclusterbits) | m.clusterofs; @@ -433,7 +433,7 @@ int z_erofs_map_blocks_iter(struct inode *inode, end = (m.lcn << lclusterbits) | m.clusterofs; map->m_flags |= EROFS_MAP_FULL_MAPPED; m.delta[0] = 1; - /* fallthrough */ + fallthrough; case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD: /* get the correspoinding first chunk */ err = z_erofs_extent_lookback(&m, m.delta[0]); diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h index 58556903aa94..1d67cbd38704 100644 --- a/fs/erofs/zpvec.h +++ b/fs/erofs/zpvec.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2018 HUAWEI, Inc. - * http://www.huawei.com/ + * https://www.huawei.com/ * Created by Gao Xiang <gaoxiang25@huawei.com> */ #ifndef __EROFS_FS_ZPVEC_H diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 12eebcdea9c8..8107e06d7f6f 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -1994,9 +1994,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) * not already there, and calling reverse_path_check() * during ep_insert(). */ - if (list_empty(&epi->ffd.file->f_tfile_llink)) - list_add(&epi->ffd.file->f_tfile_llink, - &tfile_check_list); + if (list_empty(&epi->ffd.file->f_tfile_llink)) { + if (get_file_rcu(epi->ffd.file)) + list_add(&epi->ffd.file->f_tfile_llink, + &tfile_check_list); + } } } mutex_unlock(&ep->mtx); @@ -2040,6 +2042,7 @@ static void clear_tfile_check_list(void) file = list_first_entry(&tfile_check_list, struct file, f_tfile_llink); list_del_init(&file->f_tfile_llink); + fput(file); } INIT_LIST_HEAD(&tfile_check_list); } @@ -2200,25 +2203,22 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, full_check = 1; if (is_file_epoll(tf.file)) { error = -ELOOP; - if (ep_loop_check(ep, tf.file) != 0) { - clear_tfile_check_list(); + if (ep_loop_check(ep, tf.file) != 0) goto error_tgt_fput; - } - } else + } else { + get_file(tf.file); list_add(&tf.file->f_tfile_llink, &tfile_check_list); + } error = epoll_mutex_lock(&ep->mtx, 0, nonblock); - if (error) { -out_del: - list_del(&tf.file->f_tfile_llink); + if (error) goto error_tgt_fput; - } if (is_file_epoll(tf.file)) { tep = tf.file->private_data; error = epoll_mutex_lock(&tep->mtx, 1, nonblock); if (error) { mutex_unlock(&ep->mtx); - goto out_del; + goto error_tgt_fput; } } } @@ -2239,8 +2239,6 @@ out_del: error = ep_insert(ep, epds, tf.file, fd, full_check); } else error = -EEXIST; - if (full_check) - clear_tfile_check_list(); break; case EPOLL_CTL_DEL: if (epi) @@ -2263,8 +2261,10 @@ out_del: mutex_unlock(&ep->mtx); error_tgt_fput: - if (full_check) + if (full_check) { + clear_tfile_check_list(); mutex_unlock(&epmutex); + } fdput(tf); error_fput: diff --git a/fs/exec.c b/fs/exec.c index e6e8a9a70327..a91003e28eaa 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -141,12 +141,14 @@ 
SYSCALL_DEFINE1(uselib, const char __user *, library) if (IS_ERR(file)) goto out; - error = -EINVAL; - if (!S_ISREG(file_inode(file)->i_mode)) - goto exit; - + /* + * may_open() has already checked for this, so it should be + * impossible to trip now. But we need to be extra cautious + * and check again at the very end too. + */ error = -EACCES; - if (path_noexec(&file->f_path)) + if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) || + path_noexec(&file->f_path))) goto exit; fsnotify_open(file); @@ -215,7 +217,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, * We are doing an exec(). 'current' is the process * doing the exec and bprm->mm is the new process's mm. */ - ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags, + ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags, &page, NULL, NULL); if (ret <= 0) return NULL; @@ -448,18 +450,26 @@ static int count(struct user_arg_ptr argv, int max) return i; } -static int prepare_arg_pages(struct linux_binprm *bprm, - struct user_arg_ptr argv, struct user_arg_ptr envp) +static int count_strings_kernel(const char *const *argv) { - unsigned long limit, ptr_size; + int i; + + if (!argv) + return 0; - bprm->argc = count(argv, MAX_ARG_STRINGS); - if (bprm->argc < 0) - return bprm->argc; + for (i = 0; argv[i]; ++i) { + if (i >= MAX_ARG_STRINGS) + return -E2BIG; + if (fatal_signal_pending(current)) + return -ERESTARTNOHAND; + cond_resched(); + } + return i; +} - bprm->envc = count(envp, MAX_ARG_STRINGS); - if (bprm->envc < 0) - return bprm->envc; +static int bprm_stack_limits(struct linux_binprm *bprm) +{ + unsigned long limit, ptr_size; /* * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM @@ -633,6 +643,20 @@ int copy_string_kernel(const char *arg, struct linux_binprm *bprm) } EXPORT_SYMBOL(copy_string_kernel); +static int copy_strings_kernel(int argc, const char *const *argv, + struct linux_binprm *bprm) +{ + while (argc-- > 0) { + int ret = copy_string_kernel(argv[argc], bprm); + if (ret < 0) + return ret; + if (fatal_signal_pending(current)) + return -ERESTARTNOHAND; + cond_resched(); + } + return 0; +} + #ifdef CONFIG_MMU /* @@ -887,11 +911,14 @@ static struct file *do_open_execat(int fd, struct filename *name, int flags) if (IS_ERR(file)) goto out; + /* + * may_open() has already checked for this, so it should be + * impossible to trip now. But we need to be extra cautious + * and check again at the very end too. + */ err = -EACCES; - if (!S_ISREG(file_inode(file)->i_mode)) - goto exit; - - if (path_noexec(&file->f_path)) + if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) || + path_noexec(&file->f_path))) goto exit; err = deny_write_access(file); @@ -1380,7 +1407,12 @@ int begin_new_exec(struct linux_binprm * bprm) if (retval) goto out_unlock; - set_fs(USER_DS); + /* + * Ensure that the uaccess routines can actually operate on userspace + * pointers: + */ + force_uaccess_begin(); + me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD | PF_NOFREEZE | PF_NO_SETAFFINITY); flush_thread(); @@ -1543,6 +1575,10 @@ static int prepare_bprm_creds(struct linux_binprm *bprm) static void free_bprm(struct linux_binprm *bprm) { + if (bprm->mm) { + acct_arg_size(bprm, 0); + mmput(bprm->mm); + } free_arg_pages(bprm); if (bprm->cred) { mutex_unlock(¤t->signal->cred_guard_mutex); @@ -1557,9 +1593,43 @@ static void free_bprm(struct linux_binprm *bprm) /* If a binfmt changed the interp, free it. 
*/ if (bprm->interp != bprm->filename) kfree(bprm->interp); + kfree(bprm->fdpath); kfree(bprm); } +static struct linux_binprm *alloc_bprm(int fd, struct filename *filename) +{ + struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); + int retval = -ENOMEM; + if (!bprm) + goto out; + + if (fd == AT_FDCWD || filename->name[0] == '/') { + bprm->filename = filename->name; + } else { + if (filename->name[0] == '\0') + bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); + else + bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", + fd, filename->name); + if (!bprm->fdpath) + goto out_free; + + bprm->filename = bprm->fdpath; + } + bprm->interp = bprm->filename; + + retval = bprm_mm_init(bprm); + if (retval) + goto out_free; + return bprm; + +out_free: + free_bprm(bprm); +out: + return ERR_PTR(retval); +} + int bprm_change_interp(const char *interp, struct linux_binprm *bprm) { /* If a binfmt changed the interp, free it first. */ @@ -1818,53 +1888,25 @@ static int exec_binprm(struct linux_binprm *bprm) /* * sys_execve() executes a new program. */ -static int __do_execve_file(int fd, struct filename *filename, - struct user_arg_ptr argv, - struct user_arg_ptr envp, - int flags, struct file *file) +static int bprm_execve(struct linux_binprm *bprm, + int fd, struct filename *filename, int flags) { - char *pathbuf = NULL; - struct linux_binprm *bprm; + struct file *file; struct files_struct *displaced; int retval; - if (IS_ERR(filename)) - return PTR_ERR(filename); - - /* - * We move the actual failure in case of RLIMIT_NPROC excess from - * set*uid() to execve() because too many poorly written programs - * don't check setuid() return code. Here we additionally recheck - * whether NPROC limit is still exceeded. - */ - if ((current->flags & PF_NPROC_EXCEEDED) && - atomic_read(¤t_user()->processes) > rlimit(RLIMIT_NPROC)) { - retval = -EAGAIN; - goto out_ret; - } - - /* We're below the limit (still or again), so we don't want to make - * further execve() calls fail. */ - current->flags &= ~PF_NPROC_EXCEEDED; - retval = unshare_files(&displaced); if (retval) - goto out_ret; - - retval = -ENOMEM; - bprm = kzalloc(sizeof(*bprm), GFP_KERNEL); - if (!bprm) - goto out_files; + return retval; retval = prepare_bprm_creds(bprm); if (retval) - goto out_free; + goto out_files; check_unsafe_exec(bprm); current->in_execve = 1; - if (!file) - file = do_open_execat(fd, filename, flags); + file = do_open_execat(fd, filename, flags); retval = PTR_ERR(file); if (IS_ERR(file)) goto out_unmark; @@ -1872,57 +1914,20 @@ static int __do_execve_file(int fd, struct filename *filename, sched_exec(); bprm->file = file; - if (!filename) { - bprm->filename = "none"; - } else if (fd == AT_FDCWD || filename->name[0] == '/') { - bprm->filename = filename->name; - } else { - if (filename->name[0] == '\0') - pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd); - else - pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s", - fd, filename->name); - if (!pathbuf) { - retval = -ENOMEM; - goto out_unmark; - } - /* - * Record that a name derived from an O_CLOEXEC fd will be - * inaccessible after exec. Relies on having exclusive access to - * current->files (due to unshare_files above). 
- */ - if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) - bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; - bprm->filename = pathbuf; - } - bprm->interp = bprm->filename; - - retval = bprm_mm_init(bprm); - if (retval) - goto out_unmark; - - retval = prepare_arg_pages(bprm, argv, envp); - if (retval < 0) - goto out; + /* + * Record that a name derived from an O_CLOEXEC fd will be + * inaccessible after exec. Relies on having exclusive access to + * current->files (due to unshare_files above). + */ + if (bprm->fdpath && + close_on_exec(fd, rcu_dereference_raw(current->files->fdt))) + bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE; /* Set the unchanging part of bprm->cred */ retval = security_bprm_creds_for_exec(bprm); if (retval) goto out; - retval = copy_string_kernel(bprm->filename, bprm); - if (retval < 0) - goto out; - - bprm->exec = bprm->p; - retval = copy_strings(bprm->envc, envp, bprm); - if (retval < 0) - goto out; - - retval = copy_strings(bprm->argc, argv, bprm); - if (retval < 0) - goto out; - retval = exec_binprm(bprm); if (retval < 0) goto out; @@ -1933,10 +1938,6 @@ static int __do_execve_file(int fd, struct filename *filename, rseq_execve(current); acct_update_integrals(current); task_numa_free(current, false); - free_bprm(bprm); - kfree(pathbuf); - if (filename) - putname(filename); if (displaced) put_files_struct(displaced); return retval; @@ -1950,25 +1951,15 @@ out: */ if (bprm->point_of_no_return && !fatal_signal_pending(current)) force_sigsegv(SIGSEGV); - if (bprm->mm) { - acct_arg_size(bprm, 0); - mmput(bprm->mm); - } out_unmark: current->fs->in_exec = 0; current->in_execve = 0; -out_free: - free_bprm(bprm); - kfree(pathbuf); - out_files: if (displaced) reset_files_struct(displaced); -out_ret: - if (filename) - putname(filename); + return retval; } @@ -1977,18 +1968,124 @@ static int do_execveat_common(int fd, struct filename *filename, struct user_arg_ptr envp, int flags) { - return __do_execve_file(fd, filename, argv, envp, flags, NULL); + struct linux_binprm *bprm; + int retval; + + if (IS_ERR(filename)) + return PTR_ERR(filename); + + /* + * We move the actual failure in case of RLIMIT_NPROC excess from + * set*uid() to execve() because too many poorly written programs + * don't check setuid() return code. Here we additionally recheck + * whether NPROC limit is still exceeded. + */ + if ((current->flags & PF_NPROC_EXCEEDED) && + atomic_read(¤t_user()->processes) > rlimit(RLIMIT_NPROC)) { + retval = -EAGAIN; + goto out_ret; + } + + /* We're below the limit (still or again), so we don't want to make + * further execve() calls fail. 
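alloc_bprm() above now centralizes the "/dev/fd/%d" / "/dev/fd/%d/%s" naming that __do_execve_file() used to build inline, and BINPRM_FLAGS_PATH_INACCESSIBLE is set when that fdpath was derived from a close-on-exec fd. A small userspace demo of the fd-based form, illustrative only and not part of the patch (it only assumes the execveat(2) syscall and an ELF /bin/sh):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char *argv[] = { "sh", "-c", "echo exec via /dev/fd succeeded", (char *)0 };
	char *envp[] = { (char *)0 };
	int fd = open("/bin/sh", O_PATH | O_CLOEXEC);

	if (fd < 0)
		return 1;

	/*
	 * Empty path + AT_EMPTY_PATH: alloc_bprm() names this image
	 * "/dev/fd/<fd>", and because the fd is O_CLOEXEC that name is
	 * recorded as inaccessible after exec (only matters for scripts).
	 */
	syscall(SYS_execveat, fd, "", argv, envp, AT_EMPTY_PATH);

	return 1;	/* only reached if execveat() failed */
}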
*/ + current->flags &= ~PF_NPROC_EXCEEDED; + + bprm = alloc_bprm(fd, filename); + if (IS_ERR(bprm)) { + retval = PTR_ERR(bprm); + goto out_ret; + } + + retval = count(argv, MAX_ARG_STRINGS); + if (retval < 0) + goto out_free; + bprm->argc = retval; + + retval = count(envp, MAX_ARG_STRINGS); + if (retval < 0) + goto out_free; + bprm->envc = retval; + + retval = bprm_stack_limits(bprm); + if (retval < 0) + goto out_free; + + retval = copy_string_kernel(bprm->filename, bprm); + if (retval < 0) + goto out_free; + bprm->exec = bprm->p; + + retval = copy_strings(bprm->envc, envp, bprm); + if (retval < 0) + goto out_free; + + retval = copy_strings(bprm->argc, argv, bprm); + if (retval < 0) + goto out_free; + + retval = bprm_execve(bprm, fd, filename, flags); +out_free: + free_bprm(bprm); + +out_ret: + putname(filename); + return retval; } -int do_execve_file(struct file *file, void *__argv, void *__envp) +int kernel_execve(const char *kernel_filename, + const char *const *argv, const char *const *envp) { - struct user_arg_ptr argv = { .ptr.native = __argv }; - struct user_arg_ptr envp = { .ptr.native = __envp }; + struct filename *filename; + struct linux_binprm *bprm; + int fd = AT_FDCWD; + int retval; + + filename = getname_kernel(kernel_filename); + if (IS_ERR(filename)) + return PTR_ERR(filename); - return __do_execve_file(AT_FDCWD, NULL, argv, envp, 0, file); + bprm = alloc_bprm(fd, filename); + if (IS_ERR(bprm)) { + retval = PTR_ERR(bprm); + goto out_ret; + } + + retval = count_strings_kernel(argv); + if (retval < 0) + goto out_free; + bprm->argc = retval; + + retval = count_strings_kernel(envp); + if (retval < 0) + goto out_free; + bprm->envc = retval; + + retval = bprm_stack_limits(bprm); + if (retval < 0) + goto out_free; + + retval = copy_string_kernel(bprm->filename, bprm); + if (retval < 0) + goto out_free; + bprm->exec = bprm->p; + + retval = copy_strings_kernel(bprm->envc, envp, bprm); + if (retval < 0) + goto out_free; + + retval = copy_strings_kernel(bprm->argc, argv, bprm); + if (retval < 0) + goto out_free; + + retval = bprm_execve(bprm, fd, filename, 0); +out_free: + free_bprm(bprm); +out_ret: + putname(filename); + return retval; } -int do_execve(struct filename *filename, +static int do_execve(struct filename *filename, const char __user *const __user *__argv, const char __user *const __user *__envp) { @@ -1997,7 +2094,7 @@ int do_execve(struct filename *filename, return do_execveat_common(AT_FDCWD, filename, argv, envp, 0); } -int do_execveat(int fd, struct filename *filename, +static int do_execveat(int fd, struct filename *filename, const char __user *const __user *__argv, const char __user *const __user *__envp, int flags) diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c index 4055eb00ea9b..a987919686c0 100644 --- a/fs/exfat/balloc.c +++ b/fs/exfat/balloc.c @@ -158,7 +158,7 @@ int exfat_set_bitmap(struct inode *inode, unsigned int clu) b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx); set_bit_le(b, sbi->vol_amap[i]->b_data); - exfat_update_bh(sb, sbi->vol_amap[i], IS_DIRSYNC(inode)); + exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode)); return 0; } @@ -180,7 +180,7 @@ void exfat_clear_bitmap(struct inode *inode, unsigned int clu) b = BITMAP_OFFSET_BIT_IN_SECTOR(sb, ent_idx); clear_bit_le(b, sbi->vol_amap[i]->b_data); - exfat_update_bh(sb, sbi->vol_amap[i], IS_DIRSYNC(inode)); + exfat_update_bh(sbi->vol_amap[i], IS_DIRSYNC(inode)); if (opts->discard) { int ret_discard; diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 119abf0d8dd6..573659bfbc55 100644 --- a/fs/exfat/dir.c 
+++ b/fs/exfat/dir.c @@ -470,7 +470,7 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir, &ep->dentry.file.access_date, NULL); - exfat_update_bh(sb, bh, IS_DIRSYNC(inode)); + exfat_update_bh(bh, IS_DIRSYNC(inode)); brelse(bh); ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, §or); @@ -480,7 +480,7 @@ int exfat_init_dir_entry(struct inode *inode, struct exfat_chain *p_dir, exfat_init_stream_entry(ep, (type == TYPE_FILE) ? ALLOC_FAT_CHAIN : ALLOC_NO_FAT_CHAIN, start_clu, size); - exfat_update_bh(sb, bh, IS_DIRSYNC(inode)); + exfat_update_bh(bh, IS_DIRSYNC(inode)); brelse(bh); return 0; @@ -516,7 +516,7 @@ int exfat_update_dir_chksum(struct inode *inode, struct exfat_chain *p_dir, } fep->dentry.file.checksum = cpu_to_le16(chksum); - exfat_update_bh(sb, fbh, IS_DIRSYNC(inode)); + exfat_update_bh(fbh, IS_DIRSYNC(inode)); release_fbh: brelse(fbh); return ret; @@ -538,7 +538,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir, return -EIO; ep->dentry.file.num_ext = (unsigned char)(num_entries - 1); - exfat_update_bh(sb, bh, sync); + exfat_update_bh(bh, sync); brelse(bh); ep = exfat_get_dentry(sb, p_dir, entry + 1, &bh, §or); @@ -547,7 +547,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir, ep->dentry.stream.name_len = p_uniname->name_len; ep->dentry.stream.name_hash = cpu_to_le16(p_uniname->name_hash); - exfat_update_bh(sb, bh, sync); + exfat_update_bh(bh, sync); brelse(bh); for (i = EXFAT_FIRST_CLUSTER; i < num_entries; i++) { @@ -556,7 +556,7 @@ int exfat_init_ext_entry(struct inode *inode, struct exfat_chain *p_dir, return -EIO; exfat_init_name_entry(ep, uniname); - exfat_update_bh(sb, bh, sync); + exfat_update_bh(bh, sync); brelse(bh); uniname += EXFAT_FILE_NAME_LEN; } @@ -580,7 +580,7 @@ int exfat_remove_entries(struct inode *inode, struct exfat_chain *p_dir, return -EIO; exfat_set_entry_type(ep, TYPE_DELETED); - exfat_update_bh(sb, bh, IS_DIRSYNC(inode)); + exfat_update_bh(bh, IS_DIRSYNC(inode)); brelse(bh); } @@ -604,16 +604,20 @@ void exfat_update_dir_chksum_with_entry_set(struct exfat_entry_set_cache *es) es->modified = true; } -void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync) +int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync) { - int i; + int i, err = 0; - for (i = 0; i < es->num_bh; i++) { - if (es->modified) - exfat_update_bh(es->sb, es->bh[i], sync); - brelse(es->bh[i]); - } + if (es->modified) + err = exfat_update_bhs(es->bh, es->num_bh, sync); + + for (i = 0; i < es->num_bh; i++) + if (err) + bforget(es->bh[i]); + else + brelse(es->bh[i]); kfree(es); + return err; } static int exfat_walk_fat_chain(struct super_block *sb, diff --git a/fs/exfat/exfat_fs.h b/fs/exfat/exfat_fs.h index 75c7bdbeba6d..95d717f8620c 100644 --- a/fs/exfat/exfat_fs.h +++ b/fs/exfat/exfat_fs.h @@ -13,8 +13,6 @@ #define EXFAT_SUPER_MAGIC 0x2011BAB0UL #define EXFAT_ROOT_INO 1 -#define EXFAT_SB_DIRTY 0 - #define EXFAT_CLUSTERS_UNTRACKED (~0u) /* @@ -226,7 +224,8 @@ struct exfat_sb_info { unsigned int num_FAT_sectors; /* num of FAT sectors */ unsigned int root_dir; /* root dir cluster */ unsigned int dentries_per_clu; /* num of dentries per cluster */ - unsigned int vol_flag; /* volume dirty flag */ + unsigned int vol_flags; /* volume flags */ + unsigned int vol_flags_persistent; /* volume flags to retain */ struct buffer_head *boot_bh; /* buffer_head of BOOT sector */ unsigned int map_clu; /* allocation bitmap start cluster */ @@ -238,7 +237,6 @@ struct exfat_sb_info { unsigned int clu_srch_ptr; 
/* cluster search pointer */ unsigned int used_clusters; /* number of used clusters */ - unsigned long s_state; struct mutex s_lock; /* superblock lock */ struct exfat_mount_options options; struct nls_table *nls_io; /* Charset used for input and display */ @@ -383,7 +381,8 @@ static inline int exfat_sector_to_cluster(struct exfat_sb_info *sbi, } /* super.c */ -int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag); +int exfat_set_volume_dirty(struct super_block *sb); +int exfat_clear_volume_dirty(struct super_block *sb); /* fatent.c */ #define exfat_get_next_cluster(sb, pclu) exfat_ent_get(sb, *(pclu), pclu) @@ -463,7 +462,7 @@ struct exfat_dentry *exfat_get_dentry_cached(struct exfat_entry_set_cache *es, int num); struct exfat_entry_set_cache *exfat_get_dentry_set(struct super_block *sb, struct exfat_chain *p_dir, int entry, unsigned int type); -void exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync); +int exfat_free_dentry_set(struct exfat_entry_set_cache *es, int sync); int exfat_count_dir_entries(struct super_block *sb, struct exfat_chain *p_dir); /* inode.c */ @@ -515,7 +514,8 @@ void exfat_set_entry_time(struct exfat_sb_info *sbi, struct timespec64 *ts, u8 *tz, __le16 *time, __le16 *date, u8 *time_cs); u16 exfat_calc_chksum16(void *data, int len, u16 chksum, int type); u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type); -void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync); +void exfat_update_bh(struct buffer_head *bh, int sync); +int exfat_update_bhs(struct buffer_head **bhs, int nr_bhs, int sync); void exfat_chain_set(struct exfat_chain *ec, unsigned int dir, unsigned int size, unsigned char flags); void exfat_chain_dup(struct exfat_chain *dup, struct exfat_chain *ec); diff --git a/fs/exfat/exfat_raw.h b/fs/exfat/exfat_raw.h index 350ce59cc324..6aec6288e1f2 100644 --- a/fs/exfat/exfat_raw.h +++ b/fs/exfat/exfat_raw.h @@ -14,9 +14,8 @@ #define EXFAT_MAX_FILE_LEN 255 -#define VOL_CLEAN 0x0000 -#define VOL_DIRTY 0x0002 -#define ERR_MEDIUM 0x0004 +#define VOLUME_DIRTY 0x0002 +#define MEDIA_FAILURE 0x0004 #define EXFAT_EOF_CLUSTER 0xFFFFFFFFu #define EXFAT_BAD_CLUSTER 0xFFFFFFF7u diff --git a/fs/exfat/fatent.c b/fs/exfat/fatent.c index 4e5c5c9c0f2d..c3c9afee7418 100644 --- a/fs/exfat/fatent.c +++ b/fs/exfat/fatent.c @@ -75,7 +75,7 @@ int exfat_ent_set(struct super_block *sb, unsigned int loc, fat_entry = (__le32 *)&(bh->b_data[off]); *fat_entry = cpu_to_le32(content); - exfat_update_bh(sb, bh, sb->s_flags & SB_SYNCHRONOUS); + exfat_update_bh(bh, sb->s_flags & SB_SYNCHRONOUS); exfat_mirror_bh(sb, sec, bh); brelse(bh); return 0; @@ -174,7 +174,6 @@ int exfat_free_cluster(struct inode *inode, struct exfat_chain *p_chain) return -EIO; } - set_bit(EXFAT_SB_DIRTY, &sbi->s_state); clu = p_chain->dir; if (p_chain->flags == ALLOC_NO_FAT_CHAIN) { @@ -230,21 +229,6 @@ int exfat_find_last_cluster(struct super_block *sb, struct exfat_chain *p_chain, return 0; } -static inline int exfat_sync_bhs(struct buffer_head **bhs, int nr_bhs) -{ - int i, err = 0; - - for (i = 0; i < nr_bhs; i++) - write_dirty_buffer(bhs[i], 0); - - for (i = 0; i < nr_bhs; i++) { - wait_on_buffer(bhs[i]); - if (!err && !buffer_uptodate(bhs[i])) - err = -EIO; - } - return err; -} - int exfat_zeroed_cluster(struct inode *dir, unsigned int clu) { struct super_block *sb = dir->i_sb; @@ -266,41 +250,23 @@ int exfat_zeroed_cluster(struct inode *dir, unsigned int clu) } /* Zeroing the unused blocks on this cluster */ - n = 0; while (blknr < last_blknr) { - 
bhs[n] = sb_getblk(sb, blknr); - if (!bhs[n]) { - err = -ENOMEM; - goto release_bhs; - } - memset(bhs[n]->b_data, 0, sb->s_blocksize); - exfat_update_bh(sb, bhs[n], 0); - - n++; - blknr++; - - if (n == nr_bhs) { - if (IS_DIRSYNC(dir)) { - err = exfat_sync_bhs(bhs, n); - if (err) - goto release_bhs; + for (n = 0; n < nr_bhs && blknr < last_blknr; n++, blknr++) { + bhs[n] = sb_getblk(sb, blknr); + if (!bhs[n]) { + err = -ENOMEM; + goto release_bhs; } - - for (i = 0; i < n; i++) - brelse(bhs[i]); - n = 0; + memset(bhs[n]->b_data, 0, sb->s_blocksize); } - } - if (IS_DIRSYNC(dir)) { - err = exfat_sync_bhs(bhs, n); + err = exfat_update_bhs(bhs, n, IS_DIRSYNC(dir)); if (err) goto release_bhs; - } - - for (i = 0; i < n; i++) - brelse(bhs[i]); + for (i = 0; i < n; i++) + brelse(bhs[i]); + } return 0; release_bhs: @@ -358,8 +324,6 @@ int exfat_alloc_cluster(struct inode *inode, unsigned int num_alloc, } } - set_bit(EXFAT_SB_DIRTY, &sbi->s_state); - p_chain->dir = EXFAT_EOF_CLUSTER; while ((new_clu = exfat_find_free_bitmap(sb, hint_clu)) != diff --git a/fs/exfat/file.c b/fs/exfat/file.c index a6a063830edc..f41f523a58ad 100644 --- a/fs/exfat/file.c +++ b/fs/exfat/file.c @@ -106,7 +106,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) if (ei->type != TYPE_FILE && ei->type != TYPE_DIR) return -EPERM; - exfat_set_vol_flags(sb, VOL_DIRTY); + exfat_set_volume_dirty(sb); num_clusters_new = EXFAT_B_TO_CLU_ROUND_UP(i_size_read(inode), sbi); num_clusters_phys = @@ -154,6 +154,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) struct timespec64 ts; struct exfat_dentry *ep, *ep2; struct exfat_entry_set_cache *es; + int err; es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES); @@ -188,7 +189,9 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) } exfat_update_dir_chksum_with_entry_set(es); - exfat_free_dentry_set(es, inode_needs_sync(inode)); + err = exfat_free_dentry_set(es, inode_needs_sync(inode)); + if (err) + return err; } /* cut off from the FAT chain */ @@ -217,7 +220,7 @@ int __exfat_truncate(struct inode *inode, loff_t new_size) if (exfat_free_cluster(inode, &clu)) return -EIO; - exfat_set_vol_flags(sb, VOL_CLEAN); + exfat_clear_volume_dirty(sb); return 0; } diff --git a/fs/exfat/inode.c b/fs/exfat/inode.c index cf9ca6c4d046..7f90204adef5 100644 --- a/fs/exfat/inode.c +++ b/fs/exfat/inode.c @@ -39,7 +39,7 @@ static int __exfat_write_inode(struct inode *inode, int sync) if (is_dir && ei->dir.dir == sbi->root_dir && ei->entry == -1) return 0; - exfat_set_vol_flags(sb, VOL_DIRTY); + exfat_set_volume_dirty(sb); /* get the directory entry of given file or directory */ es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES); @@ -77,8 +77,7 @@ static int __exfat_write_inode(struct inode *inode, int sync) ep2->dentry.stream.size = ep2->dentry.stream.valid_size; exfat_update_dir_chksum_with_entry_set(es); - exfat_free_dentry_set(es, sync); - return 0; + return exfat_free_dentry_set(es, sync); } int exfat_write_inode(struct inode *inode, struct writeback_control *wbc) @@ -168,7 +167,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, } if (*clu == EXFAT_EOF_CLUSTER) { - exfat_set_vol_flags(sb, VOL_DIRTY); + exfat_set_volume_dirty(sb); new_clu.dir = (last_clu == EXFAT_EOF_CLUSTER) ? 
EXFAT_EOF_CLUSTER : last_clu + 1; @@ -222,6 +221,7 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, if (ei->dir.dir != DIR_DELETED && modified) { struct exfat_dentry *ep; struct exfat_entry_set_cache *es; + int err; es = exfat_get_dentry_set(sb, &(ei->dir), ei->entry, ES_ALL_ENTRIES); @@ -240,8 +240,9 @@ static int exfat_map_cluster(struct inode *inode, unsigned int clu_offset, ep->dentry.stream.valid_size; exfat_update_dir_chksum_with_entry_set(es); - exfat_free_dentry_set(es, inode_needs_sync(inode)); - + err = exfat_free_dentry_set(es, inode_needs_sync(inode)); + if (err) + return err; } /* end of if != DIR_DELETED */ inode->i_blocks += diff --git a/fs/exfat/misc.c b/fs/exfat/misc.c index 17d41f3d3709..d34e6193258d 100644 --- a/fs/exfat/misc.c +++ b/fs/exfat/misc.c @@ -163,9 +163,8 @@ u32 exfat_calc_chksum32(void *data, int len, u32 chksum, int type) return chksum; } -void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync) +void exfat_update_bh(struct buffer_head *bh, int sync) { - set_bit(EXFAT_SB_DIRTY, &EXFAT_SB(sb)->s_state); set_buffer_uptodate(bh); mark_buffer_dirty(bh); @@ -173,6 +172,25 @@ void exfat_update_bh(struct super_block *sb, struct buffer_head *bh, int sync) sync_dirty_buffer(bh); } +int exfat_update_bhs(struct buffer_head **bhs, int nr_bhs, int sync) +{ + int i, err = 0; + + for (i = 0; i < nr_bhs; i++) { + set_buffer_uptodate(bhs[i]); + mark_buffer_dirty(bhs[i]); + if (sync) + write_dirty_buffer(bhs[i], 0); + } + + for (i = 0; i < nr_bhs && sync; i++) { + wait_on_buffer(bhs[i]); + if (!err && !buffer_uptodate(bhs[i])) + err = -EIO; + } + return err; +} + void exfat_chain_set(struct exfat_chain *ec, unsigned int dir, unsigned int size, unsigned char flags) { diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index 2b9e21094a96..e73f20f66cb2 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -387,7 +387,7 @@ static int exfat_find_empty_entry(struct inode *inode, ep->dentry.stream.valid_size = cpu_to_le64(size); ep->dentry.stream.size = ep->dentry.stream.valid_size; ep->dentry.stream.flags = p_dir->flags; - exfat_update_bh(sb, bh, IS_DIRSYNC(inode)); + exfat_update_bh(bh, IS_DIRSYNC(inode)); brelse(bh); if (exfat_update_dir_chksum(inode, &(ei->dir), ei->entry)) @@ -562,10 +562,10 @@ static int exfat_create(struct inode *dir, struct dentry *dentry, umode_t mode, int err; mutex_lock(&EXFAT_SB(sb)->s_lock); - exfat_set_vol_flags(sb, VOL_DIRTY); + exfat_set_volume_dirty(sb); err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_FILE, &info); - exfat_set_vol_flags(sb, VOL_CLEAN); + exfat_clear_volume_dirty(sb); if (err) goto unlock; @@ -834,7 +834,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry) num_entries++; brelse(bh); - exfat_set_vol_flags(sb, VOL_DIRTY); + exfat_set_volume_dirty(sb); /* update the directory entry */ if (exfat_remove_entries(dir, &cdir, entry, 0, num_entries)) { err = -EIO; @@ -843,7 +843,7 @@ static int exfat_unlink(struct inode *dir, struct dentry *dentry) /* This doesn't modify ei */ ei->dir.dir = DIR_DELETED; - exfat_set_vol_flags(sb, VOL_CLEAN); + exfat_clear_volume_dirty(sb); inode_inc_iversion(dir); dir->i_mtime = dir->i_atime = current_time(dir); @@ -873,10 +873,10 @@ static int exfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) int err; mutex_lock(&EXFAT_SB(sb)->s_lock); - exfat_set_vol_flags(sb, VOL_DIRTY); + exfat_set_volume_dirty(sb); err = exfat_add_entry(dir, dentry->d_name.name, &cdir, TYPE_DIR, &info); - exfat_set_vol_flags(sb, VOL_CLEAN); + 
exfat_clear_volume_dirty(sb); if (err) goto unlock; @@ -1001,14 +1001,14 @@ static int exfat_rmdir(struct inode *dir, struct dentry *dentry) num_entries++; brelse(bh); - exfat_set_vol_flags(sb, VOL_DIRTY); + exfat_set_volume_dirty(sb); err = exfat_remove_entries(dir, &cdir, entry, 0, num_entries); if (err) { exfat_err(sb, "failed to exfat_remove_entries : err(%d)", err); goto unlock; } ei->dir.dir = DIR_DELETED; - exfat_set_vol_flags(sb, VOL_CLEAN); + exfat_clear_volume_dirty(sb); inode_inc_iversion(dir); dir->i_mtime = dir->i_atime = current_time(dir); @@ -1071,7 +1071,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir, epnew->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE); ei->attr |= ATTR_ARCHIVE; } - exfat_update_bh(sb, new_bh, sync); + exfat_update_bh(new_bh, sync); brelse(old_bh); brelse(new_bh); @@ -1087,7 +1087,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir, } memcpy(epnew, epold, DENTRY_SIZE); - exfat_update_bh(sb, new_bh, sync); + exfat_update_bh(new_bh, sync); brelse(old_bh); brelse(new_bh); @@ -1104,7 +1104,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir, epold->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE); ei->attr |= ATTR_ARCHIVE; } - exfat_update_bh(sb, old_bh, sync); + exfat_update_bh(old_bh, sync); brelse(old_bh); ret = exfat_init_ext_entry(inode, p_dir, oldentry, num_new_entries, p_uniname); @@ -1159,7 +1159,7 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir, epnew->dentry.file.attr |= cpu_to_le16(ATTR_ARCHIVE); ei->attr |= ATTR_ARCHIVE; } - exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode)); + exfat_update_bh(new_bh, IS_DIRSYNC(inode)); brelse(mov_bh); brelse(new_bh); @@ -1175,7 +1175,7 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir, } memcpy(epnew, epmov, DENTRY_SIZE); - exfat_update_bh(sb, new_bh, IS_DIRSYNC(inode)); + exfat_update_bh(new_bh, IS_DIRSYNC(inode)); brelse(mov_bh); brelse(new_bh); @@ -1300,7 +1300,7 @@ static int __exfat_rename(struct inode *old_parent_inode, if (ret) goto out; - exfat_set_vol_flags(sb, VOL_DIRTY); + exfat_set_volume_dirty(sb); if (olddir.dir == newdir.dir) ret = exfat_rename_file(new_parent_inode, &olddir, dentry, @@ -1355,7 +1355,7 @@ del_out: */ new_ei->dir.dir = DIR_DELETED; } - exfat_set_vol_flags(sb, VOL_CLEAN); + exfat_clear_volume_dirty(sb); out: return ret; } diff --git a/fs/exfat/super.c b/fs/exfat/super.c index 253a92460d52..3b6a1659892f 100644 --- a/fs/exfat/super.c +++ b/fs/exfat/super.c @@ -45,9 +45,6 @@ static void exfat_put_super(struct super_block *sb) struct exfat_sb_info *sbi = EXFAT_SB(sb); mutex_lock(&sbi->s_lock); - if (test_and_clear_bit(EXFAT_SB_DIRTY, &sbi->s_state)) - sync_blockdev(sb->s_bdev); - exfat_set_vol_flags(sb, VOL_CLEAN); exfat_free_bitmap(sbi); brelse(sbi->boot_bh); mutex_unlock(&sbi->s_lock); @@ -60,13 +57,14 @@ static int exfat_sync_fs(struct super_block *sb, int wait) struct exfat_sb_info *sbi = EXFAT_SB(sb); int err = 0; + if (!wait) + return 0; + /* If there are some dirty buffers in the bdev inode */ mutex_lock(&sbi->s_lock); - if (test_and_clear_bit(EXFAT_SB_DIRTY, &sbi->s_state)) { - sync_blockdev(sb->s_bdev); - if (exfat_set_vol_flags(sb, VOL_CLEAN)) - err = -EIO; - } + sync_blockdev(sb->s_bdev); + if (exfat_clear_volume_dirty(sb)) + err = -EIO; mutex_unlock(&sbi->s_lock); return err; } @@ -98,17 +96,20 @@ static int exfat_statfs(struct dentry *dentry, struct kstatfs *buf) return 0; } -int exfat_set_vol_flags(struct super_block *sb, 
unsigned short new_flag) +static int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flags) { struct exfat_sb_info *sbi = EXFAT_SB(sb); struct boot_sector *p_boot = (struct boot_sector *)sbi->boot_bh->b_data; bool sync; + /* retain persistent-flags */ + new_flags |= sbi->vol_flags_persistent; + /* flags are not changed */ - if (sbi->vol_flag == new_flag) + if (sbi->vol_flags == new_flags) return 0; - sbi->vol_flag = new_flag; + sbi->vol_flags = new_flags; /* skip updating volume dirty flag, * if this volume has been mounted with read-only @@ -116,9 +117,9 @@ int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag) if (sb_rdonly(sb)) return 0; - p_boot->vol_flags = cpu_to_le16(new_flag); + p_boot->vol_flags = cpu_to_le16(new_flags); - if (new_flag == VOL_DIRTY && !buffer_dirty(sbi->boot_bh)) + if ((new_flags & VOLUME_DIRTY) && !buffer_dirty(sbi->boot_bh)) sync = true; else sync = false; @@ -131,6 +132,20 @@ int exfat_set_vol_flags(struct super_block *sb, unsigned short new_flag) return 0; } +int exfat_set_volume_dirty(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + return exfat_set_vol_flags(sb, sbi->vol_flags | VOLUME_DIRTY); +} + +int exfat_clear_volume_dirty(struct super_block *sb) +{ + struct exfat_sb_info *sbi = EXFAT_SB(sb); + + return exfat_set_vol_flags(sb, sbi->vol_flags & ~VOLUME_DIRTY); +} + static int exfat_show_options(struct seq_file *m, struct dentry *root) { struct super_block *sb = root->d_sb; @@ -459,7 +474,8 @@ static int exfat_read_boot_sector(struct super_block *sb) sbi->dentries_per_clu = 1 << (sbi->cluster_size_bits - DENTRY_SIZE_BITS); - sbi->vol_flag = le16_to_cpu(p_boot->vol_flags); + sbi->vol_flags = le16_to_cpu(p_boot->vol_flags); + sbi->vol_flags_persistent = sbi->vol_flags & (VOLUME_DIRTY | MEDIA_FAILURE); sbi->clu_srch_ptr = EXFAT_FIRST_CLUSTER; sbi->used_clusters = EXFAT_CLUSTERS_UNTRACKED; @@ -474,9 +490,9 @@ static int exfat_read_boot_sector(struct super_block *sb) exfat_err(sb, "bogus data start sector"); return -EINVAL; } - if (sbi->vol_flag & VOL_DIRTY) + if (sbi->vol_flags & VOLUME_DIRTY) exfat_warn(sb, "Volume was not properly unmounted. Some data may be corrupt. Please run fsck."); - if (sbi->vol_flag & ERR_MEDIUM) + if (sbi->vol_flags & MEDIA_FAILURE) exfat_warn(sb, "Medium has reported failures. 
Some data may be lost."); /* exFAT file size is limited by a disk volume size */ diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c index 13318e255ebf..70355ab6740e 100644 --- a/fs/ext2/dir.c +++ b/fs/ext2/dir.c @@ -348,7 +348,6 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir, struct page *page = NULL; struct ext2_inode_info *ei = EXT2_I(dir); ext2_dirent * de; - int dir_has_error = 0; if (npages == 0) goto out; @@ -362,25 +361,25 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir, n = start; do { char *kaddr; - page = ext2_get_page(dir, n, dir_has_error); - if (!IS_ERR(page)) { - kaddr = page_address(page); - de = (ext2_dirent *) kaddr; - kaddr += ext2_last_byte(dir, n) - reclen; - while ((char *) de <= kaddr) { - if (de->rec_len == 0) { - ext2_error(dir->i_sb, __func__, - "zero-length directory entry"); - ext2_put_page(page); - goto out; - } - if (ext2_match (namelen, name, de)) - goto found; - de = ext2_next_entry(de); + page = ext2_get_page(dir, n, 0); + if (IS_ERR(page)) + return ERR_CAST(page); + + kaddr = page_address(page); + de = (ext2_dirent *) kaddr; + kaddr += ext2_last_byte(dir, n) - reclen; + while ((char *) de <= kaddr) { + if (de->rec_len == 0) { + ext2_error(dir->i_sb, __func__, + "zero-length directory entry"); + ext2_put_page(page); + goto out; } - ext2_put_page(page); - } else - dir_has_error = 1; + if (ext2_match(namelen, name, de)) + goto found; + de = ext2_next_entry(de); + } + ext2_put_page(page); if (++n >= npages) n = 0; @@ -394,7 +393,7 @@ struct ext2_dir_entry_2 *ext2_find_entry (struct inode *dir, } } while (n != start); out: - return NULL; + return ERR_PTR(-ENOENT); found: *res_page = page; @@ -414,18 +413,18 @@ struct ext2_dir_entry_2 * ext2_dotdot (struct inode *dir, struct page **p) return de; } -ino_t ext2_inode_by_name(struct inode *dir, const struct qstr *child) +int ext2_inode_by_name(struct inode *dir, const struct qstr *child, ino_t *ino) { - ino_t res = 0; struct ext2_dir_entry_2 *de; struct page *page; - de = ext2_find_entry (dir, child, &page); - if (de) { - res = le32_to_cpu(de->inode); - ext2_put_page(page); - } - return res; + de = ext2_find_entry(dir, child, &page); + if (IS_ERR(de)) + return PTR_ERR(de); + + *ino = le32_to_cpu(de->inode); + ext2_put_page(page); + return 0; } static int ext2_prepare_chunk(struct page *page, loff_t pos, unsigned len) diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index 8178bd38a9d6..5136b7289e8d 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -52,8 +52,8 @@ struct ext2_block_alloc_info { /* * Was i_next_alloc_goal in ext2_inode_info * is the *physical* companion to i_next_alloc_block. - * it the the physical block number of the block which was most-recentl - * allocated to this file. This give us the goal (target) for the next + * it is the physical block number of the block which was most-recently + * allocated to this file. This gives us the goal (target) for the next * allocation when we detect linearly ascending requests. 
*/ ext2_fsblk_t last_alloc_physical_block; @@ -374,7 +374,6 @@ struct ext2_inode { /* * Mount flags */ -#define EXT2_MOUNT_CHECK 0x000001 /* Do mount-time checks */ #define EXT2_MOUNT_OLDALLOC 0x000002 /* Don't use the new Orlov allocator */ #define EXT2_MOUNT_GRPID 0x000004 /* Create files with directory's group */ #define EXT2_MOUNT_DEBUG 0x000008 /* Some debugging messages */ @@ -738,7 +737,8 @@ extern void ext2_rsv_window_add(struct super_block *sb, struct ext2_reserve_wind /* dir.c */ extern int ext2_add_link (struct dentry *, struct inode *); -extern ino_t ext2_inode_by_name(struct inode *, const struct qstr *); +extern int ext2_inode_by_name(struct inode *dir, + const struct qstr *child, ino_t *ino); extern int ext2_make_empty(struct inode *, struct inode *); extern struct ext2_dir_entry_2 * ext2_find_entry (struct inode *,const struct qstr *, struct page **); extern int ext2_delete_entry (struct ext2_dir_entry_2 *, struct page *); diff --git a/fs/ext2/file.c b/fs/ext2/file.c index 60378ddf1424..96044f5dbc0e 100644 --- a/fs/ext2/file.c +++ b/fs/ext2/file.c @@ -93,8 +93,10 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) struct inode *inode = file_inode(vmf->vma->vm_file); struct ext2_inode_info *ei = EXT2_I(inode); vm_fault_t ret; + bool write = (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); - if (vmf->flags & FAULT_FLAG_WRITE) { + if (write) { sb_start_pagefault(inode->i_sb); file_update_time(vmf->vma->vm_file); } @@ -103,7 +105,7 @@ static vm_fault_t ext2_dax_fault(struct vm_fault *vmf) ret = dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL, &ext2_iomap_ops); up_read(&ei->dax_sem); - if (vmf->flags & FAULT_FLAG_WRITE) + if (write) sb_end_pagefault(inode->i_sb); return ret; } diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c index fda7d3f5b4be..432c3febea6d 100644 --- a/fs/ext2/ialloc.c +++ b/fs/ext2/ialloc.c @@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir) if (dir) le16_add_cpu(&desc->bg_used_dirs_count, -1); spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); + percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter); if (dir) percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter); mark_buffer_dirty(bh); @@ -528,7 +529,7 @@ got: goto fail; } - percpu_counter_add(&sbi->s_freeinodes_counter, -1); + percpu_counter_dec(&sbi->s_freeinodes_counter); if (S_ISDIR(mode)) percpu_counter_inc(&sbi->s_dirs_counter); diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index c8b371c82b4f..415c21f0e750 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c @@ -356,8 +356,7 @@ static inline ext2_fsblk_t ext2_find_goal(struct inode *inode, long block, * @blks: number of data blocks to be mapped. * @blocks_to_boundary: the offset in the indirect block * - * return the total number of blocks to be allocate, including the - * direct and indirect blocks. + * return the number of direct blocks to allocate. 
*/ static int ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks, @@ -390,11 +389,9 @@ ext2_blks_to_allocate(Indirect * branch, int k, unsigned long blks, * ext2_alloc_blocks: multiple allocate blocks needed for a branch * @indirect_blks: the number of blocks need to allocate for indirect * blocks - * + * @blks: the number of blocks need to allocate for direct blocks * @new_blocks: on return it will store the new block numbers for * the indirect blocks(if needed) and the first direct block, - * @blks: on return it will store the total number of allocated - * direct blocks */ static int ext2_alloc_blocks(struct inode *inode, ext2_fsblk_t goal, int indirect_blks, int blks, @@ -1244,7 +1241,7 @@ do_indirects: mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 1); } - /* fall through */ + fallthrough; case EXT2_IND_BLOCK: nr = i_data[EXT2_DIND_BLOCK]; if (nr) { @@ -1252,7 +1249,7 @@ do_indirects: mark_inode_dirty(inode); ext2_free_branches(inode, &nr, &nr+1, 2); } - /* fall through */ + fallthrough; case EXT2_DIND_BLOCK: nr = i_data[EXT2_TIND_BLOCK]; if (nr) { diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index ba3e3e075891..5bf2c145643b 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c @@ -57,13 +57,17 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, uns { struct inode * inode; ino_t ino; + int res; if (dentry->d_name.len > EXT2_NAME_LEN) return ERR_PTR(-ENAMETOOLONG); - ino = ext2_inode_by_name(dir, &dentry->d_name); - inode = NULL; - if (ino) { + res = ext2_inode_by_name(dir, &dentry->d_name, &ino); + if (res) { + if (res != -ENOENT) + return ERR_PTR(res); + inode = NULL; + } else { inode = ext2_iget(dir->i_sb, ino); if (inode == ERR_PTR(-ESTALE)) { ext2_error(dir->i_sb, __func__, @@ -78,9 +82,13 @@ static struct dentry *ext2_lookup(struct inode * dir, struct dentry *dentry, uns struct dentry *ext2_get_parent(struct dentry *child) { struct qstr dotdot = QSTR_INIT("..", 2); - unsigned long ino = ext2_inode_by_name(d_inode(child), &dotdot); - if (!ino) - return ERR_PTR(-ENOENT); + ino_t ino; + int res; + + res = ext2_inode_by_name(d_inode(child), &dotdot, &ino); + if (res) + return ERR_PTR(res); + return d_obtain_alias(ext2_iget(child->d_sb, ino)); } @@ -274,9 +282,9 @@ static int ext2_unlink(struct inode * dir, struct dentry *dentry) if (err) goto out; - de = ext2_find_entry (dir, &dentry->d_name, &page); - if (!de) { - err = -ENOENT; + de = ext2_find_entry(dir, &dentry->d_name, &page); + if (IS_ERR(de)) { + err = PTR_ERR(de); goto out; } @@ -330,9 +338,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, if (err) goto out; - old_de = ext2_find_entry (old_dir, &old_dentry->d_name, &old_page); - if (!old_de) { - err = -ENOENT; + old_de = ext2_find_entry(old_dir, &old_dentry->d_name, &old_page); + if (IS_ERR(old_de)) { + err = PTR_ERR(old_de); goto out; } @@ -351,10 +359,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, if (dir_de && !ext2_empty_dir (new_inode)) goto out_dir; - err = -ENOENT; - new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); - if (!new_de) + new_de = ext2_find_entry(new_dir, &new_dentry->d_name, &new_page); + if (IS_ERR(new_de)) { + err = PTR_ERR(new_de); goto out_dir; + } ext2_set_link(new_dir, new_de, new_page, old_inode, 1); new_inode->i_ctime = current_time(new_inode); if (dir_de) diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 4a4ab683250d..7fab2b3b5b39 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -431,7 +431,7 @@ 
static unsigned long get_sb_block(void **data) enum { Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid, Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, - Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug, + Opt_err_ro, Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_xip, Opt_dax, Opt_ignore, Opt_err, Opt_quota, Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation @@ -451,8 +451,6 @@ static const match_table_t tokens = { {Opt_err_panic, "errors=panic"}, {Opt_err_ro, "errors=remount-ro"}, {Opt_nouid32, "nouid32"}, - {Opt_nocheck, "check=none"}, - {Opt_nocheck, "nocheck"}, {Opt_debug, "debug"}, {Opt_oldalloc, "oldalloc"}, {Opt_orlov, "orlov"}, @@ -546,12 +544,6 @@ static int parse_options(char *options, struct super_block *sb, case Opt_nouid32: set_opt (opts->s_mount_opt, NO_UID32); break; - case Opt_nocheck: - ext2_msg(sb, KERN_WARNING, - "Option nocheck/check=none is deprecated and" - " will be removed in June 2020."); - clear_opt (opts->s_mount_opt, CHECK); - break; case Opt_debug: set_opt (opts->s_mount_opt, DEBUG); break; @@ -595,7 +587,7 @@ static int parse_options(char *options, struct super_block *sb, case Opt_xip: ext2_msg(sb, KERN_INFO, "use dax instead of xip"); set_opt(opts->s_mount_opt, XIP); - /* Fall through */ + fallthrough; case Opt_dax: #ifdef CONFIG_FS_DAX ext2_msg(sb, KERN_WARNING, diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c index 943cc469f42f..841fa6d9d744 100644 --- a/fs/ext2/xattr.c +++ b/fs/ext2/xattr.c @@ -437,6 +437,9 @@ ext2_xattr_set(struct inode *inode, int name_index, const char *name, name_len = strlen(name); if (name_len > 255 || value_len > sb->s_blocksize) return -ERANGE; + error = dquot_initialize(inode); + if (error) + return error; down_write(&EXT2_I(inode)->xattr_sem); if (EXT2_I(inode)->i_file_acl) { /* The inode already has an extended attribute block. */ @@ -588,7 +591,6 @@ bad_block: /* Remove the old value. */ memmove(first_val + size, first_val, val - first_val); memset(first_val, 0, size); - here->e_value_offs = 0; min_offs += size; /* Adjust all value offsets. */ @@ -600,6 +602,8 @@ bad_block: cpu_to_le16(o + size); last = EXT2_XATTR_NEXT(last); } + + here->e_value_offs = 0; } if (value == NULL) { /* Remove the old name. */ diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 1afa5a4bcb5f..619dd35ddd48 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -110,7 +110,7 @@ config EXT4_KUNIT_TESTS This builds the ext4 KUnit tests. KUnit tests run during boot and output the results to the debug log - in TAP format (http://testanything.org/). Only useful for kernel devs + in TAP format (https://testanything.org/). Only useful for kernel devs running KUnit test harness and are not for inclusion into a production build. diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 1ba46d87cdf1..48c3df47748d 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -413,7 +413,8 @@ verified: * Return buffer_head on success or an ERR_PTR in case of failure. 
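The ext2 directory-lookup changes above switch ext2_find_entry() from returning NULL to returning ERR_PTR() values, and turn ext2_inode_by_name() into an int-returning function with an ino_t out-parameter. The point is that a failed page read is no longer indistinguishable from "name not found": callers such as ext2_lookup() can create a negative dentry only for -ENOENT and must propagate real errors. A self-contained userspace sketch of the convention, reimplementing the relevant bits of include/linux/err.h (the toy_* names are stand-ins, not from the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>

#define MAX_ERRNO 4095

/* userspace copies of the kernel ERR_PTR()/PTR_ERR()/IS_ERR() helpers */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct toy_dirent { unsigned long ino; };

/* stand-in for the new ext2_find_entry(): never NULL, always an entry or an errno */
static struct toy_dirent *toy_find_entry(const char *name)
{
	static struct toy_dirent hit = { .ino = 12 };

	if (!strcmp(name, "present"))
		return &hit;
	return ERR_PTR(-ENOENT);	/* a failed page read would return -EIO here */
}

/* stand-in for the new ext2_inode_by_name(): errno in the return, ino by reference */
static int toy_inode_by_name(const char *name, unsigned long *ino)
{
	struct toy_dirent *de = toy_find_entry(name);

	if (IS_ERR(de))
		return PTR_ERR(de);
	*ino = de->ino;
	return 0;
}

int main(void)
{
	unsigned long ino;
	int err = toy_inode_by_name("missing", &ino);

	if (err == -ENOENT)
		printf("negative lookup\n");	/* ext2_lookup() makes a negative dentry */
	else if (err)
		printf("hard error %d\n", err);	/* now propagated instead of swallowed */
	else
		printf("ino %lu\n", ino);
	return 0;
}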
*/ struct buffer_head * -ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) +ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group, + bool ignore_locked) { struct ext4_group_desc *desc; struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -441,6 +442,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) return ERR_PTR(-ENOMEM); } + if (ignore_locked && buffer_locked(bh)) { + /* buffer under IO already, return if called for prefetching */ + put_bh(bh); + return NULL; + } + if (bitmap_uptodate(bh)) goto verify; @@ -487,10 +494,11 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group) * submit the buffer_head for reading */ set_buffer_new(bh); - trace_ext4_read_block_bitmap_load(sb, block_group); + trace_ext4_read_block_bitmap_load(sb, block_group, ignore_locked); bh->b_end_io = ext4_end_bitmap_read; get_bh(bh); - submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh); + submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO | + (ignore_locked ? REQ_RAHEAD : 0), bh); return bh; verify: err = ext4_validate_block_bitmap(sb, desc, block_group, bh); @@ -534,7 +542,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) struct buffer_head *bh; int err; - bh = ext4_read_block_bitmap_nowait(sb, block_group); + bh = ext4_read_block_bitmap_nowait(sb, block_group, false); if (IS_ERR(bh)) return bh; err = ext4_wait_block_bitmap(sb, block_group, bh); diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c index 16e9b2fda03a..c54ba52f2dd4 100644 --- a/fs/ext4/block_validity.c +++ b/fs/ext4/block_validity.c @@ -24,6 +24,7 @@ struct ext4_system_zone { struct rb_node node; ext4_fsblk_t start_blk; unsigned int count; + u32 ino; }; static struct kmem_cache *ext4_system_zone_cachep; @@ -45,7 +46,8 @@ void ext4_exit_system_zone(void) static inline int can_merge(struct ext4_system_zone *entry1, struct ext4_system_zone *entry2) { - if ((entry1->start_blk + entry1->count) == entry2->start_blk) + if ((entry1->start_blk + entry1->count) == entry2->start_blk && + entry1->ino == entry2->ino) return 1; return 0; } @@ -66,9 +68,9 @@ static void release_system_zone(struct ext4_system_blocks *system_blks) */ static int add_system_zone(struct ext4_system_blocks *system_blks, ext4_fsblk_t start_blk, - unsigned int count) + unsigned int count, u32 ino) { - struct ext4_system_zone *new_entry = NULL, *entry; + struct ext4_system_zone *new_entry, *entry; struct rb_node **n = &system_blks->root.rb_node, *node; struct rb_node *parent = NULL, *new_node = NULL; @@ -79,30 +81,21 @@ static int add_system_zone(struct ext4_system_blocks *system_blks, n = &(*n)->rb_left; else if (start_blk >= (entry->start_blk + entry->count)) n = &(*n)->rb_right; - else { - if (start_blk + count > (entry->start_blk + - entry->count)) - entry->count = (start_blk + count - - entry->start_blk); - new_node = *n; - new_entry = rb_entry(new_node, struct ext4_system_zone, - node); - break; - } + else /* Unexpected overlap of system zones. 
*/ + return -EFSCORRUPTED; } - if (!new_entry) { - new_entry = kmem_cache_alloc(ext4_system_zone_cachep, - GFP_KERNEL); - if (!new_entry) - return -ENOMEM; - new_entry->start_blk = start_blk; - new_entry->count = count; - new_node = &new_entry->node; - - rb_link_node(new_node, parent, n); - rb_insert_color(new_node, &system_blks->root); - } + new_entry = kmem_cache_alloc(ext4_system_zone_cachep, + GFP_KERNEL); + if (!new_entry) + return -ENOMEM; + new_entry->start_blk = start_blk; + new_entry->count = count; + new_entry->ino = ino; + new_node = &new_entry->node; + + rb_link_node(new_node, parent, n); + rb_insert_color(new_node, &system_blks->root); /* Can we merge to the left? */ node = rb_prev(new_node); @@ -151,40 +144,6 @@ static void debug_print_tree(struct ext4_sb_info *sbi) printk(KERN_CONT "\n"); } -/* - * Returns 1 if the passed-in block region (start_blk, - * start_blk+count) is valid; 0 if some part of the block region - * overlaps with filesystem metadata blocks. - */ -static int ext4_data_block_valid_rcu(struct ext4_sb_info *sbi, - struct ext4_system_blocks *system_blks, - ext4_fsblk_t start_blk, - unsigned int count) -{ - struct ext4_system_zone *entry; - struct rb_node *n; - - if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || - (start_blk + count < start_blk) || - (start_blk + count > ext4_blocks_count(sbi->s_es))) - return 0; - - if (system_blks == NULL) - return 1; - - n = system_blks->root.rb_node; - while (n) { - entry = rb_entry(n, struct ext4_system_zone, node); - if (start_blk + count - 1 < entry->start_blk) - n = n->rb_left; - else if (start_blk >= (entry->start_blk + entry->count)) - n = n->rb_right; - else - return 0; - } - return 1; -} - static int ext4_protect_reserved_inode(struct super_block *sb, struct ext4_system_blocks *system_blks, u32 ino) @@ -214,19 +173,18 @@ static int ext4_protect_reserved_inode(struct super_block *sb, if (n == 0) { i++; } else { - if (!ext4_data_block_valid_rcu(sbi, system_blks, - map.m_pblk, n)) { - err = -EFSCORRUPTED; - __ext4_error(sb, __func__, __LINE__, -err, - map.m_pblk, "blocks %llu-%llu " - "from inode %u overlap system zone", - map.m_pblk, - map.m_pblk + map.m_len - 1, ino); + err = add_system_zone(system_blks, map.m_pblk, n, ino); + if (err < 0) { + if (err == -EFSCORRUPTED) { + __ext4_error(sb, __func__, __LINE__, + -err, map.m_pblk, + "blocks %llu-%llu from inode %u overlap system zone", + map.m_pblk, + map.m_pblk + map.m_len - 1, + ino); + } break; } - err = add_system_zone(system_blks, map.m_pblk, n); - if (err < 0) - break; i += n; } } @@ -262,14 +220,6 @@ int ext4_setup_system_zone(struct super_block *sb) int flex_size = ext4_flex_bg_size(sbi); int ret; - if (!test_opt(sb, BLOCK_VALIDITY)) { - if (sbi->system_blks) - ext4_release_system_zone(sb); - return 0; - } - if (sbi->system_blks) - return 0; - system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL); if (!system_blks) return -ENOMEM; @@ -277,22 +227,25 @@ int ext4_setup_system_zone(struct super_block *sb) for (i=0; i < ngroups; i++) { cond_resched(); if (ext4_bg_has_super(sb, i) && - ((i < 5) || ((i % flex_size) == 0))) - add_system_zone(system_blks, + ((i < 5) || ((i % flex_size) == 0))) { + ret = add_system_zone(system_blks, ext4_group_first_block_no(sb, i), - ext4_bg_num_gdb(sb, i) + 1); + ext4_bg_num_gdb(sb, i) + 1, 0); + if (ret) + goto err; + } gdp = ext4_get_group_desc(sb, i, NULL); ret = add_system_zone(system_blks, - ext4_block_bitmap(sb, gdp), 1); + ext4_block_bitmap(sb, gdp), 1, 0); if (ret) goto err; ret = add_system_zone(system_blks, - 
ext4_inode_bitmap(sb, gdp), 1); + ext4_inode_bitmap(sb, gdp), 1, 0); if (ret) goto err; ret = add_system_zone(system_blks, ext4_inode_table(sb, gdp), - sbi->s_itb_per_group); + sbi->s_itb_per_group, 0); if (ret) goto err; } @@ -341,11 +294,24 @@ void ext4_release_system_zone(struct super_block *sb) call_rcu(&system_blks->rcu, ext4_destroy_system_zone); } -int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, +/* + * Returns 1 if the passed-in block region (start_blk, + * start_blk+count) is valid; 0 if some part of the block region + * overlaps with some other filesystem metadata blocks. + */ +int ext4_inode_block_valid(struct inode *inode, ext4_fsblk_t start_blk, unsigned int count) { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_system_blocks *system_blks; - int ret; + struct ext4_system_zone *entry; + struct rb_node *n; + int ret = 1; + + if ((start_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) || + (start_blk + count < start_blk) || + (start_blk + count > ext4_blocks_count(sbi->s_es))) + return 0; /* * Lock the system zone to prevent it being released concurrently @@ -354,8 +320,22 @@ int ext4_data_block_valid(struct ext4_sb_info *sbi, ext4_fsblk_t start_blk, */ rcu_read_lock(); system_blks = rcu_dereference(sbi->system_blks); - ret = ext4_data_block_valid_rcu(sbi, system_blks, start_blk, - count); + if (system_blks == NULL) + goto out_rcu; + + n = system_blks->root.rb_node; + while (n) { + entry = rb_entry(n, struct ext4_system_zone, node); + if (start_blk + count - 1 < entry->start_blk) + n = n->rb_left; + else if (start_blk >= (entry->start_blk + entry->count)) + n = n->rb_right; + else { + ret = (entry->ino == inode->i_ino); + break; + } + } +out_rcu: rcu_read_unlock(); return ret; } @@ -374,8 +354,7 @@ int ext4_check_blockref(const char *function, unsigned int line, while (bref < p+max) { blk = le32_to_cpu(*bref++); if (blk && - unlikely(!ext4_data_block_valid(EXT4_SB(inode->i_sb), - blk, 1))) { + unlikely(!ext4_inode_block_valid(inode, blk, 1))) { ext4_error_inode(inode, function, line, blk, "invalid block"); return -EFSCORRUPTED; diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 42f5060f3cdf..523e00d7b392 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -434,10 +434,36 @@ struct flex_groups { #define EXT4_CASEFOLD_FL 0x40000000 /* Casefolded directory */ #define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */ -#define EXT4_FL_USER_VISIBLE 0x725BDFFF /* User visible flags */ -#define EXT4_FL_USER_MODIFIABLE 0x624BC0FF /* User modifiable flags */ - -/* Flags we can manipulate with through EXT4_IOC_FSSETXATTR */ +/* User modifiable flags */ +#define EXT4_FL_USER_MODIFIABLE (EXT4_SECRM_FL | \ + EXT4_UNRM_FL | \ + EXT4_COMPR_FL | \ + EXT4_SYNC_FL | \ + EXT4_IMMUTABLE_FL | \ + EXT4_APPEND_FL | \ + EXT4_NODUMP_FL | \ + EXT4_NOATIME_FL | \ + EXT4_JOURNAL_DATA_FL | \ + EXT4_NOTAIL_FL | \ + EXT4_DIRSYNC_FL | \ + EXT4_TOPDIR_FL | \ + EXT4_EXTENTS_FL | \ + 0x00400000 /* EXT4_EOFBLOCKS_FL */ | \ + EXT4_DAX_FL | \ + EXT4_PROJINHERIT_FL | \ + EXT4_CASEFOLD_FL) + +/* User visible flags */ +#define EXT4_FL_USER_VISIBLE (EXT4_FL_USER_MODIFIABLE | \ + EXT4_DIRTY_FL | \ + EXT4_COMPRBLK_FL | \ + EXT4_NOCOMPR_FL | \ + EXT4_ENCRYPT_FL | \ + EXT4_INDEX_FL | \ + EXT4_VERITY_FL | \ + EXT4_INLINE_DATA_FL) + +/* Flags we can manipulate with through FS_IOC_FSSETXATTR */ #define EXT4_FL_XFLAG_VISIBLE (EXT4_SYNC_FL | \ EXT4_IMMUTABLE_FL | \ EXT4_APPEND_FL | \ @@ -669,8 +695,6 @@ enum { /* * ioctl commands */ -#define EXT4_IOC_GETFLAGS FS_IOC_GETFLAGS 
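The block_validity.c rework above tags every system zone with the inode it was registered for (0 for filesystem metadata such as bitmaps and group descriptors) and folds the old ext4_data_block_valid() into ext4_inode_block_valid(), which takes the inode doing the mapping. A block range overlapping a zone is now acceptable exactly when the zone belongs to that same inode; this is how the journal inode's blocks, added via ext4_protect_reserved_inode(), stay mappable by the journal itself while remaining off-limits to every other inode. A toy userspace model of that decision, using a flat array instead of the rbtree (the inode numbers are illustrative; ext4's journal inode is inode 8):

#include <stdio.h>

struct zone {
	unsigned long long start;
	unsigned int count;
	unsigned int ino;	/* 0: fs metadata, otherwise the owning inode */
};

/* linear-scan model of ext4_inode_block_valid() */
static int block_valid(const struct zone *z, int nzones, unsigned int ino,
		       unsigned long long blk, unsigned int count)
{
	int i;

	for (i = 0; i < nzones; i++) {
		if (blk + count - 1 < z[i].start || blk >= z[i].start + z[i].count)
			continue;		/* no overlap with this zone */
		return z[i].ino == ino;		/* overlap is allowed only for the owner */
	}
	return 1;				/* overlaps nothing: a valid data block */
}

int main(void)
{
	const struct zone zones[] = {
		{ 100, 16, 0 },	/* block bitmap + inode table, pure metadata */
		{ 200, 64, 8 },	/* journal blocks, registered for inode 8 */
	};

	printf("%d\n", block_valid(zones, 2, 42, 300, 4));	/* 1: plain data */
	printf("%d\n", block_valid(zones, 2, 42, 210, 4));	/* 0: inode 42 claims journal blocks */
	printf("%d\n", block_valid(zones, 2,  8, 210, 4));	/* 1: the journal inode itself */
	return 0;
}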
-#define EXT4_IOC_SETFLAGS FS_IOC_SETFLAGS #define EXT4_IOC_GETVERSION _IOR('f', 3, long) #define EXT4_IOC_SETVERSION _IOW('f', 4, long) #define EXT4_IOC_GETVERSION_OLD FS_IOC_GETVERSION @@ -687,17 +711,11 @@ enum { #define EXT4_IOC_RESIZE_FS _IOW('f', 16, __u64) #define EXT4_IOC_SWAP_BOOT _IO('f', 17) #define EXT4_IOC_PRECACHE_EXTENTS _IO('f', 18) -#define EXT4_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY -#define EXT4_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT -#define EXT4_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY /* ioctl codes 19--39 are reserved for fscrypt */ #define EXT4_IOC_CLEAR_ES_CACHE _IO('f', 40) #define EXT4_IOC_GETSTATE _IOW('f', 41, __u32) #define EXT4_IOC_GET_ES_CACHE _IOWR('f', 42, struct fiemap) -#define EXT4_IOC_FSGETXATTR FS_IOC_FSGETXATTR -#define EXT4_IOC_FSSETXATTR FS_IOC_FSSETXATTR - #define EXT4_IOC_SHUTDOWN _IOR ('X', 125, __u32) /* @@ -722,8 +740,6 @@ enum { /* * ioctl commands in 32 bit emulation */ -#define EXT4_IOC32_GETFLAGS FS_IOC32_GETFLAGS -#define EXT4_IOC32_SETFLAGS FS_IOC32_SETFLAGS #define EXT4_IOC32_GETVERSION _IOR('f', 3, int) #define EXT4_IOC32_SETVERSION _IOW('f', 4, int) #define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int) @@ -1054,6 +1070,7 @@ struct ext4_inode_info { struct timespec64 i_crtime; /* mballoc */ + atomic_t i_prealloc_active; struct list_head i_prealloc_list; spinlock_t i_prealloc_lock; @@ -1172,6 +1189,7 @@ struct ext4_inode_info { #define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */ #define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */ #define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */ +#define EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS 0x4000000 #define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */ #define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */ #define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */ @@ -1501,10 +1519,13 @@ struct ext4_sb_info { unsigned int s_mb_stats; unsigned int s_mb_order2_reqs; unsigned int s_mb_group_prealloc; + unsigned int s_mb_max_inode_prealloc; unsigned int s_max_dir_size_kb; /* where last allocation was done - for stream allocation */ unsigned long s_mb_last_group; unsigned long s_mb_last_start; + unsigned int s_mb_prefetch; + unsigned int s_mb_prefetch_limit; /* stats for buddy allocator */ atomic_t s_bal_reqs; /* number of reqs with len > 1 */ @@ -1572,6 +1593,8 @@ struct ext4_sb_info { struct ratelimit_state s_err_ratelimit_state; struct ratelimit_state s_warning_ratelimit_state; struct ratelimit_state s_msg_ratelimit_state; + atomic_t s_warning_count; + atomic_t s_msg_count; /* Encryption context for '-o test_dummy_encryption' */ struct fscrypt_dummy_context s_dummy_enc_ctx; @@ -1585,6 +1608,9 @@ struct ext4_sb_info { #ifdef CONFIG_EXT4_DEBUG unsigned long s_simulate_fail; #endif + /* Record the errseq of the backing block device */ + errseq_t s_bdev_wb_err; + spinlock_t s_bdev_wb_lock; }; static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) @@ -2313,9 +2339,15 @@ struct ext4_lazy_init { struct mutex li_list_mtx; }; +enum ext4_li_mode { + EXT4_LI_MODE_PREFETCH_BBITMAP, + EXT4_LI_MODE_ITABLE, +}; + struct ext4_li_request { struct super_block *lr_super; - struct ext4_sb_info *lr_sbi; + enum ext4_li_mode lr_mode; + ext4_group_t lr_first_not_zeroed; ext4_group_t lr_next_group; struct list_head lr_request; unsigned long lr_next_sched; @@ -2446,7 +2478,8 @@ extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, extern int 
ext4_should_retry_alloc(struct super_block *sb, int *retries); extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb, - ext4_group_t block_group); + ext4_group_t block_group, + bool ignore_locked); extern int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group, struct buffer_head *bh); @@ -2651,9 +2684,15 @@ extern int ext4_mb_release(struct super_block *); extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, struct ext4_allocation_request *, int *); extern int ext4_mb_reserve_blocks(struct super_block *, int); -extern void ext4_discard_preallocations(struct inode *); +extern void ext4_discard_preallocations(struct inode *, unsigned int); extern int __init ext4_init_mballoc(void); extern void ext4_exit_mballoc(void); +extern ext4_group_t ext4_mb_prefetch(struct super_block *sb, + ext4_group_t group, + unsigned int nr, int *cnt); +extern void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, + unsigned int nr); + extern void ext4_free_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, ext4_fsblk_t block, unsigned long count, int flags); @@ -2765,8 +2804,7 @@ extern int ext4_search_dir(struct buffer_head *bh, struct ext4_filename *fname, unsigned int offset, struct ext4_dir_entry_2 **res_dir); -extern int ext4_generic_delete_entry(handle_t *handle, - struct inode *dir, +extern int ext4_generic_delete_entry(struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, void *entry_buf, @@ -2924,12 +2962,6 @@ do { \ #endif -extern int ext4_update_compat_feature(handle_t *handle, struct super_block *sb, - __u32 compat); -extern int ext4_update_rocompat_feature(handle_t *handle, - struct super_block *sb, __u32 rocompat); -extern int ext4_update_incompat_feature(handle_t *handle, - struct super_block *sb, __u32 incompat); extern ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, struct ext4_group_desc *bg); extern ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb, @@ -3145,6 +3177,7 @@ struct ext4_group_info { (1 << EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT) #define EXT4_GROUP_INFO_IBITMAP_CORRUPT \ (1 << EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT) +#define EXT4_GROUP_INFO_BBITMAP_READ_BIT 4 #define EXT4_MB_GRP_NEED_INIT(grp) \ (test_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &((grp)->bb_state))) @@ -3159,6 +3192,8 @@ struct ext4_group_info { (set_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state))) #define EXT4_MB_GRP_CLEAR_TRIMMED(grp) \ (clear_bit(EXT4_GROUP_INFO_WAS_TRIMMED_BIT, &((grp)->bb_state))) +#define EXT4_MB_GRP_TEST_AND_SET_READ(grp) \ + (test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_READ_BIT, &((grp)->bb_state))) #define EXT4_MAX_CONTENTION 8 #define EXT4_CONTENTION_THRESHOLD 2 @@ -3363,9 +3398,9 @@ extern void ext4_release_system_zone(struct super_block *sb); extern int ext4_setup_system_zone(struct super_block *sb); extern int __init ext4_init_system_zone(void); extern void ext4_exit_system_zone(void); -extern int ext4_data_block_valid(struct ext4_sb_info *sbi, - ext4_fsblk_t start_blk, - unsigned int count); +extern int ext4_inode_block_valid(struct inode *inode, + ext4_fsblk_t start_blk, + unsigned int count); extern int ext4_check_blockref(const char *, unsigned int, struct inode *, __le32 *, unsigned int); diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 0c76cdd44d90..760b9ee49dc0 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c @@ -195,6 +195,28 @@ static void ext4_journal_abort_handle(const char *caller, unsigned int line, jbd2_journal_abort_handle(handle); } +static void 
ext4_check_bdev_write_error(struct super_block *sb) +{ + struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping; + struct ext4_sb_info *sbi = EXT4_SB(sb); + int err; + + /* + * If the block device has write error flag, it may have failed to + * async write out metadata buffers in the background. In this case, + * we could read old data from disk and write it out again, which + * may lead to on-disk filesystem inconsistency. + */ + if (errseq_check(&mapping->wb_err, READ_ONCE(sbi->s_bdev_wb_err))) { + spin_lock(&sbi->s_bdev_wb_lock); + err = errseq_check_and_advance(&mapping->wb_err, &sbi->s_bdev_wb_err); + spin_unlock(&sbi->s_bdev_wb_lock); + if (err) + ext4_error_err(sb, -err, + "Error while async write back metadata"); + } +} + int __ext4_journal_get_write_access(const char *where, unsigned int line, handle_t *handle, struct buffer_head *bh) { @@ -202,6 +224,9 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line, might_sleep(); + if (bh->b_bdev->bd_super) + ext4_check_bdev_write_error(bh->b_bdev->bd_super); + if (ext4_handle_valid(handle)) { err = jbd2_journal_get_write_access(handle, bh); if (err) diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 221f240eae60..a0481582187a 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -100,7 +100,7 @@ static int ext4_ext_trunc_restart_fn(struct inode *inode, int *dropped) * i_mutex. So we can safely drop the i_data_sem here. */ BUG_ON(EXT4_JOURNAL(inode) == NULL); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); *dropped = 1; return 0; @@ -340,7 +340,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) */ if (lblock + len <= lblock) return 0; - return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); + return ext4_inode_block_valid(inode, block, len); } static int ext4_valid_extent_idx(struct inode *inode, @@ -348,7 +348,7 @@ static int ext4_valid_extent_idx(struct inode *inode, { ext4_fsblk_t block = ext4_idx_pblock(ext_idx); - return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, 1); + return ext4_inode_block_valid(inode, block, 1); } static int ext4_valid_extent_entries(struct inode *inode, @@ -507,14 +507,10 @@ __read_extent_tree_block(const char *function, unsigned int line, } if (buffer_verified(bh) && !(flags & EXT4_EX_FORCE_CACHE)) return bh; - if (!ext4_has_feature_journal(inode->i_sb) || - (inode->i_ino != - le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) { - err = __ext4_ext_check(function, line, inode, - ext_block_hdr(bh), depth, pblk); - if (err) - goto errout; - } + err = __ext4_ext_check(function, line, inode, + ext_block_hdr(bh), depth, pblk); + if (err) + goto errout; set_buffer_verified(bh); /* * If this is a leaf block, cache all of its entries @@ -693,10 +689,8 @@ void ext4_ext_drop_refs(struct ext4_ext_path *path) return; depth = path->p_depth; for (i = 0; i <= depth; i++, path++) { - if (path->p_bh) { - brelse(path->p_bh); - path->p_bh = NULL; - } + brelse(path->p_bh); + path->p_bh = NULL; } } @@ -1915,7 +1909,7 @@ out: /* * ext4_ext_insert_extent: - * tries to merge requsted extent into the existing extent or + * tries to merge requested extent into the existing extent or * inserts requested extent as new one into the tree, * creating new leaf in the no-space case. 
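The new ext4_check_bdev_write_error() above closes a quiet corruption window: if background writeback of a metadata buffer failed, a later transaction could read the stale on-disk copy back in and re-journal it as if it were current. The helper uses the errseq machinery on the block device's address_space: errseq_check() is the cheap lockless peek against the filesystem's saved cursor (s_bdev_wb_err), and only when it reports a new error does the code take s_bdev_wb_lock and errseq_check_and_advance() the cursor, so each writeback failure is logged once. For this to work the cursor has to be primed at mount time; the hunk above only shows the check path, so the sketch below is an assumption about the setup side, not part of this excerpt:

	/* hedged sketch, assuming this runs once during ext4_fill_super() */
	spin_lock_init(&sbi->s_bdev_wb_lock);
	/* remember the current error sequence so only later failures trip the check */
	sbi->s_bdev_wb_err = errseq_sample(&sb->s_bdev->bd_inode->i_mapping->wb_err);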
*/ @@ -3125,7 +3119,7 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) * * * Splits extent [a, b] into two extents [a, @split) and [@split, b], states - * of which are deterimined by split_flag. + * of which are determined by split_flag. * * There are two cases: * a> the extent are splitted into two extent. @@ -3650,7 +3644,7 @@ static int ext4_split_convert_extents(handle_t *handle, eof_block = map->m_lblk + map->m_len; /* * It is safe to convert extent to initialized via explicit - * zeroout only if extent is fully insde i_size or new_size. + * zeroout only if extent is fully inside i_size or new_size. */ depth = ext_depth(inode); ex = path[depth].p_ext; @@ -4272,7 +4266,7 @@ got_allocated_blocks: * not a good idea to call discard here directly, * but otherwise we'd need to call it every free(). */ - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) fb_flags = EXT4_FREE_BLOCKS_NO_QUOT_UPDATE; ext4_free_blocks(handle, inode, NULL, newblock, @@ -4495,7 +4489,7 @@ static long ext4_zero_range(struct file *file, loff_t offset, } /* - * Round up offset. This is not fallocate, we neet to zero out + * Round up offset. This is not fallocate, we need to zero out * blocks, so convert interior block aligned part of the range to * unwritten and possibly manually zero out unaligned parts of the * range. @@ -5299,7 +5293,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) } down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_es_remove_extent(inode, punch_start, EXT_MAX_BLOCKS - punch_start); @@ -5313,7 +5307,7 @@ static int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len) up_write(&EXT4_I(inode)->i_data_sem); goto out_stop; } - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_ext_shift_extents(inode, handle, punch_stop, punch_stop - punch_start, SHIFT_LEFT); @@ -5445,7 +5439,7 @@ static int ext4_insert_range(struct inode *inode, loff_t offset, loff_t len) goto out_stop; down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); path = ext4_find_extent(inode, offset_lblk, NULL, 0); if (IS_ERR(path)) { @@ -5579,7 +5573,7 @@ ext4_swap_extents(handle_t *handle, struct inode *inode1, } ex1 = path1[path1->p_depth].p_ext; ex2 = path2[path2->p_depth].p_ext; - /* Do we have somthing to swap ? */ + /* Do we have something to swap ? 
*/ if (unlikely(!ex2 || !ex1)) goto finish; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 2a01e31a032c..7d61069531d3 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -145,10 +145,9 @@ static int ext4_release_file(struct inode *inode, struct file *filp) /* if we are the last writer on the inode, drop the block reservation */ if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1) && - !EXT4_I(inode)->i_reserved_data_blocks) - { + !EXT4_I(inode)->i_reserved_data_blocks) { down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); } if (is_dx(inode) && filp->private_data) @@ -428,6 +427,10 @@ restart: */ if (*ilock_shared && (!IS_NOSEC(inode) || *extend || !ext4_overwrite_io(inode, offset, count))) { + if (iocb->ki_flags & IOCB_NOWAIT) { + ret = -EAGAIN; + goto out; + } inode_unlock_shared(inode); *ilock_shared = false; inode_lock(inode); @@ -544,6 +547,8 @@ static ssize_t ext4_dio_write_iter(struct kiocb *iocb, struct iov_iter *from) iomap_ops = &ext4_iomap_overwrite_ops; ret = iomap_dio_rw(iocb, from, iomap_ops, &ext4_dio_write_ops, is_sync_kiocb(iocb) || unaligned_io || extend); + if (ret == -ENOTBLK) + ret = 0; if (extend) ret = ext4_handle_inode_extension(inode, offset, ret, count); @@ -810,7 +815,7 @@ out: return err; } -static int ext4_file_open(struct inode * inode, struct file * filp) +static int ext4_file_open(struct inode *inode, struct file *filp) { int ret; diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c index 3e133793a5a3..2924261226e0 100644 --- a/fs/ext4/hash.c +++ b/fs/ext4/hash.c @@ -233,7 +233,7 @@ static int __ext4fs_dirhash(const char *name, int len, break; case DX_HASH_HALF_MD4_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; - /* fall through */ + fallthrough; case DX_HASH_HALF_MD4: p = name; while (len > 0) { @@ -247,7 +247,7 @@ static int __ext4fs_dirhash(const char *name, int len, break; case DX_HASH_TEA_UNSIGNED: str2hashbuf = str2hashbuf_unsigned; - /* fall through */ + fallthrough; case DX_HASH_TEA: p = name; while (len > 0) { diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index be2b66eb65f7..80c9f33800be 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -696,7 +696,7 @@ static int ext4_ind_trunc_restart_fn(handle_t *handle, struct inode *inode, * i_mutex. So we can safely drop the i_data_sem here. 
*/ BUG_ON(EXT4_JOURNAL(inode) == NULL); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); up_write(&EXT4_I(inode)->i_data_sem); *dropped = 1; return 0; @@ -858,8 +858,7 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode, else if (ext4_should_journal_data(inode)) flags |= EXT4_FREE_BLOCKS_FORGET; - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), block_to_free, - count)) { + if (!ext4_inode_block_valid(inode, block_to_free, count)) { EXT4_ERROR_INODE(inode, "attempt to clear invalid " "blocks %llu len %lu", (unsigned long long) block_to_free, count); @@ -1004,8 +1003,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, if (!nr) continue; /* A hole */ - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), - nr, 1)) { + if (!ext4_inode_block_valid(inode, nr, 1)) { EXT4_ERROR_INODE(inode, "invalid indirect mapped " "block %lu (level %d)", @@ -1182,21 +1180,21 @@ do_indirects: ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); i_data[EXT4_IND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_IND_BLOCK: nr = i_data[EXT4_DIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); i_data[EXT4_DIND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_DIND_BLOCK: nr = i_data[EXT4_TIND_BLOCK]; if (nr) { ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); i_data[EXT4_TIND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_TIND_BLOCK: ; } @@ -1436,7 +1434,7 @@ do_indirects: ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1); i_data[EXT4_IND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_IND_BLOCK: if (++n >= n2) break; @@ -1445,7 +1443,7 @@ do_indirects: ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2); i_data[EXT4_DIND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_DIND_BLOCK: if (++n >= n2) break; @@ -1454,7 +1452,7 @@ do_indirects: ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3); i_data[EXT4_TIND_BLOCK] = 0; } - /* fall through */ + fallthrough; case EXT4_TIND_BLOCK: ; } diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c index c3a1ad2db122..75c97bca0815 100644 --- a/fs/ext4/inline.c +++ b/fs/ext4/inline.c @@ -276,7 +276,7 @@ static int ext4_create_inline_data(handle_t *handle, len = 0; } - /* Insert the the xttr entry. */ + /* Insert the xttr entry. 
*/ i.value = value; i.value_len = len; @@ -1706,7 +1706,7 @@ int ext4_delete_inline_entry(handle_t *handle, if (err) goto out; - err = ext4_generic_delete_entry(handle, dir, de_del, bh, + err = ext4_generic_delete_entry(dir, de_del, bh, inline_start, inline_size, 0); if (err) goto out; diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 10dd470876b3..bf596467c234 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -383,7 +383,7 @@ void ext4_da_update_reserve_space(struct inode *inode, */ if ((ei->i_reserved_data_blocks == 0) && !inode_is_open_for_write(inode)) - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); } static int __check_block_validity(struct inode *inode, const char *func, @@ -394,8 +394,7 @@ static int __check_block_validity(struct inode *inode, const char *func, (inode->i_ino == le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) return 0; - if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk, - map->m_len)) { + if (!ext4_inode_block_valid(inode, map->m_pblk, map->m_len)) { ext4_error_inode(inode, func, line, map->m_pblk, "lblock %lu mapped to illegal pblock %llu " "(length %d)", (unsigned long) map->m_lblk, @@ -1096,7 +1095,7 @@ static int ext4_block_write_begin(struct page *page, loff_t pos, unsigned len, } if (unlikely(err)) { page_zero_new_buffers(page, from, to); - } else if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) { + } else if (fscrypt_inode_uses_fs_layer_crypto(inode)) { for (i = 0; i < nr_wait; i++) { int err2; @@ -3288,7 +3287,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait) if (PageChecked(page)) return 0; if (journal) - return jbd2_journal_try_to_free_buffers(journal, page, wait); + return jbd2_journal_try_to_free_buffers(journal, page); else return try_to_free_buffers(page); } @@ -3737,7 +3736,7 @@ static int __ext4_block_zero_page_range(handle_t *handle, /* Uhhuh. Read error. Complain and punt. */ if (!buffer_uptodate(bh)) goto unlock; - if (S_ISREG(inode->i_mode) && IS_ENCRYPTED(inode)) { + if (fscrypt_inode_uses_fs_layer_crypto(inode)) { /* We expect the key to be set. 
*/ BUG_ON(!fscrypt_has_encryption_key(inode)); err = fscrypt_decrypt_pagecache_blocks(page, blocksize, @@ -4056,7 +4055,7 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) if (stop_block > first_block) { down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ret = ext4_es_remove_extent(inode, first_block, stop_block - first_block); @@ -4163,7 +4162,7 @@ int ext4_truncate(struct inode *inode) trace_ext4_truncate_enter(inode); if (!ext4_can_truncate(inode)) - return 0; + goto out_trace; if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC)) ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); @@ -4172,16 +4171,14 @@ int ext4_truncate(struct inode *inode) int has_inline = 1; err = ext4_inline_data_truncate(inode, &has_inline); - if (err) - return err; - if (has_inline) - return 0; + if (err || has_inline) + goto out_trace; } /* If we zero-out tail of the page, we have to create jinode for jbd2 */ if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { if (ext4_inode_attach_jinode(inode) < 0) - return 0; + goto out_trace; } if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) @@ -4190,8 +4187,10 @@ int ext4_truncate(struct inode *inode) credits = ext4_blocks_for_truncate(inode); handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits); - if (IS_ERR(handle)) - return PTR_ERR(handle); + if (IS_ERR(handle)) { + err = PTR_ERR(handle); + goto out_trace; + } if (inode->i_size & (inode->i_sb->s_blocksize - 1)) ext4_block_truncate_page(handle, mapping, inode->i_size); @@ -4211,7 +4210,7 @@ int ext4_truncate(struct inode *inode) down_write(&EXT4_I(inode)->i_data_sem); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) err = ext4_ext_truncate(handle, inode); @@ -4242,6 +4241,7 @@ out_stop: err = err2; ext4_journal_stop(handle); +out_trace: trace_ext4_truncate_exit(inode); return err; } @@ -4760,7 +4760,7 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino, ret = 0; if (ei->i_file_acl && - !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) { + !ext4_inode_block_valid(inode, ei->i_file_acl, 1)) { ext4_error_inode(inode, function, line, 0, "iget: bad extended attribute block %llu", ei->i_file_acl); @@ -4901,7 +4901,7 @@ static void __ext4_update_other_inode_time(struct super_block *sb, (inode->i_state & I_DIRTY_TIME)) { struct ext4_inode_info *ei = EXT4_I(inode); - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); spin_lock(&ei->i_raw_lock); diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 999cf6add39c..36eca3bc036a 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c @@ -202,7 +202,7 @@ static long swap_inode_boot_loader(struct super_block *sb, reset_inode_seed(inode); reset_inode_seed(inode_bl); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); err = ext4_mark_inode_dirty(handle, inode); if (err < 0) { @@ -819,12 +819,12 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) switch (cmd) { case FS_IOC_GETFSMAP: return ext4_ioc_getfsmap(sb, (void __user *)arg); - case EXT4_IOC_GETFLAGS: + case FS_IOC_GETFLAGS: flags = ei->i_flags & EXT4_FL_USER_VISIBLE; if (S_ISREG(inode->i_mode)) flags &= ~EXT4_PROJINHERIT_FL; return put_user(flags, (int __user *) arg); - case EXT4_IOC_SETFLAGS: { + case FS_IOC_SETFLAGS: { int err; if (!inode_owner_or_capable(inode)) @@ -1129,12 +1129,12 @@ 
resizefs_out: case EXT4_IOC_PRECACHE_EXTENTS: return ext4_ext_precache(inode); - case EXT4_IOC_SET_ENCRYPTION_POLICY: + case FS_IOC_SET_ENCRYPTION_POLICY: if (!ext4_has_feature_encrypt(sb)) return -EOPNOTSUPP; return fscrypt_ioctl_set_policy(filp, (const void __user *)arg); - case EXT4_IOC_GET_ENCRYPTION_PWSALT: { + case FS_IOC_GET_ENCRYPTION_PWSALT: { #ifdef CONFIG_FS_ENCRYPTION int err, err2; struct ext4_sb_info *sbi = EXT4_SB(sb); @@ -1174,7 +1174,7 @@ resizefs_out: return -EOPNOTSUPP; #endif } - case EXT4_IOC_GET_ENCRYPTION_POLICY: + case FS_IOC_GET_ENCRYPTION_POLICY: if (!ext4_has_feature_encrypt(sb)) return -EOPNOTSUPP; return fscrypt_ioctl_get_policy(filp, (void __user *)arg); @@ -1236,7 +1236,7 @@ resizefs_out: case EXT4_IOC_GET_ES_CACHE: return ext4_ioctl_get_es_cache(filp, arg); - case EXT4_IOC_FSGETXATTR: + case FS_IOC_FSGETXATTR: { struct fsxattr fa; @@ -1247,7 +1247,7 @@ resizefs_out: return -EFAULT; return 0; } - case EXT4_IOC_FSSETXATTR: + case FS_IOC_FSSETXATTR: { struct fsxattr fa, old_fa; int err; @@ -1313,11 +1313,11 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { /* These are just misnamed, they actually get/put from/to user an int */ switch (cmd) { - case EXT4_IOC32_GETFLAGS: - cmd = EXT4_IOC_GETFLAGS; + case FS_IOC32_GETFLAGS: + cmd = FS_IOC_GETFLAGS; break; - case EXT4_IOC32_SETFLAGS: - cmd = EXT4_IOC_SETFLAGS; + case FS_IOC32_SETFLAGS: + cmd = FS_IOC_SETFLAGS; break; case EXT4_IOC32_GETVERSION: cmd = EXT4_IOC_GETVERSION; @@ -1361,9 +1361,9 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case EXT4_IOC_RESIZE_FS: case FITRIM: case EXT4_IOC_PRECACHE_EXTENTS: - case EXT4_IOC_SET_ENCRYPTION_POLICY: - case EXT4_IOC_GET_ENCRYPTION_PWSALT: - case EXT4_IOC_GET_ENCRYPTION_POLICY: + case FS_IOC_SET_ENCRYPTION_POLICY: + case FS_IOC_GET_ENCRYPTION_PWSALT: + case FS_IOC_GET_ENCRYPTION_POLICY: case FS_IOC_GET_ENCRYPTION_POLICY_EX: case FS_IOC_ADD_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY: @@ -1377,8 +1377,8 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case EXT4_IOC_CLEAR_ES_CACHE: case EXT4_IOC_GETSTATE: case EXT4_IOC_GET_ES_CACHE: - case EXT4_IOC_FSGETXATTR: - case EXT4_IOC_FSSETXATTR: + case FS_IOC_FSGETXATTR: + case FS_IOC_FSSETXATTR: break; default: return -ENOIOCTLCMD; diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c0a331e2feb0..132c118d12e1 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -922,7 +922,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) bh[i] = NULL; continue; } - bh[i] = ext4_read_block_bitmap_nowait(sb, group); + bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); if (IS_ERR(bh[i])) { err = PTR_ERR(bh[i]); bh[i] = NULL; @@ -1279,9 +1279,6 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, e4b->bd_buddy_page = page; e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); - BUG_ON(e4b->bd_bitmap_page == NULL); - BUG_ON(e4b->bd_buddy_page == NULL); - return 0; err: @@ -1743,10 +1740,6 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, } -/* - * regular allocator, for general purposes allocation - */ - static void ext4_mb_check_limits(struct ext4_allocation_context *ac, struct ext4_buddy *e4b, int finish_group) @@ -2119,13 +2112,11 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, BUG_ON(cr < 0 || cr >= 4); - free = grp->bb_free; - if (free == 0) - return false; - if (cr <= 2 && free < ac->ac_g_ex.fe_len) + if 
(unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) return false; - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) + free = grp->bb_free; + if (free == 0) return false; fragments = grp->bb_fragments; @@ -2142,8 +2133,10 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, ((group % flex_size) == 0)) return false; - if ((ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) || - (free / fragments) >= ac->ac_g_ex.fe_len) + if (free < ac->ac_g_ex.fe_len) + return false; + + if (ac->ac_2order > ac->ac_sb->s_blocksize_bits+1) return true; if (grp->bb_largest_free_order < ac->ac_2order) @@ -2177,6 +2170,7 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, { struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); struct super_block *sb = ac->ac_sb; + struct ext4_sb_info *sbi = EXT4_SB(sb); bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; ext4_grpblk_t free; int ret = 0; @@ -2195,7 +2189,25 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, /* We only do this if the grp has never been initialized */ if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { - ret = ext4_mb_init_group(ac->ac_sb, group, GFP_NOFS); + struct ext4_group_desc *gdp = + ext4_get_group_desc(sb, group, NULL); + int ret; + + /* cr=0/1 is a very optimistic search to find large + * good chunks almost for free. If buddy data is not + * ready, then this optimization makes no sense. But + * we never skip the first block group in a flex_bg, + * since this gets used for metadata block allocation, + * and we want to make sure we locate metadata blocks + * in the first block group in the flex_bg if possible. + */ + if (cr < 2 && + (!sbi->s_log_groups_per_flex || + ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && + !(ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) + return 0; + ret = ext4_mb_init_group(sb, group, GFP_NOFS); if (ret) return ret; } @@ -2209,15 +2221,95 @@ out: return ret; } +/* + * Start prefetching @nr block bitmaps starting at @group. + * Return the next group which needs to be prefetched. + */ +ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, + unsigned int nr, int *cnt) +{ + ext4_group_t ngroups = ext4_get_groups_count(sb); + struct buffer_head *bh; + struct blk_plug plug; + + blk_start_plug(&plug); + while (nr-- > 0) { + struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, + NULL); + struct ext4_group_info *grp = ext4_get_group_info(sb, group); + + /* + * Prefetch block groups with free blocks; but don't + * bother if it is marked uninitialized on disk, since + * it won't require I/O to read. Also only try to + * prefetch once, so we avoid getblk() call, which can + * be expensive. + */ + if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) && + EXT4_MB_GRP_NEED_INIT(grp) && + ext4_free_group_clusters(sb, gdp) > 0 && + !(ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { + bh = ext4_read_block_bitmap_nowait(sb, group, true); + if (bh && !IS_ERR(bh)) { + if (!buffer_uptodate(bh) && cnt) + (*cnt)++; + brelse(bh); + } + } + if (++group >= ngroups) + group = 0; + } + blk_finish_plug(&plug); + return group; +} + +/* + * Prefetching reads the block bitmap into the buffer cache; but we + * need to make sure that the buddy bitmap in the page cache has been + * initialized. Note that ext4_mb_init_group() will block if the I/O + * is not yet completed, or indeed if it was not initiated by + * ext4_mb_prefetch did not start the I/O. 
+ * + * TODO: We should actually kick off the buddy bitmap setup in a work + * queue when the buffer I/O is completed, so that we don't block + * waiting for the block allocation bitmap read to finish when + * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). + */ +void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, + unsigned int nr) +{ + while (nr-- > 0) { + struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, + NULL); + struct ext4_group_info *grp = ext4_get_group_info(sb, group); + + if (!group) + group = ext4_get_groups_count(sb); + group--; + grp = ext4_get_group_info(sb, group); + + if (EXT4_MB_GRP_NEED_INIT(grp) && + ext4_free_group_clusters(sb, gdp) > 0 && + !(ext4_has_group_desc_csum(sb) && + (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { + if (ext4_mb_init_group(sb, group, GFP_NOFS)) + break; + } + } +} + static noinline_for_stack int ext4_mb_regular_allocator(struct ext4_allocation_context *ac) { - ext4_group_t ngroups, group, i; + ext4_group_t prefetch_grp = 0, ngroups, group, i; int cr = -1; int err = 0, first_err = 0; + unsigned int nr = 0, prefetch_ios = 0; struct ext4_sb_info *sbi; struct super_block *sb; struct ext4_buddy e4b; + int lost; sb = ac->ac_sb; sbi = EXT4_SB(sb); @@ -2237,8 +2329,8 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac) goto out; /* - * ac->ac2_order is set only if the fe_len is a power of 2 - * if ac2_order is set we also set criteria to 0 so that we + * ac->ac_2order is set only if the fe_len is a power of 2 + * if ac->ac_2order is set we also set criteria to 0 so that we * try exact allocation using buddy. */ i = fls(ac->ac_g_ex.fe_len); @@ -2282,6 +2374,7 @@ repeat: * from the goal value specified */ group = ac->ac_g_ex.fe_group; + prefetch_grp = group; for (i = 0; i < ngroups; group++, i++) { int ret = 0; @@ -2293,6 +2386,29 @@ repeat: if (group >= ngroups) group = 0; + /* + * Batch reads of the block allocation bitmaps + * to get multiple READs in flight; limit + * prefetching at cr=0/1, otherwise mballoc can + * spend a lot of time loading imperfect groups + */ + if ((prefetch_grp == group) && + (cr > 1 || + prefetch_ios < sbi->s_mb_prefetch_limit)) { + unsigned int curr_ios = prefetch_ios; + + nr = sbi->s_mb_prefetch; + if (ext4_has_feature_flex_bg(sb)) { + nr = (group / sbi->s_mb_prefetch) * + sbi->s_mb_prefetch; + nr = nr + sbi->s_mb_prefetch - group; + } + prefetch_grp = ext4_mb_prefetch(sb, group, + nr, &prefetch_ios); + if (prefetch_ios == curr_ios) + nr = 0; + } + /* This now checks without needing the buddy page */ ret = ext4_mb_good_group_nolock(ac, group, cr); if (ret <= 0) { @@ -2341,22 +2457,24 @@ repeat: * We've been searching too long. Let's try to allocate * the best chunk we've found so far */ - ext4_mb_try_best_found(ac, &e4b); if (ac->ac_status != AC_STATUS_FOUND) { /* * Someone more lucky has already allocated it. 
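The prefetch logic added above batches block-bitmap reads while ext4_mb_regular_allocator() scans groups: ext4_read_block_bitmap_nowait() grows an ignore_locked mode (submitted with REQ_RAHEAD and skipping buffers already under I/O), ext4_mb_prefetch() kicks off up to nr reads under a blk_plug, and ext4_mb_prefetch_fini() later initializes buddy data for whatever was read. With flex_bg the batch length is chosen so a batch always stops at an s_mb_prefetch-aligned boundary, i.e. whole flex-group windows are read together. A tiny userspace check of that arithmetic (prefetch = 32 corresponds to, say, 4 groups per flex group times the 8 in-flight IOs used above):

#include <stdio.h>

/* model of the nr computation in ext4_mb_regular_allocator() for flex_bg */
static unsigned int batch_len(unsigned int group, unsigned int prefetch)
{
	unsigned int nr = (group / prefetch) * prefetch;	/* start of the aligned window */

	return nr + prefetch - group;	/* groups left until the window boundary */
}

int main(void)
{
	const unsigned int prefetch = 32;

	printf("%u\n", batch_len(64, prefetch));	/* 32: window start, full batch */
	printf("%u\n", batch_len(70, prefetch));	/* 26: only up to group 95 */
	printf("%u\n", batch_len(95, prefetch));	/* 1: last group of the window */
	return 0;
}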
* The only thing we can do is just take first * found block(s) - printk(KERN_DEBUG "EXT4-fs: someone won our chunk\n"); */ + lost = atomic_inc_return(&sbi->s_mb_lost_chunks); + mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", + ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, + ac->ac_b_ex.fe_len, lost); + ac->ac_b_ex.fe_group = 0; ac->ac_b_ex.fe_start = 0; ac->ac_b_ex.fe_len = 0; ac->ac_status = AC_STATUS_CONTINUE; ac->ac_flags |= EXT4_MB_HINT_FIRST; cr = 3; - atomic_inc(&sbi->s_mb_lost_chunks); goto repeat; } } @@ -2367,6 +2485,10 @@ out: mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, ac->ac_flags, cr, err); + + if (nr) + ext4_mb_prefetch_fini(sb, prefetch_grp, nr); + return err; } @@ -2439,7 +2561,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) for (i = 0; i <= 13; i++) seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? sg.info.bb_counters[i] : 0); - seq_printf(seq, " ]\n"); + seq_puts(seq, " ]\n"); return 0; } @@ -2613,6 +2735,26 @@ static int ext4_mb_init_backend(struct super_block *sb) goto err_freebuddy; } + if (ext4_has_feature_flex_bg(sb)) { + /* a single flex group is supposed to be read by a single IO */ + sbi->s_mb_prefetch = 1 << sbi->s_es->s_log_groups_per_flex; + sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ + } else { + sbi->s_mb_prefetch = 32; + } + if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) + sbi->s_mb_prefetch = ext4_get_groups_count(sb); + /* now many real IOs to prefetch within a single allocation at cr=0 + * given cr=0 is an CPU-related optimization we shouldn't try to + * load too many groups, at some point we should start to use what + * we've got in memory. + * with an average random access time 5ms, it'd take a second to get + * 200 groups (* N with flex_bg), so let's make this limit 4 + */ + sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; + if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) + sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); + return 0; err_freebuddy: @@ -2736,6 +2878,7 @@ int ext4_mb_init(struct super_block *sb) sbi->s_mb_stats = MB_DEFAULT_STATS; sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; + sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC; /* * The default group preallocation is 512, which for 4k block * sizes translates to 2 megabytes. 
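The ext4_mb_init_backend() change above sizes the prefetch batch from the flex_bg geometry (one flex group per read, up to eight reads in flight) and caps both the batch and the cr=0/1 prefetch budget at the total group count. A sketch of just that sizing arithmetic, with invented mount parameters and a hypothetical struct name:

#include <stdio.h>

struct prefetch_tuning {
	unsigned int batch; /* groups prefetched per call           */
	unsigned int limit; /* max prefetch I/Os for one allocation */
};

static struct prefetch_tuning tune_prefetch(int has_flex_bg,
					    unsigned int log_groups_per_flex,
					    unsigned int ngroups)
{
	struct prefetch_tuning t;

	if (has_flex_bg)
		t.batch = (1U << log_groups_per_flex) * 8; /* 8 reads in flight */
	else
		t.batch = 32;
	if (t.batch > ngroups)
		t.batch = ngroups;

	t.limit = t.batch * 4;          /* roughly 4 batches before giving up */
	if (t.limit > ngroups)
		t.limit = ngroups;
	return t;
}

int main(void)
{
	/* 16 groups per flex_bg, 4096 groups total. */
	struct prefetch_tuning t = tune_prefetch(1, 4, 4096);

	printf("batch=%u limit=%u\n", t.batch, t.limit); /* 128, 512 */
	return 0;
}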
However for bigalloc file @@ -3090,7 +3233,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); - if (!ext4_data_block_valid(sbi, block, len)) { + if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { ext4_error(sb, "Allocating blocks %llu-%llu which overlap " "fs metadata", block, block+len); /* File system mounted not to panic on error @@ -3674,6 +3817,26 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); } +static void ext4_mb_mark_pa_deleted(struct super_block *sb, + struct ext4_prealloc_space *pa) +{ + struct ext4_inode_info *ei; + + if (pa->pa_deleted) { + ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", + pa->pa_type, pa->pa_pstart, pa->pa_lstart, + pa->pa_len); + return; + } + + pa->pa_deleted = 1; + + if (pa->pa_type == MB_INODE_PA) { + ei = EXT4_I(pa->pa_inode); + atomic_dec(&ei->i_prealloc_active); + } +} + static void ext4_mb_pa_callback(struct rcu_head *head) { struct ext4_prealloc_space *pa; @@ -3706,7 +3869,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, return; } - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); grp_blk = pa->pa_pstart; @@ -3830,6 +3993,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) spin_lock(pa->pa_obj_lock); list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); spin_unlock(pa->pa_obj_lock); + atomic_inc(&ei->i_prealloc_active); } /* @@ -4040,7 +4204,7 @@ repeat: } /* seems this one can be freed ... */ - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); /* we can trust pa_free ... */ free += pa->pa_free; @@ -4103,7 +4267,7 @@ out_dbg: * * FIXME!! Make sure it is valid at all the call sites */ -void ext4_discard_preallocations(struct inode *inode) +void ext4_discard_preallocations(struct inode *inode, unsigned int needed) { struct ext4_inode_info *ei = EXT4_I(inode); struct super_block *sb = inode->i_sb; @@ -4121,15 +4285,19 @@ void ext4_discard_preallocations(struct inode *inode) mb_debug(sb, "discard preallocation for inode %lu\n", inode->i_ino); - trace_ext4_discard_preallocations(inode); + trace_ext4_discard_preallocations(inode, + atomic_read(&ei->i_prealloc_active), needed); INIT_LIST_HEAD(&list); + if (needed == 0) + needed = UINT_MAX; + repeat: /* first, collect all pa's in the inode */ spin_lock(&ei->i_prealloc_lock); - while (!list_empty(&ei->i_prealloc_list)) { - pa = list_entry(ei->i_prealloc_list.next, + while (!list_empty(&ei->i_prealloc_list) && needed) { + pa = list_entry(ei->i_prealloc_list.prev, struct ext4_prealloc_space, pa_inode_list); BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); spin_lock(&pa->pa_lock); @@ -4146,10 +4314,11 @@ repeat: } if (pa->pa_deleted == 0) { - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); list_add(&pa->u.pa_tmp_list, &list); + needed--; continue; } @@ -4399,7 +4568,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, ac->ac_g_ex = ac->ac_o_ex; ac->ac_flags = ar->flags; - /* we have to define context: we'll we work with a file or + /* we have to define context: we'll work with a file or * locality group. this is a policy, actually */ ext4_mb_group_or_file(ac); @@ -4450,7 +4619,7 @@ ext4_mb_discard_lg_preallocations(struct super_block *sb, BUG_ON(pa->pa_type != MB_GROUP_PA); /* seems this one can be freed ... 
*/ - pa->pa_deleted = 1; + ext4_mb_mark_pa_deleted(sb, pa); spin_unlock(&pa->pa_lock); list_del_rcu(&pa->pa_inode_list); @@ -4549,10 +4718,29 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) } /* + * if per-inode prealloc list is too long, trim some PA + */ +static void ext4_mb_trim_inode_pa(struct inode *inode) +{ + struct ext4_inode_info *ei = EXT4_I(inode); + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + int count, delta; + + count = atomic_read(&ei->i_prealloc_active); + delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1; + if (count > sbi->s_mb_max_inode_prealloc + delta) { + count -= sbi->s_mb_max_inode_prealloc; + ext4_discard_preallocations(inode, count); + } +} + +/* * release all resource we used in allocation */ static int ext4_mb_release_context(struct ext4_allocation_context *ac) { + struct inode *inode = ac->ac_inode; + struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); struct ext4_prealloc_space *pa = ac->ac_pa; if (pa) { @@ -4564,21 +4752,31 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) pa->pa_free -= ac->ac_b_ex.fe_len; pa->pa_len -= ac->ac_b_ex.fe_len; spin_unlock(&pa->pa_lock); + + /* + * We want to add the pa to the right bucket. + * Remove it from the list and while adding + * make sure the list to which we are adding + * doesn't grow big. + */ + if (likely(pa->pa_free)) { + spin_lock(pa->pa_obj_lock); + list_del_rcu(&pa->pa_inode_list); + spin_unlock(pa->pa_obj_lock); + ext4_mb_add_n_trim(ac); + } } - } - if (pa) { - /* - * We want to add the pa to the right bucket. - * Remove it from the list and while adding - * make sure the list to which we are adding - * doesn't grow big. - */ - if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { + + if (pa->pa_type == MB_INODE_PA) { + /* + * treat per-inode prealloc list as a lru list, then try + * to trim the least recently used PA. 
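ext4_mb_trim_inode_pa(), introduced above, gives the per-inode preallocation list some hysteresis: nothing is trimmed until the list is roughly a quarter over s_mb_max_inode_prealloc, and then only the excess over the limit is discarded. The threshold math in isolation, with a hypothetical helper name and plain integers:

#include <stdio.h>

/*
 * How many preallocations to discard: 0 while we are within the limit
 * plus a 25% slack, otherwise everything above the limit.
 */
static unsigned int pa_to_trim(unsigned int count, unsigned int max)
{
	unsigned int delta = (max >> 2) + 1;

	if (count > max + delta)
		return count - max;
	return 0;
}

int main(void)
{
	/* With max = 512: 600 is inside the slack (512 + 129), 700 is not. */
	printf("%u\n", pa_to_trim(600, 512));   /* 0   */
	printf("%u\n", pa_to_trim(700, 512));   /* 188 */
	return 0;
}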
+ */ spin_lock(pa->pa_obj_lock); - list_del_rcu(&pa->pa_inode_list); + list_move(&pa->pa_inode_list, &ei->i_prealloc_list); spin_unlock(pa->pa_obj_lock); - ext4_mb_add_n_trim(ac); } + ext4_mb_put_pa(ac, ac->ac_sb, pa); } if (ac->ac_bitmap_page) @@ -4588,6 +4786,7 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) mutex_unlock(&ac->ac_lg->lg_mutex); ext4_mb_collect_stats(ac); + ext4_mb_trim_inode_pa(inode); return 0; } @@ -4915,7 +5114,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, sbi = EXT4_SB(sb); if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && - !ext4_data_block_valid(sbi, block, count)) { + !ext4_inode_block_valid(inode, block, count)) { ext4_error(sb, "Freeing blocks not in datazone - " "block = %llu, count = %lu", block, count); goto error_return; diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index 6b4d17c2935d..e75b4749aa1c 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h @@ -73,6 +73,10 @@ */ #define MB_DEFAULT_GROUP_PREALLOC 512 +/* + * maximum length of inode prealloc list + */ +#define MB_DEFAULT_MAX_INODE_PREALLOC 512 struct ext4_free_data { /* this links the free block information from sb_info */ diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index 1ed86fb6c302..0d601b822875 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c @@ -686,8 +686,8 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk, out: if (*moved_len) { - ext4_discard_preallocations(orig_inode); - ext4_discard_preallocations(donor_inode); + ext4_discard_preallocations(orig_inode, 0); + ext4_discard_preallocations(donor_inode, 0); } ext4_ext_drop_refs(path); diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 56738b538ddf..153a9fbe1dd0 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1396,8 +1396,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size, ext4_match(dir, fname, de)) { /* found a match - just to be sure, do * a full check */ - if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data, - bh->b_size, offset)) + if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf, + buf_size, offset)) return -1; *res_dir = de; return 1; @@ -1858,7 +1858,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, blocksize, hinfo, map); map -= count; dx_sort_map(map, count); - /* Split the existing block in the middle, size-wise */ + /* Ensure that neither split block is over half full */ size = 0; move = 0; for (i = count-1; i >= 0; i--) { @@ -1868,8 +1868,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, size += map[i].size; move++; } - /* map index at which we will split */ - split = count - move; + /* + * map index at which we will split + * + * If the sum of active entries didn't exceed half the block size, just + * split it in half by count; each resulting block will have at least + * half the space free. 
+ */ + if (i > 0) + split = count - move; + else + split = count/2; + hash2 = map[split].hash; continued = hash2 == map[split - 1].hash; dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n", @@ -2455,8 +2465,7 @@ cleanup: * ext4_generic_delete_entry deletes a directory entry by merging it * with the previous entry */ -int ext4_generic_delete_entry(handle_t *handle, - struct inode *dir, +int ext4_generic_delete_entry(struct inode *dir, struct ext4_dir_entry_2 *de_del, struct buffer_head *bh, void *entry_buf, @@ -2472,7 +2481,7 @@ int ext4_generic_delete_entry(handle_t *handle, de = (struct ext4_dir_entry_2 *)entry_buf; while (i < buf_size - csum_size) { if (ext4_check_dir_entry(dir, NULL, de, bh, - bh->b_data, bh->b_size, i)) + entry_buf, buf_size, i)) return -EFSCORRUPTED; if (de == de_del) { if (pde) @@ -2517,8 +2526,7 @@ static int ext4_delete_entry(handle_t *handle, if (unlikely(err)) goto out; - err = ext4_generic_delete_entry(handle, dir, de_del, - bh, bh->b_data, + err = ext4_generic_delete_entry(dir, de_del, bh, bh->b_data, dir->i_sb->s_blocksize, csum_size); if (err) goto out; @@ -3193,30 +3201,33 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) * in separate transaction */ retval = dquot_initialize(dir); if (retval) - return retval; + goto out_trace; retval = dquot_initialize(d_inode(dentry)); if (retval) - return retval; + goto out_trace; - retval = -ENOENT; bh = ext4_find_entry(dir, &dentry->d_name, &de, NULL); - if (IS_ERR(bh)) - return PTR_ERR(bh); - if (!bh) - goto end_unlink; + if (IS_ERR(bh)) { + retval = PTR_ERR(bh); + goto out_trace; + } + if (!bh) { + retval = -ENOENT; + goto out_trace; + } inode = d_inode(dentry); - retval = -EFSCORRUPTED; - if (le32_to_cpu(de->inode) != inode->i_ino) - goto end_unlink; + if (le32_to_cpu(de->inode) != inode->i_ino) { + retval = -EFSCORRUPTED; + goto out_bh; + } handle = ext4_journal_start(dir, EXT4_HT_DIR, EXT4_DATA_TRANS_BLOCKS(dir->i_sb)); if (IS_ERR(handle)) { retval = PTR_ERR(handle); - handle = NULL; - goto end_unlink; + goto out_bh; } if (IS_DIRSYNC(dir)) @@ -3224,12 +3235,12 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) retval = ext4_delete_entry(handle, dir, de, bh); if (retval) - goto end_unlink; + goto out_handle; dir->i_ctime = dir->i_mtime = current_time(dir); ext4_update_dx_flag(dir); retval = ext4_mark_inode_dirty(handle, dir); if (retval) - goto end_unlink; + goto out_handle; if (inode->i_nlink == 0) ext4_warning_inode(inode, "Deleting file '%.*s' with no links", dentry->d_name.len, dentry->d_name.name); @@ -3251,10 +3262,11 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) d_invalidate(dentry); #endif -end_unlink: +out_handle: + ext4_journal_stop(handle); +out_bh: brelse(bh); - if (handle) - ext4_journal_stop(handle); +out_trace: trace_ext4_unlink_exit(dentry, retval); return retval; } diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index de6fe969f773..defd2e10dfd1 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c @@ -402,6 +402,7 @@ static void io_submit_init_bio(struct ext4_io_submit *io, * __GFP_DIRECT_RECLAIM is set, see comments for bio_alloc_bioset(). 
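Stepping back to the do_split() change in fs/ext4/namei.c above: when the size-based scan consumes every map entry (all entries are small), splitting by entry count guarantees each resulting block keeps at least half its space free instead of moving nearly everything to one side. A simplified userspace model of the split selection; the entry sizes and block size below are invented:

#include <stdio.h>

/*
 * Pick the index at which a full directory block is split in two.
 * Entries [split..count) move to the new block.  Sizes are summed from
 * the tail until adding another entry would put more than half of it
 * past the middle of the block; if the scan consumes every entry, fall
 * back to an even split by count.
 */
static int choose_split(const int *size, int count, int blocksize)
{
	int sum = 0, move = 0, i;

	for (i = count - 1; i >= 0; i--) {
		if (sum + size[i] / 2 > blocksize / 2)
			break;
		sum += size[i];
		move++;
	}
	return i > 0 ? count - move : count / 2;
}

int main(void)
{
	int big[]  = { 500, 500, 500, 2600 };     /* scan stops early      */
	int tiny[] = { 16, 16, 16, 16, 16, 16 };  /* scan consumes all     */

	printf("%d\n", choose_split(big, 4, 4096));  /* 3: only last entry moves */
	printf("%d\n", choose_split(tiny, 6, 4096)); /* 3: falls back to count/2 */
	return 0;
}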
*/ bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); + fscrypt_set_bio_crypt_ctx_bh(bio, bh, GFP_NOIO); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio_set_dev(bio, bh->b_bdev); bio->bi_end_io = ext4_end_bio; @@ -418,7 +419,8 @@ static void io_submit_add_bh(struct ext4_io_submit *io, { int ret; - if (io->io_bio && bh->b_blocknr != io->io_next_block) { + if (io->io_bio && (bh->b_blocknr != io->io_next_block || + !fscrypt_mergeable_bio_bh(io->io_bio, bh))) { submit_and_retry: ext4_io_submit(io); } @@ -506,7 +508,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io, * (e.g. holes) to be unnecessarily encrypted, but this is rare and * can't happen in the common case of blocksize == PAGE_SIZE. */ - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode) && nr_to_submit) { + if (fscrypt_inode_uses_fs_layer_crypto(inode) && nr_to_submit) { gfp_t gfp_flags = GFP_NOFS; unsigned int enc_bytes = round_up(len, i_blocksize(inode)); diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c index 5761e9961682..f014c5e473a9 100644 --- a/fs/ext4/readpage.c +++ b/fs/ext4/readpage.c @@ -140,7 +140,7 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx) return; } ctx->cur_step++; - /* fall-through */ + fallthrough; case STEP_VERITY: if (ctx->enabled_steps & (1 << STEP_VERITY)) { INIT_WORK(&ctx->work, verity_work); @@ -148,7 +148,7 @@ static void bio_post_read_processing(struct bio_post_read_ctx *ctx) return; } ctx->cur_step++; - /* fall-through */ + fallthrough; default: __read_end_io(ctx->bio); } @@ -195,7 +195,7 @@ static void ext4_set_bio_post_read_ctx(struct bio *bio, { unsigned int post_read_steps = 0; - if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (ext4_need_verity(inode, first_idx)) @@ -230,6 +230,7 @@ int ext4_mpage_readpages(struct inode *inode, const unsigned blkbits = inode->i_blkbits; const unsigned blocks_per_page = PAGE_SIZE >> blkbits; const unsigned blocksize = 1 << blkbits; + sector_t next_block; sector_t block_in_file; sector_t last_block; sector_t last_block_in_file; @@ -258,7 +259,8 @@ int ext4_mpage_readpages(struct inode *inode, if (page_has_buffers(page)) goto confused; - block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits); + block_in_file = next_block = + (sector_t)page->index << (PAGE_SHIFT - blkbits); last_block = block_in_file + nr_pages * blocks_per_page; last_block_in_file = (ext4_readpage_limit(inode) + blocksize - 1) >> blkbits; @@ -358,7 +360,8 @@ int ext4_mpage_readpages(struct inode *inode, * This page will go to BIO. Do we need to send this * BIO off first? 
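In the readpage path above, a software decryption step is queued only when the inode uses fs-layer crypto; with inline crypto the block layer returns plaintext and only steps such as verity remain as post-read work. A small sketch of assembling such a step mask; the enum and the two boolean parameters are illustrative, not the ext4 definitions:

#include <stdbool.h>
#include <stdio.h>

enum post_read_step { STEP_DECRYPT, STEP_VERITY };

/*
 * Decide which post-read processing steps a bio needs.  Inline-crypto
 * files skip STEP_DECRYPT because the block layer already decrypted
 * the data; verity verification is always done after the read.
 */
static unsigned int post_read_steps(bool uses_fs_layer_crypto,
				    bool needs_verity)
{
	unsigned int steps = 0;

	if (uses_fs_layer_crypto)
		steps |= 1U << STEP_DECRYPT;
	if (needs_verity)
		steps |= 1U << STEP_VERITY;
	return steps;
}

int main(void)
{
	/* Encrypted via inline crypto + verity: only the verity bit is set. */
	printf("%#x\n", post_read_steps(false, true));
	/* Fs-layer crypto without verity: only the decrypt bit is set. */
	printf("%#x\n", post_read_steps(true, false));
	return 0;
}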
*/ - if (bio && (last_block_in_bio != blocks[0] - 1)) { + if (bio && (last_block_in_bio != blocks[0] - 1 || + !fscrypt_mergeable_bio(bio, inode, next_block))) { submit_and_realloc: submit_bio(bio); bio = NULL; @@ -370,6 +373,8 @@ int ext4_mpage_readpages(struct inode *inode, */ bio = bio_alloc(GFP_KERNEL, min_t(int, nr_pages, BIO_MAX_PAGES)); + fscrypt_set_bio_crypt_ctx(bio, inode, next_block, + GFP_KERNEL); ext4_set_bio_post_read_ctx(bio, inode, page->index); bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 330957ed1f05..ea425b49b345 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -66,10 +66,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *, unsigned long journal_devnum); static int ext4_show_options(struct seq_file *seq, struct dentry *root); static int ext4_commit_super(struct super_block *sb, int sync); -static void ext4_mark_recovery_complete(struct super_block *sb, +static int ext4_mark_recovery_complete(struct super_block *sb, struct ext4_super_block *es); -static void ext4_clear_journal_err(struct super_block *sb, - struct ext4_super_block *es); +static int ext4_clear_journal_err(struct super_block *sb, + struct ext4_super_block *es); static int ext4_sync_fs(struct super_block *sb, int wait); static int ext4_remount(struct super_block *sb, int *flags, char *data); static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); @@ -744,6 +744,7 @@ void __ext4_msg(struct super_block *sb, struct va_format vaf; va_list args; + atomic_inc(&EXT4_SB(sb)->s_msg_count); if (!___ratelimit(&(EXT4_SB(sb)->s_msg_ratelimit_state), "EXT4-fs")) return; @@ -754,9 +755,12 @@ void __ext4_msg(struct super_block *sb, va_end(args); } -#define ext4_warning_ratelimit(sb) \ - ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), \ - "EXT4-fs warning") +static int ext4_warning_ratelimit(struct super_block *sb) +{ + atomic_inc(&EXT4_SB(sb)->s_warning_count); + return ___ratelimit(&(EXT4_SB(sb)->s_warning_ratelimit_state), + "EXT4-fs warning"); +} void __ext4_warning(struct super_block *sb, const char *function, unsigned int line, const char *fmt, ...) 
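The counters added above are bumped before the ratelimit check, so the values later exported through sysfs reflect every message the filesystem attempted, including the ones the limiter suppressed. A toy model of that ordering, using a simple token count in place of ___ratelimit():

#include <stdio.h>

static unsigned long msg_count;   /* all messages, printed or not      */
static int tokens = 3;            /* crude stand-in for a rate limiter */

static void fs_msg(const char *text)
{
	msg_count++;              /* counted even when suppressed below */
	if (tokens <= 0)
		return;
	tokens--;
	printf("EXT4-fs: %s\n", text);
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		fs_msg("something noisy happened");
	/* Only 3 lines were printed, but the counter saw all 10. */
	printf("msg_count = %lu\n", msg_count);
	return 0;
}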
@@ -1123,6 +1127,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) inode_set_iversion(&ei->vfs_inode, 1); spin_lock_init(&ei->i_raw_lock); INIT_LIST_HEAD(&ei->i_prealloc_list); + atomic_set(&ei->i_prealloc_active, 0); spin_lock_init(&ei->i_prealloc_lock); ext4_es_init_tree(&ei->i_es_tree); rwlock_init(&ei->i_es_lock); @@ -1216,7 +1221,7 @@ void ext4_clear_inode(struct inode *inode) { invalidate_inode_buffers(inode); clear_inode(inode); - ext4_discard_preallocations(inode); + ext4_discard_preallocations(inode, 0); ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); dquot_drop(inode); if (EXT4_I(inode)->jinode) { @@ -1288,8 +1293,8 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, if (!page_has_buffers(page)) return 0; if (journal) - return jbd2_journal_try_to_free_buffers(journal, page, - wait & ~__GFP_DIRECT_RECLAIM); + return jbd2_journal_try_to_free_buffers(journal, page); + return try_to_free_buffers(page); } @@ -1508,6 +1513,7 @@ enum { Opt_journal_path, Opt_journal_checksum, Opt_journal_async_commit, Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback, Opt_data_err_abort, Opt_data_err_ignore, Opt_test_dummy_encryption, + Opt_inlinecrypt, Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota, Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota, Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err, @@ -1521,6 +1527,7 @@ enum { Opt_dioread_nolock, Opt_dioread_lock, Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, Opt_max_dir_size_kb, Opt_nojournal_checksum, Opt_nombcache, + Opt_prefetch_block_bitmaps, }; static const match_table_t tokens = { @@ -1610,8 +1617,10 @@ static const match_table_t tokens = { {Opt_max_dir_size_kb, "max_dir_size_kb=%u"}, {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, + {Opt_inlinecrypt, "inlinecrypt"}, {Opt_nombcache, "nombcache"}, {Opt_nombcache, "no_mbcache"}, /* for backward compatibility */ + {Opt_prefetch_block_bitmaps, "prefetch_block_bitmaps"}, {Opt_removed, "check=none"}, /* mount option from ext2/3 */ {Opt_removed, "nocheck"}, /* mount option from ext2/3 */ {Opt_removed, "reservation"}, /* mount option from ext2/3 */ @@ -1829,6 +1838,8 @@ static const struct mount_opts { {Opt_max_dir_size_kb, 0, MOPT_GTE0}, {Opt_test_dummy_encryption, 0, MOPT_STRING}, {Opt_nombcache, EXT4_MOUNT_NO_MBCACHE, MOPT_SET}, + {Opt_prefetch_block_bitmaps, EXT4_MOUNT_PREFETCH_BLOCK_BITMAPS, + MOPT_SET}, {Opt_err, 0, 0} }; @@ -1946,6 +1957,13 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, case Opt_nolazytime: sb->s_flags &= ~SB_LAZYTIME; return 1; + case Opt_inlinecrypt: +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + sb->s_flags |= SB_INLINECRYPT; +#else + ext4_msg(sb, KERN_ERR, "inline encryption not supported"); +#endif + return 1; } for (m = ext4_mount_opts; m->token != Opt_err; m++) @@ -2404,6 +2422,9 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, fscrypt_show_test_dummy_encryption(seq, sep, sb); + if (sb->s_flags & SB_INLINECRYPT) + SEQ_OPTS_PUTS("inlinecrypt"); + if (test_opt(sb, DAX_ALWAYS)) { if (IS_EXT2_SB(sb)) SEQ_OPTS_PUTS("dax"); @@ -3201,15 +3222,34 @@ static void print_daily_error_info(struct timer_list *t) static int ext4_run_li_request(struct ext4_li_request *elr) { struct ext4_group_desc *gdp = NULL; - ext4_group_t group, ngroups; - struct super_block *sb; + struct super_block *sb = elr->lr_super; + ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; + 
ext4_group_t group = elr->lr_next_group; unsigned long timeout = 0; + unsigned int prefetch_ios = 0; int ret = 0; - sb = elr->lr_super; - ngroups = EXT4_SB(sb)->s_groups_count; + if (elr->lr_mode == EXT4_LI_MODE_PREFETCH_BBITMAP) { + elr->lr_next_group = ext4_mb_prefetch(sb, group, + EXT4_SB(sb)->s_mb_prefetch, &prefetch_ios); + if (prefetch_ios) + ext4_mb_prefetch_fini(sb, elr->lr_next_group, + prefetch_ios); + trace_ext4_prefetch_bitmaps(sb, group, elr->lr_next_group, + prefetch_ios); + if (group >= elr->lr_next_group) { + ret = 1; + if (elr->lr_first_not_zeroed != ngroups && + !sb_rdonly(sb) && test_opt(sb, INIT_INODE_TABLE)) { + elr->lr_next_group = elr->lr_first_not_zeroed; + elr->lr_mode = EXT4_LI_MODE_ITABLE; + ret = 0; + } + } + return ret; + } - for (group = elr->lr_next_group; group < ngroups; group++) { + for (; group < ngroups; group++) { gdp = ext4_get_group_desc(sb, group, NULL); if (!gdp) { ret = 1; @@ -3227,9 +3267,10 @@ static int ext4_run_li_request(struct ext4_li_request *elr) timeout = jiffies; ret = ext4_init_inode_table(sb, group, elr->lr_timeout ? 0 : 1); + trace_ext4_lazy_itable_init(sb, group); if (elr->lr_timeout == 0) { timeout = (jiffies - timeout) * - elr->lr_sbi->s_li_wait_mult; + EXT4_SB(elr->lr_super)->s_li_wait_mult; elr->lr_timeout = timeout; } elr->lr_next_sched = jiffies + elr->lr_timeout; @@ -3244,15 +3285,11 @@ static int ext4_run_li_request(struct ext4_li_request *elr) */ static void ext4_remove_li_request(struct ext4_li_request *elr) { - struct ext4_sb_info *sbi; - if (!elr) return; - sbi = elr->lr_sbi; - list_del(&elr->lr_request); - sbi->s_li_request = NULL; + EXT4_SB(elr->lr_super)->s_li_request = NULL; kfree(elr); } @@ -3461,7 +3498,6 @@ static int ext4_li_info_new(void) static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, ext4_group_t start) { - struct ext4_sb_info *sbi = EXT4_SB(sb); struct ext4_li_request *elr; elr = kzalloc(sizeof(*elr), GFP_KERNEL); @@ -3469,8 +3505,13 @@ static struct ext4_li_request *ext4_li_request_new(struct super_block *sb, return NULL; elr->lr_super = sb; - elr->lr_sbi = sbi; - elr->lr_next_group = start; + elr->lr_first_not_zeroed = start; + if (test_opt(sb, PREFETCH_BLOCK_BITMAPS)) + elr->lr_mode = EXT4_LI_MODE_PREFETCH_BBITMAP; + else { + elr->lr_mode = EXT4_LI_MODE_ITABLE; + elr->lr_next_group = start; + } /* * Randomize first schedule time of the request to @@ -3500,8 +3541,9 @@ int ext4_register_li_request(struct super_block *sb, goto out; } - if (first_not_zeroed == ngroups || sb_rdonly(sb) || - !test_opt(sb, INIT_INODE_TABLE)) + if (!test_opt(sb, PREFETCH_BLOCK_BITMAPS) && + (first_not_zeroed == ngroups || sb_rdonly(sb) || + !test_opt(sb, INIT_INODE_TABLE))) goto out; elr = ext4_li_request_new(sb, first_not_zeroed); @@ -4698,11 +4740,13 @@ no_journal: ext4_set_resv_clusters(sb); - err = ext4_setup_system_zone(sb); - if (err) { - ext4_msg(sb, KERN_ERR, "failed to initialize system " - "zone (%d)", err); - goto failed_mount4a; + if (test_opt(sb, BLOCK_VALIDITY)) { + err = ext4_setup_system_zone(sb); + if (err) { + ext4_msg(sb, KERN_ERR, "failed to initialize system " + "zone (%d)", err); + goto failed_mount4a; + } } ext4_ext_init(sb); @@ -4765,12 +4809,23 @@ no_journal: } #endif /* CONFIG_QUOTA */ + /* + * Save the original bdev mapping's wb_err value which could be + * used to detect the metadata async write error. 
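The wb_err handling this comment introduces (continued just below) relies on errseq_check_and_advance(): it reports whether the bdev mapping saw a writeback error since the saved cursor and advances the cursor so the next check starts from "now". A userspace approximation with a bare sequence counter; the real errseq_t also packs the errno and a seen flag:

#include <stdio.h>

/* Simplified error sequence: bumped every time a writeback error occurs. */
static unsigned int wb_err_seq;

/* Report whether an error happened since *since, then advance the cursor. */
static int check_and_advance(unsigned int *since)
{
	int newly_seen = wb_err_seq != *since;

	*since = wb_err_seq;
	return newly_seen;
}

int main(void)
{
	unsigned int mount_cursor = 0;

	check_and_advance(&mount_cursor);  /* snapshot at mount time        */

	wb_err_seq++;                      /* a metadata write fails later  */

	/* The first check reports it, the second no longer does. */
	printf("%d\n", check_and_advance(&mount_cursor)); /* 1 */
	printf("%d\n", check_and_advance(&mount_cursor)); /* 0 */
	return 0;
}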
+ */ + spin_lock_init(&sbi->s_bdev_wb_lock); + if (!sb_rdonly(sb)) + errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, + &sbi->s_bdev_wb_err); + sb->s_bdev->bd_super = sb; EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS; ext4_orphan_cleanup(sb, es); EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS; if (needs_recovery) { ext4_msg(sb, KERN_INFO, "recovery complete"); - ext4_mark_recovery_complete(sb, es); + err = ext4_mark_recovery_complete(sb, es); + if (err) + goto failed_mount8; } if (EXT4_SB(sb)->s_journal) { if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) @@ -4804,6 +4859,8 @@ no_journal: ratelimit_state_init(&sbi->s_err_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_warning_ratelimit_state, 5 * HZ, 10); ratelimit_state_init(&sbi->s_msg_ratelimit_state, 5 * HZ, 10); + atomic_set(&sbi->s_warning_count, 0); + atomic_set(&sbi->s_msg_count, 0); kfree(orig_data); return 0; @@ -4813,10 +4870,8 @@ cantfind_ext4: ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem"); goto failed_mount; -#ifdef CONFIG_QUOTA failed_mount8: ext4_unregister_sysfs(sb); -#endif failed_mount7: ext4_unregister_li_request(sb); failed_mount6: @@ -4956,7 +5011,8 @@ static journal_t *ext4_get_journal(struct super_block *sb, struct inode *journal_inode; journal_t *journal; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; journal_inode = ext4_get_journal_inode(sb, journal_inum); if (!journal_inode) @@ -4986,7 +5042,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, struct ext4_super_block *es; struct block_device *bdev; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return NULL; bdev = ext4_blkdev_get(j_dev, sb); if (bdev == NULL) @@ -5077,8 +5134,10 @@ static int ext4_load_journal(struct super_block *sb, dev_t journal_dev; int err = 0; int really_read_only; + int journal_dev_ro; - BUG_ON(!ext4_has_feature_journal(sb)); + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb))) + return -EFSCORRUPTED; if (journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5088,7 +5147,31 @@ static int ext4_load_journal(struct super_block *sb, } else journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev)); - really_read_only = bdev_read_only(sb->s_bdev); + if (journal_inum && journal_dev) { + ext4_msg(sb, KERN_ERR, + "filesystem has both journal inode and journal device!"); + return -EINVAL; + } + + if (journal_inum) { + journal = ext4_get_journal(sb, journal_inum); + if (!journal) + return -EINVAL; + } else { + journal = ext4_get_dev_journal(sb, journal_dev); + if (!journal) + return -EINVAL; + } + + journal_dev_ro = bdev_read_only(journal->j_dev); + really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro; + + if (journal_dev_ro && !sb_rdonly(sb)) { + ext4_msg(sb, KERN_ERR, + "journal device read-only, try mounting with '-o ro'"); + err = -EROFS; + goto err_out; + } /* * Are we loading a blank journal or performing recovery after a @@ -5103,27 +5186,14 @@ static int ext4_load_journal(struct super_block *sb, ext4_msg(sb, KERN_ERR, "write access " "unavailable, cannot proceed " "(try mounting with noload)"); - return -EROFS; + err = -EROFS; + goto err_out; } ext4_msg(sb, KERN_INFO, "write access will " "be enabled during recovery"); } } - if (journal_inum && journal_dev) { - ext4_msg(sb, KERN_ERR, "filesystem has both journal " - "and inode journals!"); - return -EINVAL; - } - - if (journal_inum) { - if (!(journal = ext4_get_journal(sb, journal_inum))) - return 
-EINVAL; - } else { - if (!(journal = ext4_get_dev_journal(sb, journal_dev))) - return -EINVAL; - } - if (!(journal->j_flags & JBD2_BARRIER)) ext4_msg(sb, KERN_INFO, "barriers disabled"); @@ -5143,12 +5213,16 @@ static int ext4_load_journal(struct super_block *sb, if (err) { ext4_msg(sb, KERN_ERR, "error loading journal"); - jbd2_journal_destroy(journal); - return err; + goto err_out; } EXT4_SB(sb)->s_journal = journal; - ext4_clear_journal_err(sb, es); + err = ext4_clear_journal_err(sb, es); + if (err) { + EXT4_SB(sb)->s_journal = NULL; + jbd2_journal_destroy(journal); + return err; + } if (!really_read_only && journal_devnum && journal_devnum != le32_to_cpu(es->s_journal_dev)) { @@ -5159,6 +5233,10 @@ static int ext4_load_journal(struct super_block *sb, } return 0; + +err_out: + jbd2_journal_destroy(journal); + return err; } static int ext4_commit_super(struct super_block *sb, int sync) @@ -5171,13 +5249,6 @@ static int ext4_commit_super(struct super_block *sb, int sync) return error; /* - * The superblock bh should be mapped, but it might not be if the - * device was hot-removed. Not much we can do but fail the I/O. - */ - if (!buffer_mapped(sbh)) - return error; - - /* * If the file system is mounted read-only, don't update the * superblock write time. This avoids updating the superblock * write time when we are mounting the root file system @@ -5244,26 +5315,32 @@ static int ext4_commit_super(struct super_block *sb, int sync) * remounting) the filesystem readonly, then we will end up with a * consistent fs on disk. Record that fact. */ -static void ext4_mark_recovery_complete(struct super_block *sb, - struct ext4_super_block *es) +static int ext4_mark_recovery_complete(struct super_block *sb, + struct ext4_super_block *es) { + int err; journal_t *journal = EXT4_SB(sb)->s_journal; if (!ext4_has_feature_journal(sb)) { - BUG_ON(journal != NULL); - return; + if (journal != NULL) { + ext4_error(sb, "Journal got removed while the fs was " + "mounted!"); + return -EFSCORRUPTED; + } + return 0; } jbd2_journal_lock_updates(journal); - if (jbd2_journal_flush(journal) < 0) + err = jbd2_journal_flush(journal); + if (err < 0) goto out; if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) { ext4_clear_feature_journal_needs_recovery(sb); ext4_commit_super(sb, 1); } - out: jbd2_journal_unlock_updates(journal); + return err; } /* @@ -5271,14 +5348,17 @@ out: * has recorded an error from a previous lifetime, move that error to the * main filesystem now. 
*/ -static void ext4_clear_journal_err(struct super_block *sb, +static int ext4_clear_journal_err(struct super_block *sb, struct ext4_super_block *es) { journal_t *journal; int j_errno; const char *errstr; - BUG_ON(!ext4_has_feature_journal(sb)); + if (!ext4_has_feature_journal(sb)) { + ext4_error(sb, "Journal got removed while the fs was mounted!"); + return -EFSCORRUPTED; + } journal = EXT4_SB(sb)->s_journal; @@ -5303,6 +5383,7 @@ static void ext4_clear_journal_err(struct super_block *sb, jbd2_journal_clear_err(journal); jbd2_journal_update_sb_errno(journal); } + return 0; } /* @@ -5445,7 +5526,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) { struct ext4_super_block *es; struct ext4_sb_info *sbi = EXT4_SB(sb); - unsigned long old_sb_flags; + unsigned long old_sb_flags, vfs_flags; struct ext4_mount_options old_opts; int enable_quota = 0; ext4_group_t g; @@ -5488,6 +5569,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from *flags to s_flags + */ + vfs_flags = SB_LAZYTIME | SB_I_VERSION; + sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags); + if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; @@ -5541,9 +5630,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) set_task_ioprio(sbi->s_journal->j_task, journal_ioprio); } - if (*flags & SB_LAZYTIME) - sb->s_flags |= SB_LAZYTIME; - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) { err = -EROFS; @@ -5573,8 +5659,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) (sbi->s_mount_state & EXT4_VALID_FS)) es->s_state = cpu_to_le16(sbi->s_mount_state); - if (sbi->s_journal) + if (sbi->s_journal) { + /* + * We let remount-ro finish even if marking fs + * as clean failed... + */ ext4_mark_recovery_complete(sb, es); + } if (sbi->s_mmp_tsk) kthread_stop(sbi->s_mmp_tsk); } else { @@ -5617,13 +5708,24 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } /* + * Update the original bdev mapping's wb_err value + * which could be used to detect the metadata async + * write error. + */ + errseq_check_and_advance(&sb->s_bdev->bd_inode->i_mapping->wb_err, + &sbi->s_bdev_wb_err); + + /* * Mounting a RDONLY partition read-write, so reread * and store the current valid flag. (It may have * been changed by e2fsck since we originally mounted * the partition.) */ - if (sbi->s_journal) - ext4_clear_journal_err(sb, es); + if (sbi->s_journal) { + err = ext4_clear_journal_err(sb, es); + if (err) + goto restore_opts; + } sbi->s_mount_state = le16_to_cpu(es->s_state); err = ext4_setup_super(sb, es, 0); @@ -5653,7 +5755,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_register_li_request(sb, first_not_zeroed); } - ext4_setup_system_zone(sb); + /* + * Handle creation of system zone data early because it can fail. + * Releasing of existing data is done when we are sure remount will + * succeed. 
+ */ + if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) { + err = ext4_setup_system_zone(sb); + if (err) + goto restore_opts; + } + if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) { err = ext4_commit_super(sb, 1); if (err) @@ -5674,8 +5786,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) } } #endif + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); + + /* + * Some options can be enabled by ext4 and/or by VFS mount flag + * either way we need to make sure it matches in both *flags and + * s_flags. Copy those selected flags from s_flags to *flags + */ + *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags); - *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME); ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data); kfree(orig_data); return 0; @@ -5689,6 +5809,8 @@ restore_opts: sbi->s_commit_interval = old_opts.s_commit_interval; sbi->s_min_batch_time = old_opts.s_min_batch_time; sbi->s_max_batch_time = old_opts.s_max_batch_time; + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks) + ext4_release_system_zone(sb); #ifdef CONFIG_QUOTA sbi->s_jquota_fmt = old_opts.s_jquota_fmt; for (i = 0; i < EXT4_MAXQUOTAS; i++) { diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index 6c9fc9e21c13..bfabb799fa45 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -189,6 +189,9 @@ static struct ext4_attr ext4_attr_##_name = { \ #define EXT4_RW_ATTR_SBI_UL(_name,_elname) \ EXT4_ATTR_OFFSET(_name, 0644, pointer_ul, ext4_sb_info, _elname) +#define EXT4_RO_ATTR_SBI_ATOMIC(_name,_elname) \ + EXT4_ATTR_OFFSET(_name, 0444, pointer_atomic, ext4_sb_info, _elname) + #define EXT4_ATTR_PTR(_name,_mode,_id,_ptr) \ static struct ext4_attr ext4_attr_##_name = { \ .attr = {.name = __stringify(_name), .mode = _mode }, \ @@ -215,6 +218,7 @@ EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan); EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs); EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); +EXT4_RW_ATTR_SBI_UI(mb_max_inode_prealloc, s_mb_max_inode_prealloc); EXT4_RW_ATTR_SBI_UI(extent_max_zeroout_kb, s_extent_max_zeroout_kb); EXT4_ATTR(trigger_fs_error, 0200, trigger_test_error); EXT4_RW_ATTR_SBI_UI(err_ratelimit_interval_ms, s_err_ratelimit_state.interval); @@ -226,6 +230,8 @@ EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst); #ifdef CONFIG_EXT4_DEBUG EXT4_RW_ATTR_SBI_UL(simulate_fail, s_simulate_fail); #endif +EXT4_RO_ATTR_SBI_ATOMIC(warning_count, s_warning_count); +EXT4_RO_ATTR_SBI_ATOMIC(msg_count, s_msg_count); EXT4_RO_ATTR_ES_UI(errors_count, s_error_count); EXT4_RO_ATTR_ES_U8(first_error_errcode, s_first_error_errcode); EXT4_RO_ATTR_ES_U8(last_error_errcode, s_last_error_errcode); @@ -240,6 +246,8 @@ EXT4_RO_ATTR_ES_STRING(last_error_func, s_last_error_func, 32); EXT4_ATTR(first_error_time, 0444, first_error_time); EXT4_ATTR(last_error_time, 0444, last_error_time); EXT4_ATTR(journal_task, 0444, journal_task); +EXT4_RW_ATTR_SBI_UI(mb_prefetch, s_mb_prefetch); +EXT4_RW_ATTR_SBI_UI(mb_prefetch_limit, s_mb_prefetch_limit); static unsigned int old_bump_val = 128; EXT4_ATTR_PTR(max_writeback_mb_bump, 0444, pointer_ui, &old_bump_val); @@ -257,6 +265,7 @@ static struct attribute *ext4_attrs[] = { ATTR_LIST(mb_order2_req), ATTR_LIST(mb_stream_req), ATTR_LIST(mb_group_prealloc), + ATTR_LIST(mb_max_inode_prealloc), ATTR_LIST(max_writeback_mb_bump), ATTR_LIST(extent_max_zeroout_kb), ATTR_LIST(trigger_fs_error), @@ -267,6 +276,8 @@ 
static struct attribute *ext4_attrs[] = { ATTR_LIST(msg_ratelimit_interval_ms), ATTR_LIST(msg_ratelimit_burst), ATTR_LIST(errors_count), + ATTR_LIST(warning_count), + ATTR_LIST(msg_count), ATTR_LIST(first_error_ino), ATTR_LIST(last_error_ino), ATTR_LIST(first_error_block), @@ -283,6 +294,8 @@ static struct attribute *ext4_attrs[] = { #ifdef CONFIG_EXT4_DEBUG ATTR_LIST(simulate_fail), #endif + ATTR_LIST(mb_prefetch), + ATTR_LIST(mb_prefetch_limit), NULL, }; ATTRIBUTE_GROUPS(ext4); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index 7d2f6576d954..cba4b877c606 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1356,8 +1356,7 @@ retry: block = 0; while (wsize < bufsize) { - if (bh != NULL) - brelse(bh); + brelse(bh); csize = (bufsize - wsize) > blocksize ? blocksize : bufsize - wsize; bh = ext4_getblk(handle, ea_inode, block, 0); diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c index 236064930251..ff807e14c891 100644 --- a/fs/f2fs/checkpoint.c +++ b/fs/f2fs/checkpoint.c @@ -523,7 +523,7 @@ void f2fs_remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type) __remove_ino_entry(sbi, ino, type); } -/* mode should be APPEND_INO or UPDATE_INO */ +/* mode should be APPEND_INO, UPDATE_INO or TRANS_DIR_INO */ bool f2fs_exist_written_data(struct f2fs_sb_info *sbi, nid_t ino, int mode) { struct inode_management *im = &sbi->im[mode]; @@ -1258,8 +1258,6 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) DEFINE_WAIT(wait); for (;;) { - prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); - if (!get_pages(sbi, type)) break; @@ -1269,6 +1267,10 @@ void f2fs_wait_on_all_pages(struct f2fs_sb_info *sbi, int type) if (type == F2FS_DIRTY_META) f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO); + else if (type == F2FS_WB_CP_DATA) + f2fs_submit_merged_write(sbi, DATA); + + prepare_to_wait(&sbi->cp_wait, &wait, TASK_UNINTERRUPTIBLE); io_schedule_timeout(DEFAULT_IO_TIMEOUT); } finish_wait(&sbi->cp_wait, &wait); @@ -1415,7 +1417,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) curseg_alloc_type(sbi, i + CURSEG_HOT_DATA); } - /* 2 cp + n data seg summary + orphan inode blocks */ + /* 2 cp + n data seg summary + orphan inode blocks */ data_sum_blocks = f2fs_npages_for_summary_flush(sbi, false); spin_lock_irqsave(&sbi->cp_lock, flags); if (data_sum_blocks < NR_CURSEG_DATA_TYPE) @@ -1515,9 +1517,10 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc) /* * invalidate intermediate page cache borrowed from meta inode which are - * used for migration of encrypted or verity inode's blocks. + * used for migration of encrypted, verity or compressed inode's blocks. */ - if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi)) + if (f2fs_sb_has_encrypt(sbi) || f2fs_sb_has_verity(sbi) || + f2fs_sb_has_compression(sbi)) invalidate_mapping_pages(META_MAPPING(sbi), MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1); diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c index 1e02a8c106b0..1dfb126a0cb2 100644 --- a/fs/f2fs/compress.c +++ b/fs/f2fs/compress.c @@ -49,6 +49,13 @@ bool f2fs_is_compressed_page(struct page *page) return false; if (IS_ATOMIC_WRITTEN_PAGE(page) || IS_DUMMY_WRITTEN_PAGE(page)) return false; + /* + * page->private may be set with pid. + * pid_max is enough to check if it is traced. 
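In f2fs_is_compressed_page() above (fs/f2fs/compress.c), page->private has several possible owners, so the helper rules out atomic/dummy pages and, with this change, values that are really pids written by the I/O tracer before asserting the compressed-page magic; the check itself follows just below. A rough classification sketch; the magic constant and pid limit here are placeholders, not the f2fs values:

#include <stdbool.h>
#include <stdio.h>

#define FAKE_COMPRESSED_MAGIC 0xF2F5ULL  /* placeholder, not the real value */
#define FAKE_PID_MAX          32768ULL   /* placeholder for pid_max         */

/*
 * Classify a page->private value: small non-zero values are pids written
 * by the I/O tracer, the magic marks a compressed page, anything else is
 * unexpected here.
 */
static bool is_compressed_private(unsigned long long priv)
{
	if (priv == 0)
		return false;
	if (priv <= FAKE_PID_MAX)        /* looks like a traced pid */
		return false;
	return priv == FAKE_COMPRESSED_MAGIC;
}

int main(void)
{
	printf("%d\n", is_compressed_private(1234));                  /* 0 */
	printf("%d\n", is_compressed_private(FAKE_COMPRESSED_MAGIC)); /* 1 */
	return 0;
}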
+ */ + if (IS_IO_TRACED_PAGE(page)) + return false; + f2fs_bug_on(F2FS_M_SB(page->mapping), *((u32 *)page_private(page)) != F2FS_COMPRESSED_PAGE_MAGIC); return true; @@ -506,7 +513,7 @@ bool f2fs_is_compress_backend_ready(struct inode *inode) return f2fs_cops[F2FS_I(inode)->i_compress_algorithm]; } -static mempool_t *compress_page_pool = NULL; +static mempool_t *compress_page_pool; static int num_compress_pages = 512; module_param(num_compress_pages, uint, 0444); MODULE_PARM_DESC(num_compress_pages, @@ -663,6 +670,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity) const struct f2fs_compress_ops *cops = f2fs_cops[fi->i_compress_algorithm]; int ret; + int i; dec_page_count(sbi, F2FS_RD_DATA); @@ -681,6 +689,26 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity) goto out_free_dic; } + dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) * + dic->cluster_size, GFP_NOFS); + if (!dic->tpages) { + ret = -ENOMEM; + goto out_free_dic; + } + + for (i = 0; i < dic->cluster_size; i++) { + if (dic->rpages[i]) { + dic->tpages[i] = dic->rpages[i]; + continue; + } + + dic->tpages[i] = f2fs_compress_alloc_page(); + if (!dic->tpages[i]) { + ret = -ENOMEM; + goto out_free_dic; + } + } + if (cops->init_decompress_ctx) { ret = cops->init_decompress_ctx(dic); if (ret) @@ -821,7 +849,7 @@ static int f2fs_compressed_blocks(struct compress_ctx *cc) } /* return # of valid blocks in compressed cluster */ -static int f2fs_cluster_blocks(struct compress_ctx *cc, bool compr) +static int f2fs_cluster_blocks(struct compress_ctx *cc) { return __f2fs_cluster_blocks(cc, false); } @@ -835,7 +863,7 @@ int f2fs_is_compressed_cluster(struct inode *inode, pgoff_t index) .cluster_idx = index >> F2FS_I(inode)->i_log_cluster_size, }; - return f2fs_cluster_blocks(&cc, false); + return f2fs_cluster_blocks(&cc); } static bool cluster_may_compress(struct compress_ctx *cc) @@ -886,7 +914,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, bool prealloc; retry: - ret = f2fs_cluster_blocks(cc, false); + ret = f2fs_cluster_blocks(cc); if (ret <= 0) return ret; @@ -949,7 +977,7 @@ retry: } if (prealloc) { - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); set_new_dnode(&dn, cc->inode, NULL, NULL, 0); @@ -964,7 +992,7 @@ retry: break; } - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); } if (likely(!ret)) { @@ -1086,7 +1114,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, .submitted = false, .io_type = io_type, .io_wbc = wbc, - .encrypted = f2fs_encrypted_file(cc->inode), + .encrypted = fscrypt_inode_uses_fs_layer_crypto(cc->inode), }; struct dnode_of_data dn; struct node_info ni; @@ -1096,8 +1124,16 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, loff_t psize; int i, err; - if (!IS_NOQUOTA(inode) && !f2fs_trylock_op(sbi)) + if (IS_NOQUOTA(inode)) { + /* + * We need to wait for node_write to avoid block allocation during + * checkpoint. This can only happen to quota writes which can cause + * the below discard race condition. 
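Earlier in this compress.c hunk, the temporary page array for decompression is now built at decompress time: slots that already have a destination page in rpages are reused, and only the holes get freshly allocated pages. A compact reuse-or-allocate sketch with plain malloc standing in for the f2fs page allocator:

#include <stdio.h>
#include <stdlib.h>

#define CLUSTER_SIZE 4

/*
 * Fill tpages[]: reuse an existing destination buffer where one is
 * already present, allocate a scratch buffer otherwise.  Returns 0 on
 * success, -1 if an allocation fails part-way through.
 */
static int build_tpages(void *rpages[CLUSTER_SIZE], void *tpages[CLUSTER_SIZE])
{
	for (int i = 0; i < CLUSTER_SIZE; i++) {
		if (rpages[i]) {
			tpages[i] = rpages[i];
			continue;
		}
		tpages[i] = malloc(4096);
		if (!tpages[i])
			return -1;
	}
	return 0;
}

int main(void)
{
	char a[4096], c[4096];
	void *rpages[CLUSTER_SIZE] = { a, NULL, c, NULL };
	void *tpages[CLUSTER_SIZE] = { 0 };

	if (build_tpages(rpages, tpages) == 0)
		printf("slot 0 reused: %d, slot 1 allocated: %d\n",
		       tpages[0] == a, tpages[1] != NULL);

	for (int i = 0; i < CLUSTER_SIZE; i++)
		if (tpages[i] && tpages[i] != rpages[i])
			free(tpages[i]);
	return 0;
}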
+ */ + down_read(&sbi->node_write); + } else if (!f2fs_trylock_op(sbi)) { return -EAGAIN; + } set_new_dnode(&dn, cc->inode, NULL, NULL, 0); @@ -1137,6 +1173,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc, f2fs_set_compressed_page(cc->cpages[i], inode, cc->rpages[i + 1]->index, cic); fio.compressed_page = cc->cpages[i]; + + fio.old_blkaddr = data_blkaddr(dn.inode, dn.node_page, + dn.ofs_in_node + i + 1); + + /* wait for GCed page writeback via META_MAPPING */ + f2fs_wait_on_block_writeback(inode, fio.old_blkaddr); + if (fio.encrypted) { fio.page = cc->rpages[i + 1]; err = f2fs_encrypt_one_page(&fio); @@ -1203,7 +1246,9 @@ unlock_continue: set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN); f2fs_put_dnode(&dn); - if (!IS_NOQUOTA(inode)) + if (IS_NOQUOTA(inode)) + up_read(&sbi->node_write); + else f2fs_unlock_op(sbi); spin_lock(&fi->i_size_lock); @@ -1230,7 +1275,9 @@ out_put_cic: out_put_dnode: f2fs_put_dnode(&dn); out_unlock_op: - if (!IS_NOQUOTA(inode)) + if (IS_NOQUOTA(inode)) + up_read(&sbi->node_write); + else f2fs_unlock_op(sbi); return -EAGAIN; } @@ -1310,6 +1357,12 @@ retry_write: congestion_wait(BLK_RW_ASYNC, DEFAULT_IO_TIMEOUT); lock_page(cc->rpages[i]); + + if (!PageDirty(cc->rpages[i])) { + unlock_page(cc->rpages[i]); + continue; + } + clear_page_dirty_for_io(cc->rpages[i]); goto retry_write; } @@ -1353,6 +1406,8 @@ int f2fs_write_multi_pages(struct compress_ctx *cc, err = f2fs_write_compressed_pages(cc, submitted, wbc, io_type); cops->destroy_compress_ctx(cc); + kfree(cc->cpages); + cc->cpages = NULL; if (!err) return 0; f2fs_bug_on(F2FS_I_SB(cc->inode), err != -EAGAIN); @@ -1415,22 +1470,6 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc) dic->cpages[i] = page; } - dic->tpages = f2fs_kzalloc(sbi, sizeof(struct page *) * - dic->cluster_size, GFP_NOFS); - if (!dic->tpages) - goto out_free; - - for (i = 0; i < dic->cluster_size; i++) { - if (cc->rpages[i]) { - dic->tpages[i] = cc->rpages[i]; - continue; - } - - dic->tpages[i] = f2fs_compress_alloc_page(); - if (!dic->tpages[i]) - goto out_free; - } - return dic; out_free: diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index 326c63879ddc..73683e58a08d 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -14,6 +14,7 @@ #include <linux/pagevec.h> #include <linux/blkdev.h> #include <linux/bio.h> +#include <linux/blk-crypto.h> #include <linux/swap.h> #include <linux/prefetch.h> #include <linux/uio.h> @@ -86,7 +87,7 @@ static bool __is_cp_guaranteed(struct page *page) sbi = F2FS_I_SB(inode); if (inode->i_ino == F2FS_META_INO(sbi) || - inode->i_ino == F2FS_NODE_INO(sbi) || + inode->i_ino == F2FS_NODE_INO(sbi) || S_ISDIR(inode->i_mode) || (S_ISREG(inode->i_mode) && (f2fs_is_atomic_file(inode) || IS_NOQUOTA(inode))) || @@ -459,6 +460,33 @@ static struct bio *__bio_alloc(struct f2fs_io_info *fio, int npages) return bio; } +static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode, + pgoff_t first_idx, + const struct f2fs_io_info *fio, + gfp_t gfp_mask) +{ + /* + * The f2fs garbage collector sets ->encrypted_page when it wants to + * read/write raw data without encryption. + */ + if (!fio || !fio->encrypted_page) + fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask); +} + +static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode, + pgoff_t next_idx, + const struct f2fs_io_info *fio) +{ + /* + * The f2fs garbage collector sets ->encrypted_page when it wants to + * read/write raw data without encryption. 
+ */ + if (fio && fio->encrypted_page) + return !bio_has_crypt_ctx(bio); + + return fscrypt_mergeable_bio(bio, inode, next_idx); +} + static inline void __submit_bio(struct f2fs_sb_info *sbi, struct bio *bio, enum page_type type) { @@ -684,6 +712,9 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio) /* Allocate a new bio */ bio = __bio_alloc(fio, 1); + f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, + fio->page->index, fio, GFP_NOIO); + if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { bio_put(bio); return -EFAULT; @@ -763,9 +794,10 @@ static void del_bio_entry(struct bio_entry *be) kmem_cache_free(bio_entry_slab, be); } -static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio, +static int add_ipu_page(struct f2fs_io_info *fio, struct bio **bio, struct page *page) { + struct f2fs_sb_info *sbi = fio->sbi; enum temp_type temp; bool found = false; int ret = -EAGAIN; @@ -782,13 +814,19 @@ static int add_ipu_page(struct f2fs_sb_info *sbi, struct bio **bio, found = true; - if (bio_add_page(*bio, page, PAGE_SIZE, 0) == - PAGE_SIZE) { + f2fs_bug_on(sbi, !page_is_mergeable(sbi, *bio, + *fio->last_block, + fio->new_blkaddr)); + if (f2fs_crypt_mergeable_bio(*bio, + fio->page->mapping->host, + fio->page->index, fio) && + bio_add_page(*bio, page, PAGE_SIZE, 0) == + PAGE_SIZE) { ret = 0; break; } - /* bio is full */ + /* page can't be merged into bio; submit the bio */ del_bio_entry(be); __submit_bio(sbi, *bio, DATA); break; @@ -880,11 +918,13 @@ alloc_new: if (!bio) { bio = __bio_alloc(fio, BIO_MAX_PAGES); __attach_io_flag(fio); + f2fs_set_bio_crypt_ctx(bio, fio->page->mapping->host, + fio->page->index, fio, GFP_NOIO); bio_set_op_attrs(bio, fio->op, fio->op_flags); add_bio_entry(fio->sbi, bio, page, fio->temp); } else { - if (add_ipu_page(fio->sbi, &bio, page)) + if (add_ipu_page(fio, &bio, page)) goto alloc_new; } @@ -936,8 +976,11 @@ next: inc_page_count(sbi, WB_DATA_TYPE(bio_page)); - if (io->bio && !io_is_mergeable(sbi, io->bio, io, fio, - io->last_block_in_bio, fio->new_blkaddr)) + if (io->bio && + (!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio, + fio->new_blkaddr) || + !f2fs_crypt_mergeable_bio(io->bio, fio->page->mapping->host, + bio_page->index, fio))) __submit_merged_bio(io); alloc_new: if (io->bio == NULL) { @@ -949,6 +992,8 @@ alloc_new: goto skip; } io->bio = __bio_alloc(fio, BIO_MAX_PAGES); + f2fs_set_bio_crypt_ctx(io->bio, fio->page->mapping->host, + bio_page->index, fio, GFP_NOIO); io->fio = *fio; } @@ -993,11 +1038,14 @@ static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr, for_write); if (!bio) return ERR_PTR(-ENOMEM); + + f2fs_set_bio_crypt_ctx(bio, inode, first_idx, NULL, GFP_NOFS); + f2fs_target_device(sbi, blkaddr, bio); bio->bi_end_io = f2fs_read_end_io; bio_set_op_attrs(bio, REQ_OP_READ, op_flag); - if (f2fs_encrypted_file(inode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) post_read_steps |= 1 << STEP_DECRYPT; if (f2fs_compressed_file(inode)) post_read_steps |= 1 << STEP_DECOMPRESS_NOWQ; @@ -1025,12 +1073,13 @@ static void f2fs_release_read_bio(struct bio *bio) /* This can handle encryption stuffs */ static int f2fs_submit_page_read(struct inode *inode, struct page *page, - block_t blkaddr, bool for_write) + block_t blkaddr, int op_flags, bool for_write) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct bio *bio; - bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0, page->index, for_write); + bio = f2fs_grab_read_bio(inode, blkaddr, 1, op_flags, + page->index, for_write); if (IS_ERR(bio)) return PTR_ERR(bio); @@ 
-1145,7 +1194,7 @@ int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index) { - struct extent_info ei = {0,0,0}; + struct extent_info ei = {0, 0, 0}; struct inode *inode = dn->inode; if (f2fs_lookup_extent_cache(inode, index, &ei)) { @@ -1217,7 +1266,8 @@ got_it: return page; } - err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, for_write); + err = f2fs_submit_page_read(inode, page, dn.data_blkaddr, + op_flags, for_write); if (err) goto put_err; return page; @@ -1366,7 +1416,7 @@ alloc: set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); old_blkaddr = dn->data_blkaddr; f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr, - &sum, seg_type, NULL, false); + &sum, seg_type, NULL); if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) invalidate_mapping_pages(META_MAPPING(sbi), old_blkaddr, old_blkaddr); @@ -1426,7 +1476,7 @@ map_blocks: return err; } -void __do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock) +void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock) { if (flag == F2FS_GET_BLOCK_PRE_AIO) { if (lock) @@ -1491,7 +1541,7 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, next_dnode: if (map->m_may_create) - __do_map_lock(sbi, flag, true); + f2fs_do_map_lock(sbi, flag, true); /* When reading holes, we need its node page */ set_new_dnode(&dn, inode, NULL, NULL, 0); @@ -1640,7 +1690,7 @@ skip: f2fs_put_dnode(&dn); if (map->m_may_create) { - __do_map_lock(sbi, flag, false); + f2fs_do_map_lock(sbi, flag, false); f2fs_balance_fs(sbi, dn.node_changed); } goto next_dnode; @@ -1666,7 +1716,7 @@ sync_out: f2fs_put_dnode(&dn); unlock_out: if (map->m_may_create) { - __do_map_lock(sbi, flag, false); + f2fs_do_map_lock(sbi, flag, false); f2fs_balance_fs(sbi, dn.node_changed); } out: @@ -1813,6 +1863,7 @@ static int f2fs_xattr_fiemap(struct inode *inode, flags |= FIEMAP_EXTENT_LAST; err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); + trace_f2fs_fiemap(inode, 0, phys, len, flags, err); if (err || err == 1) return err; } @@ -1836,8 +1887,10 @@ static int f2fs_xattr_fiemap(struct inode *inode, flags = FIEMAP_EXTENT_LAST; } - if (phys) + if (phys) { err = fiemap_fill_next_extent(fieinfo, 0, phys, len, flags); + trace_f2fs_fiemap(inode, 0, phys, len, flags, err); + } return (err < 0 ? err : 0); } @@ -1931,6 +1984,7 @@ next: ret = fiemap_fill_next_extent(fieinfo, logical, phys, size, flags); + trace_f2fs_fiemap(inode, logical, phys, size, flags, ret); if (ret) goto out; size = 0; @@ -2073,8 +2127,9 @@ zero_out: * This page will go to BIO. Do we need to send this * BIO off first? 
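Throughout these data.c hunks (including the check that follows just below), a page may only be appended to the in-flight bio if it is both physically contiguous with the last queued block and compatible with the bio's inline-crypto context; failing either test submits the bio and starts a new one. A condensed sketch of that decision; the struct and both predicates are invented stand-ins for page_is_mergeable() and f2fs_crypt_mergeable_bio():

#include <stdbool.h>
#include <stdio.h>

struct pending_bio {
	unsigned long long last_block; /* last LBA queued in this bio    */
	int crypt_key_id;              /* -1 when the bio is unencrypted */
};

/* A page may join the bio only if it is contiguous AND crypto-compatible. */
static bool can_merge(const struct pending_bio *bio,
		      unsigned long long block, int crypt_key_id)
{
	bool contiguous = block == bio->last_block + 1;
	bool same_crypto = crypt_key_id == bio->crypt_key_id;

	return contiguous && same_crypto;
}

int main(void)
{
	struct pending_bio bio = { .last_block = 100, .crypt_key_id = 7 };

	printf("%d\n", can_merge(&bio, 101, 7)); /* 1: merge           */
	printf("%d\n", can_merge(&bio, 101, 9)); /* 0: submit, realloc */
	printf("%d\n", can_merge(&bio, 200, 7)); /* 0: submit, realloc */
	return 0;
}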
*/ - if (bio && !page_is_mergeable(F2FS_I_SB(inode), bio, - *last_block_in_bio, block_nr)) { + if (bio && (!page_is_mergeable(F2FS_I_SB(inode), bio, + *last_block_in_bio, block_nr) || + !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { submit_and_realloc: __submit_bio(F2FS_I_SB(inode), bio, DATA); bio = NULL; @@ -2164,9 +2219,7 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, if (ret) goto out; - /* cluster was overwritten as normal cluster */ - if (dn.data_blkaddr != COMPRESS_ADDR) - goto out; + f2fs_bug_on(sbi, dn.data_blkaddr != COMPRESS_ADDR); for (i = 1; i < cc->cluster_size; i++) { block_t blkaddr; @@ -2204,8 +2257,9 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret, blkaddr = data_blkaddr(dn.inode, dn.node_page, dn.ofs_in_node + i + 1); - if (bio && !page_is_mergeable(sbi, bio, - *last_block_in_bio, blkaddr)) { + if (bio && (!page_is_mergeable(sbi, bio, + *last_block_in_bio, blkaddr) || + !f2fs_crypt_mergeable_bio(bio, inode, page->index, NULL))) { submit_and_realloc: __submit_bio(sbi, bio, DATA); bio = NULL; @@ -2292,6 +2346,7 @@ static int f2fs_mpage_readpages(struct inode *inode, unsigned nr_pages = rac ? readahead_count(rac) : 1; unsigned max_nr_pages = nr_pages; int ret = 0; + bool drop_ra = false; map.m_pblk = 0; map.m_lblk = 0; @@ -2302,10 +2357,26 @@ static int f2fs_mpage_readpages(struct inode *inode, map.m_seg_type = NO_CHECK_TYPE; map.m_may_create = false; + /* + * Two readahead threads for same address range can cause race condition + * which fragments sequential read IOs. So let's avoid each other. + */ + if (rac && readahead_count(rac)) { + if (READ_ONCE(F2FS_I(inode)->ra_offset) == readahead_index(rac)) + drop_ra = true; + else + WRITE_ONCE(F2FS_I(inode)->ra_offset, + readahead_index(rac)); + } + for (; nr_pages; nr_pages--) { if (rac) { page = readahead_page(rac); prefetchw(&page->flags); + if (drop_ra) { + f2fs_put_page(page, 1); + continue; + } } #ifdef CONFIG_F2FS_FS_COMPRESSION @@ -2368,6 +2439,9 @@ next_page: } if (bio) __submit_bio(F2FS_I_SB(inode), bio, DATA); + + if (rac && readahead_count(rac) && !drop_ra) + WRITE_ONCE(F2FS_I(inode)->ra_offset, -1); return ret; } @@ -2421,6 +2495,9 @@ int f2fs_encrypt_one_page(struct f2fs_io_info *fio) /* wait for GCed page writeback via META_MAPPING */ f2fs_wait_on_block_writeback(inode, fio->old_blkaddr); + if (fscrypt_inode_uses_inline_crypto(inode)) + return 0; + retry_encrypt: fio->encrypted_page = fscrypt_encrypt_pagecache_blocks(page, PAGE_SIZE, 0, gfp_flags); @@ -2594,7 +2671,7 @@ got_it: f2fs_unlock_op(fio->sbi); err = f2fs_inplace_write_data(fio); if (err) { - if (f2fs_encrypted_file(inode)) + if (fscrypt_inode_uses_fs_layer_crypto(inode)) fscrypt_finalize_bounce_page(&fio->encrypted_page); if (PageWriteback(page)) end_page_writeback(page); @@ -2719,8 +2796,20 @@ write: /* Dentry/quota blocks are controlled by checkpoint */ if (S_ISDIR(inode->i_mode) || IS_NOQUOTA(inode)) { + /* + * We need to wait for node_write to avoid block allocation during + * checkpoint. This can only happen to quota writes which can cause + * the below discard race condition. 
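The ra_offset handshake added above in f2fs_mpage_readpages() is a best-effort guard against two readahead threads fragmenting each other's sequential I/O: a reader publishes the index it is about to read ahead from, a second thread arriving at the same index drops its readahead, and the slot is reset to -1 when done. A minimal model of that claim/skip/clear protocol using C11 atomics; the names are invented:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Per-inode slot: index currently being read ahead, -1 when idle. */
static atomic_long ra_offset = -1;

/* Returns true if this thread should do the readahead itself. */
static bool ra_begin(long index)
{
	if (atomic_load(&ra_offset) == index)
		return false;            /* someone else is on this range */
	atomic_store(&ra_offset, index);
	return true;
}

static void ra_end(void)
{
	atomic_store(&ra_offset, -1);
}

int main(void)
{
	if (ra_begin(4096)) {
		/* ...issue the readahead I/O here... */
		printf("thread A reads ahead from 4096\n");
	}
	if (!ra_begin(4096))
		printf("thread B skips the duplicate readahead\n");
	ra_end();
	return 0;
}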
+ */ + if (IS_NOQUOTA(inode)) + down_read(&sbi->node_write); + fio.need_lock = LOCK_DONE; err = f2fs_do_write_data_page(&fio); + + if (IS_NOQUOTA(inode)) + up_read(&sbi->node_write); + goto done; } @@ -2856,7 +2945,6 @@ static int f2fs_write_cache_pages(struct address_space *mapping, }; #endif int nr_pages; - pgoff_t uninitialized_var(writeback_index); pgoff_t index; pgoff_t end; /* Inclusive */ pgoff_t done_index; @@ -2875,8 +2963,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping, clear_inode_flag(mapping->host, FI_HOT_DATA); if (wbc->range_cyclic) { - writeback_index = mapping->writeback_index; /* prev offset */ - index = writeback_index; + index = mapping->writeback_index; /* prev offset */ end = -1; } else { index = wbc->range_start >> PAGE_SHIFT; @@ -3217,7 +3304,7 @@ static int prepare_write_begin(struct f2fs_sb_info *sbi, if (f2fs_has_inline_data(inode) || (pos & PAGE_MASK) >= i_size_read(inode)) { - __do_map_lock(sbi, flag, true); + f2fs_do_map_lock(sbi, flag, true); locked = true; } @@ -3254,7 +3341,7 @@ restart: err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); if (err || dn.data_blkaddr == NULL_ADDR) { f2fs_put_dnode(&dn); - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); WARN_ON(flag != F2FS_GET_BLOCK_PRE_AIO); locked = true; @@ -3270,7 +3357,7 @@ out: f2fs_put_dnode(&dn); unlock_out: if (locked) - __do_map_lock(sbi, flag, false); + f2fs_do_map_lock(sbi, flag, false); return err; } @@ -3382,7 +3469,7 @@ repeat: err = -EFSCORRUPTED; goto fail; } - err = f2fs_submit_page_read(inode, page, blkaddr, true); + err = f2fs_submit_page_read(inode, page, blkaddr, 0, true); if (err) goto fail; @@ -3432,6 +3519,10 @@ static int f2fs_write_end(struct file *file, if (f2fs_compressed_file(inode) && fsdata) { f2fs_compress_write_end(inode, fsdata, page->index, copied); f2fs_update_time(F2FS_I_SB(inode), REQ_TIME); + + if (pos + copied > i_size_read(inode) && + !f2fs_verity_in_progress(inode)) + f2fs_i_size_write(inode, pos + copied); return copied; } #endif @@ -3459,6 +3550,9 @@ static int check_direct_IO(struct inode *inode, struct iov_iter *iter, unsigned long align = offset | iov_iter_alignment(iter); struct block_device *bdev = inode->i_sb->s_bdev; + if (iov_iter_rw(iter) == READ && offset >= i_size_read(inode)) + return 1; + if (align & blocksize_mask) { if (bdev) blkbits = blksize_bits(bdev_logical_block_size(bdev)); @@ -3691,10 +3785,9 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) } f2fs_put_dnode(&dn); - return blknr; #else - return -EOPNOTSUPP; + return 0; #endif } @@ -3702,18 +3795,26 @@ static sector_t f2fs_bmap_compress(struct inode *inode, sector_t block) static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) { struct inode *inode = mapping->host; + struct buffer_head tmp = { + .b_size = i_blocksize(inode), + }; + sector_t blknr = 0; if (f2fs_has_inline_data(inode)) - return 0; + goto out; /* make sure allocating whole blocks */ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) filemap_write_and_wait(mapping); if (f2fs_compressed_file(inode)) - return f2fs_bmap_compress(inode, block); + blknr = f2fs_bmap_compress(inode, block); - return generic_block_bmap(mapping, block, get_data_block_bmap); + if (!get_data_block_bmap(inode, block, &tmp, 0)) + blknr = tmp.b_blocknr; +out: + trace_f2fs_bmap(inode, block, blknr); + return blknr; } #ifdef CONFIG_MIGRATION diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c index 0dbcb0f9c019..4276c0f79beb 100644 --- a/fs/f2fs/debug.c 
+++ b/fs/f2fs/debug.c @@ -174,6 +174,26 @@ static void update_general_status(struct f2fs_sb_info *sbi) for (i = META_CP; i < META_MAX; i++) si->meta_count[i] = atomic_read(&sbi->meta_count[i]); + for (i = 0; i < NO_CHECK_TYPE; i++) { + si->dirty_seg[i] = 0; + si->full_seg[i] = 0; + si->valid_blks[i] = 0; + } + + for (i = 0; i < MAIN_SEGS(sbi); i++) { + int blks = get_seg_entry(sbi, i)->valid_blocks; + int type = get_seg_entry(sbi, i)->type; + + if (!blks) + continue; + + if (blks == sbi->blocks_per_seg) + si->full_seg[type]++; + else + si->dirty_seg[type]++; + si->valid_blks[type] += blks; + } + for (i = 0; i < 2; i++) { si->segment_count[i] = sbi->segment_count[i]; si->block_count[i] = sbi->block_count[i]; @@ -329,30 +349,50 @@ static int stat_show(struct seq_file *s, void *v) seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n", si->main_area_segs, si->main_area_sections, si->main_area_zones); - seq_printf(s, " - COLD data: %d, %d, %d\n", + seq_printf(s, " TYPE %8s %8s %8s %10s %10s %10s\n", + "segno", "secno", "zoneno", "dirty_seg", "full_seg", "valid_blk"); + seq_printf(s, " - COLD data: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_COLD_DATA], si->cursec[CURSEG_COLD_DATA], - si->curzone[CURSEG_COLD_DATA]); - seq_printf(s, " - WARM data: %d, %d, %d\n", + si->curzone[CURSEG_COLD_DATA], + si->dirty_seg[CURSEG_COLD_DATA], + si->full_seg[CURSEG_COLD_DATA], + si->valid_blks[CURSEG_COLD_DATA]); + seq_printf(s, " - WARM data: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_WARM_DATA], si->cursec[CURSEG_WARM_DATA], - si->curzone[CURSEG_WARM_DATA]); - seq_printf(s, " - HOT data: %d, %d, %d\n", + si->curzone[CURSEG_WARM_DATA], + si->dirty_seg[CURSEG_WARM_DATA], + si->full_seg[CURSEG_WARM_DATA], + si->valid_blks[CURSEG_WARM_DATA]); + seq_printf(s, " - HOT data: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_HOT_DATA], si->cursec[CURSEG_HOT_DATA], - si->curzone[CURSEG_HOT_DATA]); - seq_printf(s, " - Dir dnode: %d, %d, %d\n", + si->curzone[CURSEG_HOT_DATA], + si->dirty_seg[CURSEG_HOT_DATA], + si->full_seg[CURSEG_HOT_DATA], + si->valid_blks[CURSEG_HOT_DATA]); + seq_printf(s, " - Dir dnode: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_HOT_NODE], si->cursec[CURSEG_HOT_NODE], - si->curzone[CURSEG_HOT_NODE]); - seq_printf(s, " - File dnode: %d, %d, %d\n", + si->curzone[CURSEG_HOT_NODE], + si->dirty_seg[CURSEG_HOT_NODE], + si->full_seg[CURSEG_HOT_NODE], + si->valid_blks[CURSEG_HOT_NODE]); + seq_printf(s, " - File dnode: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_WARM_NODE], si->cursec[CURSEG_WARM_NODE], - si->curzone[CURSEG_WARM_NODE]); - seq_printf(s, " - Indir nodes: %d, %d, %d\n", + si->curzone[CURSEG_WARM_NODE], + si->dirty_seg[CURSEG_WARM_NODE], + si->full_seg[CURSEG_WARM_NODE], + si->valid_blks[CURSEG_WARM_NODE]); + seq_printf(s, " - Indir nodes: %8d %8d %8d %10u %10u %10u\n", si->curseg[CURSEG_COLD_NODE], si->cursec[CURSEG_COLD_NODE], - si->curzone[CURSEG_COLD_NODE]); + si->curzone[CURSEG_COLD_NODE], + si->dirty_seg[CURSEG_COLD_NODE], + si->full_seg[CURSEG_COLD_NODE], + si->valid_blks[CURSEG_COLD_NODE]); seq_printf(s, "\n - Valid: %d\n - Dirty: %d\n", si->main_area_segs - si->dirty_count - si->prefree_count - si->free_segs, diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index d35976785e8c..069f498af1e3 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -779,7 +779,7 @@ int f2fs_do_add_link(struct inode *dir, const struct qstr *name, return err; /* - * An immature stakable filesystem shows a race condition between lookup + * An immature stackable filesystem shows a race condition 
between lookup * and create. If we have same task when doing lookup and create, it's * definitely fine as expected by VFS normally. Otherwise, let's just * verify on-disk dentry one more time, which guarantees filesystem diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c index e60078460ad1..686c68b98610 100644 --- a/fs/f2fs/extent_cache.c +++ b/fs/f2fs/extent_cache.c @@ -325,9 +325,10 @@ static void __drop_largest_extent(struct extent_tree *et, } /* return true, if inode page is changed */ -static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext) +static void __f2fs_init_extent_tree(struct inode *inode, struct page *ipage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct f2fs_extent *i_ext = ipage ? &F2FS_INODE(ipage)->i_ext : NULL; struct extent_tree *et; struct extent_node *en; struct extent_info ei; @@ -335,16 +336,18 @@ static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_e if (!f2fs_may_extent_tree(inode)) { /* drop largest extent */ if (i_ext && i_ext->len) { + f2fs_wait_on_page_writeback(ipage, NODE, true, true); i_ext->len = 0; - return true; + set_page_dirty(ipage); + return; } - return false; + return; } et = __grab_extent_tree(inode); if (!i_ext || !i_ext->len) - return false; + return; get_extent_info(&ei, i_ext); @@ -360,17 +363,14 @@ static bool __f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_e } out: write_unlock(&et->lock); - return false; } -bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext) +void f2fs_init_extent_tree(struct inode *inode, struct page *ipage) { - bool ret = __f2fs_init_extent_tree(inode, i_ext); + __f2fs_init_extent_tree(inode, ipage); if (!F2FS_I(inode)->extent_tree) set_inode_flag(inode, FI_NO_EXTENT); - - return ret; } static bool f2fs_lookup_extent_tree(struct inode *inode, pgoff_t pgofs, diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index b35a50f4953c..d9e52a7f3702 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -402,12 +402,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, } /* - * ioctl commands + * f2fs-specific ioctl commands */ -#define F2FS_IOC_GETFLAGS FS_IOC_GETFLAGS -#define F2FS_IOC_SETFLAGS FS_IOC_SETFLAGS -#define F2FS_IOC_GETVERSION FS_IOC_GETVERSION - #define F2FS_IOCTL_MAGIC 0xf5 #define F2FS_IOC_START_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 1) #define F2FS_IOC_COMMIT_ATOMIC_WRITE _IO(F2FS_IOCTL_MAGIC, 2) @@ -434,13 +430,8 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, _IOR(F2FS_IOCTL_MAGIC, 18, __u64) #define F2FS_IOC_RESERVE_COMPRESS_BLOCKS \ _IOR(F2FS_IOCTL_MAGIC, 19, __u64) - -#define F2FS_IOC_GET_VOLUME_NAME FS_IOC_GETFSLABEL -#define F2FS_IOC_SET_VOLUME_NAME FS_IOC_SETFSLABEL - -#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY -#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY -#define F2FS_IOC_GET_ENCRYPTION_PWSALT FS_IOC_GET_ENCRYPTION_PWSALT +#define F2FS_IOC_SEC_TRIM_FILE _IOW(F2FS_IOCTL_MAGIC, 20, \ + struct f2fs_sectrim_range) /* * should be same as XFS_IOC_GOINGDOWN. 
@@ -453,17 +444,12 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal, #define F2FS_GOING_DOWN_METAFLUSH 0x3 /* going down with meta flush */ #define F2FS_GOING_DOWN_NEED_FSCK 0x4 /* going down to trigger fsck */ -#if defined(__KERNEL__) && defined(CONFIG_COMPAT) /* - * ioctl commands in 32 bit emulation + * Flags used by F2FS_IOC_SEC_TRIM_FILE */ -#define F2FS_IOC32_GETFLAGS FS_IOC32_GETFLAGS -#define F2FS_IOC32_SETFLAGS FS_IOC32_SETFLAGS -#define F2FS_IOC32_GETVERSION FS_IOC32_GETVERSION -#endif - -#define F2FS_IOC_FSGETXATTR FS_IOC_FSGETXATTR -#define F2FS_IOC_FSSETXATTR FS_IOC_FSSETXATTR +#define F2FS_TRIM_FILE_DISCARD 0x1 /* send discard command */ +#define F2FS_TRIM_FILE_ZEROOUT 0x2 /* zero out */ +#define F2FS_TRIM_FILE_MASK 0x3 struct f2fs_gc_range { u32 sync; @@ -488,6 +474,12 @@ struct f2fs_flush_device { u32 segments; /* # of segments to flush */ }; +struct f2fs_sectrim_range { + u64 start; + u64 len; + u64 flags; +}; + /* for inline stuff */ #define DEF_INLINE_RESERVED_SIZE 1 static inline int get_extra_isize(struct inode *inode); @@ -794,6 +786,7 @@ struct f2fs_inode_info { struct list_head inmem_pages; /* inmemory pages managed by f2fs */ struct task_struct *inmem_task; /* store inmemory task */ struct mutex inmem_lock; /* lock for inmemory pages */ + pgoff_t ra_offset; /* ongoing readahead offset */ struct extent_tree *extent_tree; /* cached extent_tree entry */ /* avoid racing between foreground op and gc */ @@ -1267,7 +1260,8 @@ enum { GC_NORMAL, GC_IDLE_CB, GC_IDLE_GREEDY, - GC_URGENT, + GC_URGENT_HIGH, + GC_URGENT_LOW, }; enum { @@ -1313,6 +1307,14 @@ enum fsync_mode { #define IS_DUMMY_WRITTEN_PAGE(page) \ (page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE) +#ifdef CONFIG_F2FS_IO_TRACE +#define IS_IO_TRACED_PAGE(page) \ + (page_private(page) > 0 && \ + page_private(page) < (unsigned long)PID_MAX_LIMIT) +#else +#define IS_IO_TRACED_PAGE(page) (0) +#endif + #ifdef CONFIG_FS_ENCRYPTION #define DUMMY_ENCRYPTION_ENABLED(sbi) \ (unlikely(F2FS_OPTION(sbi).dummy_enc_ctx.ctx != NULL)) @@ -1438,7 +1440,7 @@ struct f2fs_sb_info { unsigned long last_time[MAX_TIME]; /* to store time in jiffies */ long interval_time[MAX_TIME]; /* to store thresholds */ - struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ + struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */ spinlock_t fsync_node_lock; /* for node entry lock */ struct list_head fsync_node_list; /* node list head */ @@ -1516,8 +1518,9 @@ struct f2fs_sb_info { unsigned int cur_victim_sec; /* current victim section num */ unsigned int gc_mode; /* current GC state */ unsigned int next_victim_seg[2]; /* next segment in victim section */ + /* for skip statistic */ - unsigned int atomic_files; /* # of opened atomic file */ + unsigned int atomic_files; /* # of opened atomic file */ unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */ unsigned long long skipped_gc_rwsem; /* FG_GC only */ @@ -2456,7 +2459,7 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep, static inline bool is_idle(struct f2fs_sb_info *sbi, int type) { - if (sbi->gc_mode == GC_URGENT) + if (sbi->gc_mode == GC_URGENT_HIGH) return true; if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || @@ -2474,6 +2477,10 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type) atomic_read(&SM_I(sbi)->fcc_info->queued_flush)) return false; + if (sbi->gc_mode == GC_URGENT_LOW && + (type == DISCARD_TIME || type == GC_TIME)) + return true; + return f2fs_time_over(sbi, type); } @@ 
-2639,7 +2646,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, case FI_NEW_INODE: if (set) return; - /* fall through */ + fallthrough; case FI_DATA_EXIST: case FI_INLINE_DOTS: case FI_PIN_FILE: @@ -2649,7 +2656,7 @@ static inline void __mark_inode_dirty_flag(struct inode *inode, static inline void set_inode_flag(struct inode *inode, int flag) { - test_and_set_bit(flag, F2FS_I(inode)->flags); + set_bit(flag, F2FS_I(inode)->flags); __mark_inode_dirty_flag(inode, flag, true); } @@ -2660,7 +2667,7 @@ static inline int is_inode_flag_set(struct inode *inode, int flag) static inline void clear_inode_flag(struct inode *inode, int flag) { - test_and_clear_bit(flag, F2FS_I(inode)->flags); + clear_bit(flag, F2FS_I(inode)->flags); __mark_inode_dirty_flag(inode, flag, false); } @@ -3275,7 +3282,7 @@ void f2fs_ra_node_page(struct f2fs_sb_info *sbi, nid_t nid); struct page *f2fs_get_node_page(struct f2fs_sb_info *sbi, pgoff_t nid); struct page *f2fs_get_node_page_ra(struct page *parent, int start); int f2fs_move_node_page(struct page *node_page, int gc_type); -int f2fs_flush_inline_data(struct f2fs_sb_info *sbi); +void f2fs_flush_inline_data(struct f2fs_sb_info *sbi); int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode, struct writeback_control *wbc, bool atomic, unsigned int *seq_id); @@ -3287,7 +3294,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid); void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid); void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid); int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink); -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page); +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page); int f2fs_recover_xattr_data(struct inode *inode, struct page *page); int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page); int f2fs_restore_node_summary(struct f2fs_sb_info *sbi, @@ -3325,9 +3332,10 @@ block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi); int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable); void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); -void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, +void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, unsigned int start, unsigned int end); -void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type); +void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type); +void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, struct cp_control *cpc); @@ -3350,7 +3358,7 @@ void f2fs_replace_block(struct f2fs_sb_info *sbi, struct dnode_of_data *dn, void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, - struct f2fs_io_info *fio, bool add_list); + struct f2fs_io_info *fio); void f2fs_wait_on_page_writeback(struct page *page, enum page_type type, bool ordered, bool locked); void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr); @@ -3448,7 +3456,7 @@ struct page *f2fs_get_lock_data_page(struct inode *inode, pgoff_t index, struct page *f2fs_get_new_data_page(struct inode *inode, struct page *ipage, pgoff_t index, bool new_i_size); int f2fs_do_write_data_page(struct f2fs_io_info *fio); -void 
__do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); +void f2fs_do_map_lock(struct f2fs_sb_info *sbi, int flag, bool lock); int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, int create, int flag); int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, @@ -3536,6 +3544,9 @@ struct f2fs_stat_info { int curseg[NR_CURSEG_TYPE]; int cursec[NR_CURSEG_TYPE]; int curzone[NR_CURSEG_TYPE]; + unsigned int dirty_seg[NR_CURSEG_TYPE]; + unsigned int full_seg[NR_CURSEG_TYPE]; + unsigned int valid_blks[NR_CURSEG_TYPE]; unsigned int meta_count[META_MAX]; unsigned int segment_count[2]; @@ -3750,7 +3761,7 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page); int f2fs_convert_inline_inode(struct inode *inode); int f2fs_try_convert_inline_dir(struct inode *dir, struct dentry *dentry); int f2fs_write_inline_data(struct inode *inode, struct page *page); -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage); +int f2fs_recover_inline_data(struct inode *inode, struct page *npage); struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, const struct f2fs_filename *fname, struct page **res_page); @@ -3795,7 +3806,7 @@ struct rb_entry *f2fs_lookup_rb_tree_ret(struct rb_root_cached *root, bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi, struct rb_root_cached *root); unsigned int f2fs_shrink_extent_tree(struct f2fs_sb_info *sbi, int nr_shrink); -bool f2fs_init_extent_tree(struct inode *inode, struct f2fs_extent *i_ext); +void f2fs_init_extent_tree(struct inode *inode, struct page *ipage); void f2fs_drop_extent_tree(struct inode *inode); unsigned int f2fs_destroy_extent_node(struct inode *inode); void f2fs_destroy_extent_tree(struct inode *inode); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 3268f8dd59bb..8a422400e824 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -21,6 +21,7 @@ #include <linux/uuid.h> #include <linux/file.h> #include <linux/nls.h> +#include <linux/sched/signal.h> #include "f2fs.h" #include "node.h" @@ -105,11 +106,11 @@ static vm_fault_t f2fs_vm_page_mkwrite(struct vm_fault *vmf) if (need_alloc) { /* block allocation */ - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, true); set_new_dnode(&dn, inode, NULL, NULL, 0); err = f2fs_get_block(&dn, page->index); f2fs_put_dnode(&dn); - __do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); + f2fs_do_map_lock(sbi, F2FS_GET_BLOCK_PRE_AIO, false); } #ifdef CONFIG_F2FS_FS_COMPRESSION @@ -1373,8 +1374,6 @@ static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len) truncate_pagecache(inode, offset); new_size = i_size_read(inode) - len; - truncate_pagecache(inode, new_size); - ret = f2fs_truncate_blocks(inode, new_size, true); up_write(&F2FS_I(inode)->i_mmap_sem); if (!ret) @@ -1660,7 +1659,7 @@ next_alloc: map.m_seg_type = CURSEG_COLD_DATA_PINNED; f2fs_lock_op(sbi); - f2fs_allocate_new_segments(sbi, CURSEG_COLD_DATA); + f2fs_allocate_new_segment(sbi, CURSEG_COLD_DATA); f2fs_unlock_op(sbi); err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_DIO); @@ -2527,6 +2526,11 @@ do_more: } ret = f2fs_gc(sbi, range.sync, true, GET_SEGNO(sbi, range.start)); + if (ret) { + if (ret == -EBUSY) + ret = -EAGAIN; + goto out; + } range.start += BLKS_PER_SEC(sbi); if (range.start <= end) goto do_more; @@ -3359,7 +3363,7 @@ static int f2fs_ioc_measure_verity(struct file *filp, unsigned long arg) return fsverity_ioctl_measure(filp, (void __user *)arg); } -static int f2fs_get_volume_name(struct 
file *filp, unsigned long arg) +static int f2fs_ioc_getfslabel(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -3385,7 +3389,7 @@ static int f2fs_get_volume_name(struct file *filp, unsigned long arg) return err; } -static int f2fs_set_volume_name(struct file *filp, unsigned long arg) +static int f2fs_ioc_setfslabel(struct file *filp, unsigned long arg) { struct inode *inode = file_inode(filp); struct f2fs_sb_info *sbi = F2FS_I_SB(inode); @@ -3531,14 +3535,14 @@ static int f2fs_release_compress_blocks(struct file *filp, unsigned long arg) if (ret) goto out; - if (!F2FS_I(inode)->i_compr_blocks) - goto out; - F2FS_I(inode)->i_flags |= F2FS_IMMUTABLE_FL; f2fs_set_inode_flags(inode); inode->i_ctime = current_time(inode); f2fs_mark_inode_dirty_sync(inode, true); + if (!F2FS_I(inode)->i_compr_blocks) + goto out; + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); down_write(&F2FS_I(inode)->i_mmap_sem); @@ -3756,6 +3760,193 @@ out: return ret; } +static int f2fs_secure_erase(struct block_device *bdev, struct inode *inode, + pgoff_t off, block_t block, block_t len, u32 flags) +{ + struct request_queue *q = bdev_get_queue(bdev); + sector_t sector = SECTOR_FROM_BLOCK(block); + sector_t nr_sects = SECTOR_FROM_BLOCK(len); + int ret = 0; + + if (!q) + return -ENXIO; + + if (flags & F2FS_TRIM_FILE_DISCARD) + ret = blkdev_issue_discard(bdev, sector, nr_sects, GFP_NOFS, + blk_queue_secure_erase(q) ? + BLKDEV_DISCARD_SECURE : 0); + + if (!ret && (flags & F2FS_TRIM_FILE_ZEROOUT)) { + if (IS_ENCRYPTED(inode)) + ret = fscrypt_zeroout_range(inode, off, block, len); + else + ret = blkdev_issue_zeroout(bdev, sector, nr_sects, + GFP_NOFS, 0); + } + + return ret; +} + +static int f2fs_sec_trim_file(struct file *filp, unsigned long arg) +{ + struct inode *inode = file_inode(filp); + struct f2fs_sb_info *sbi = F2FS_I_SB(inode); + struct address_space *mapping = inode->i_mapping; + struct block_device *prev_bdev = NULL; + struct f2fs_sectrim_range range; + pgoff_t index, pg_end, prev_index = 0; + block_t prev_block = 0, len = 0; + loff_t end_addr; + bool to_end = false; + int ret = 0; + + if (!(filp->f_mode & FMODE_WRITE)) + return -EBADF; + + if (copy_from_user(&range, (struct f2fs_sectrim_range __user *)arg, + sizeof(range))) + return -EFAULT; + + if (range.flags == 0 || (range.flags & ~F2FS_TRIM_FILE_MASK) || + !S_ISREG(inode->i_mode)) + return -EINVAL; + + if (((range.flags & F2FS_TRIM_FILE_DISCARD) && + !f2fs_hw_support_discard(sbi)) || + ((range.flags & F2FS_TRIM_FILE_ZEROOUT) && + IS_ENCRYPTED(inode) && f2fs_is_multi_device(sbi))) + return -EOPNOTSUPP; + + file_start_write(filp); + inode_lock(inode); + + if (f2fs_is_atomic_file(inode) || f2fs_compressed_file(inode) || + range.start >= inode->i_size) { + ret = -EINVAL; + goto err; + } + + if (range.len == 0) + goto err; + + if (inode->i_size - range.start > range.len) { + end_addr = range.start + range.len; + } else { + end_addr = range.len == (u64)-1 ? + sbi->sb->s_maxbytes : inode->i_size; + to_end = true; + } + + if (!IS_ALIGNED(range.start, F2FS_BLKSIZE) || + (!to_end && !IS_ALIGNED(end_addr, F2FS_BLKSIZE))) { + ret = -EINVAL; + goto err; + } + + index = F2FS_BYTES_TO_BLK(range.start); + pg_end = DIV_ROUND_UP(end_addr, F2FS_BLKSIZE); + + ret = f2fs_convert_inline_inode(inode); + if (ret) + goto err; + + down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); + down_write(&F2FS_I(inode)->i_mmap_sem); + + ret = filemap_write_and_wait_range(mapping, range.start, + to_end ? 
LLONG_MAX : end_addr - 1); + if (ret) + goto out; + + truncate_inode_pages_range(mapping, range.start, + to_end ? -1 : end_addr - 1); + + while (index < pg_end) { + struct dnode_of_data dn; + pgoff_t end_offset, count; + int i; + + set_new_dnode(&dn, inode, NULL, NULL, 0); + ret = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE); + if (ret) { + if (ret == -ENOENT) { + index = f2fs_get_next_page_offset(&dn, index); + continue; + } + goto out; + } + + end_offset = ADDRS_PER_PAGE(dn.node_page, inode); + count = min(end_offset - dn.ofs_in_node, pg_end - index); + for (i = 0; i < count; i++, index++, dn.ofs_in_node++) { + struct block_device *cur_bdev; + block_t blkaddr = f2fs_data_blkaddr(&dn); + + if (!__is_valid_data_blkaddr(blkaddr)) + continue; + + if (!f2fs_is_valid_blkaddr(sbi, blkaddr, + DATA_GENERIC_ENHANCE)) { + ret = -EFSCORRUPTED; + f2fs_put_dnode(&dn); + goto out; + } + + cur_bdev = f2fs_target_device(sbi, blkaddr, NULL); + if (f2fs_is_multi_device(sbi)) { + int di = f2fs_target_device_index(sbi, blkaddr); + + blkaddr -= FDEV(di).start_blk; + } + + if (len) { + if (prev_bdev == cur_bdev && + index == prev_index + len && + blkaddr == prev_block + len) { + len++; + } else { + ret = f2fs_secure_erase(prev_bdev, + inode, prev_index, prev_block, + len, range.flags); + if (ret) { + f2fs_put_dnode(&dn); + goto out; + } + + len = 0; + } + } + + if (!len) { + prev_bdev = cur_bdev; + prev_index = index; + prev_block = blkaddr; + len = 1; + } + } + + f2fs_put_dnode(&dn); + + if (fatal_signal_pending(current)) { + ret = -EINTR; + goto out; + } + cond_resched(); + } + + if (len) + ret = f2fs_secure_erase(prev_bdev, inode, prev_index, + prev_block, len, range.flags); +out: + up_write(&F2FS_I(inode)->i_mmap_sem); + up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); +err: + inode_unlock(inode); + file_end_write(filp); + + return ret; +} + long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) @@ -3764,11 +3955,11 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -ENOSPC; switch (cmd) { - case F2FS_IOC_GETFLAGS: + case FS_IOC_GETFLAGS: return f2fs_ioc_getflags(filp, arg); - case F2FS_IOC_SETFLAGS: + case FS_IOC_SETFLAGS: return f2fs_ioc_setflags(filp, arg); - case F2FS_IOC_GETVERSION: + case FS_IOC_GETVERSION: return f2fs_ioc_getversion(filp, arg); case F2FS_IOC_START_ATOMIC_WRITE: return f2fs_ioc_start_atomic_write(filp); @@ -3784,11 +3975,11 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_shutdown(filp, arg); case FITRIM: return f2fs_ioc_fitrim(filp, arg); - case F2FS_IOC_SET_ENCRYPTION_POLICY: + case FS_IOC_SET_ENCRYPTION_POLICY: return f2fs_ioc_set_encryption_policy(filp, arg); - case F2FS_IOC_GET_ENCRYPTION_POLICY: + case FS_IOC_GET_ENCRYPTION_POLICY: return f2fs_ioc_get_encryption_policy(filp, arg); - case F2FS_IOC_GET_ENCRYPTION_PWSALT: + case FS_IOC_GET_ENCRYPTION_PWSALT: return f2fs_ioc_get_encryption_pwsalt(filp, arg); case FS_IOC_GET_ENCRYPTION_POLICY_EX: return f2fs_ioc_get_encryption_policy_ex(filp, arg); @@ -3816,9 +4007,9 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_flush_device(filp, arg); case F2FS_IOC_GET_FEATURES: return f2fs_ioc_get_features(filp, arg); - case F2FS_IOC_FSGETXATTR: + case FS_IOC_FSGETXATTR: return f2fs_ioc_fsgetxattr(filp, arg); - case F2FS_IOC_FSSETXATTR: + case FS_IOC_FSSETXATTR: return f2fs_ioc_fssetxattr(filp, arg); case F2FS_IOC_GET_PIN_FILE: return 
f2fs_ioc_get_pin_file(filp, arg); @@ -3832,16 +4023,18 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return f2fs_ioc_enable_verity(filp, arg); case FS_IOC_MEASURE_VERITY: return f2fs_ioc_measure_verity(filp, arg); - case F2FS_IOC_GET_VOLUME_NAME: - return f2fs_get_volume_name(filp, arg); - case F2FS_IOC_SET_VOLUME_NAME: - return f2fs_set_volume_name(filp, arg); + case FS_IOC_GETFSLABEL: + return f2fs_ioc_getfslabel(filp, arg); + case FS_IOC_SETFSLABEL: + return f2fs_ioc_setfslabel(filp, arg); case F2FS_IOC_GET_COMPRESS_BLOCKS: return f2fs_get_compress_blocks(filp, arg); case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: return f2fs_release_compress_blocks(filp, arg); case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: return f2fs_reserve_compress_blocks(filp, arg); + case F2FS_IOC_SEC_TRIM_FILE: + return f2fs_sec_trim_file(filp, arg); default: return -ENOTTY; } @@ -3966,14 +4159,14 @@ out: long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { switch (cmd) { - case F2FS_IOC32_GETFLAGS: - cmd = F2FS_IOC_GETFLAGS; + case FS_IOC32_GETFLAGS: + cmd = FS_IOC_GETFLAGS; break; - case F2FS_IOC32_SETFLAGS: - cmd = F2FS_IOC_SETFLAGS; + case FS_IOC32_SETFLAGS: + cmd = FS_IOC_SETFLAGS; break; - case F2FS_IOC32_GETVERSION: - cmd = F2FS_IOC_GETVERSION; + case FS_IOC32_GETVERSION: + cmd = FS_IOC_GETVERSION; break; case F2FS_IOC_START_ATOMIC_WRITE: case F2FS_IOC_COMMIT_ATOMIC_WRITE: @@ -3982,9 +4175,9 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case F2FS_IOC_ABORT_VOLATILE_WRITE: case F2FS_IOC_SHUTDOWN: case FITRIM: - case F2FS_IOC_SET_ENCRYPTION_POLICY: - case F2FS_IOC_GET_ENCRYPTION_PWSALT: - case F2FS_IOC_GET_ENCRYPTION_POLICY: + case FS_IOC_SET_ENCRYPTION_POLICY: + case FS_IOC_GET_ENCRYPTION_PWSALT: + case FS_IOC_GET_ENCRYPTION_POLICY: case FS_IOC_GET_ENCRYPTION_POLICY_EX: case FS_IOC_ADD_ENCRYPTION_KEY: case FS_IOC_REMOVE_ENCRYPTION_KEY: @@ -3998,19 +4191,20 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case F2FS_IOC_MOVE_RANGE: case F2FS_IOC_FLUSH_DEVICE: case F2FS_IOC_GET_FEATURES: - case F2FS_IOC_FSGETXATTR: - case F2FS_IOC_FSSETXATTR: + case FS_IOC_FSGETXATTR: + case FS_IOC_FSSETXATTR: case F2FS_IOC_GET_PIN_FILE: case F2FS_IOC_SET_PIN_FILE: case F2FS_IOC_PRECACHE_EXTENTS: case F2FS_IOC_RESIZE_FS: case FS_IOC_ENABLE_VERITY: case FS_IOC_MEASURE_VERITY: - case F2FS_IOC_GET_VOLUME_NAME: - case F2FS_IOC_SET_VOLUME_NAME: + case FS_IOC_GETFSLABEL: + case FS_IOC_SETFSLABEL: case F2FS_IOC_GET_COMPRESS_BLOCKS: case F2FS_IOC_RELEASE_COMPRESS_BLOCKS: case F2FS_IOC_RESERVE_COMPRESS_BLOCKS: + case F2FS_IOC_SEC_TRIM_FILE: break; default: return -ENOIOCTLCMD; diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c index 5b95d5a146eb..11b4adde9baf 100644 --- a/fs/f2fs/gc.c +++ b/fs/f2fs/gc.c @@ -21,6 +21,9 @@ #include "gc.h" #include <trace/events/f2fs.h> +static unsigned int count_bits(const unsigned long *addr, + unsigned int offset, unsigned int len); + static int gc_thread_func(void *data) { struct f2fs_sb_info *sbi = data; @@ -79,7 +82,7 @@ static int gc_thread_func(void *data) * invalidated soon after by user update or deletion. * So, I'd like to wait some time to collect dirty segments. 
*/ - if (sbi->gc_mode == GC_URGENT) { + if (sbi->gc_mode == GC_URGENT_HIGH) { wait_ms = gc_th->urgent_sleep_time; down_write(&sbi->gc_lock); goto do_gc; @@ -173,7 +176,7 @@ static int select_gc_type(struct f2fs_sb_info *sbi, int gc_type) gc_mode = GC_CB; break; case GC_IDLE_GREEDY: - case GC_URGENT: + case GC_URGENT_HIGH: gc_mode = GC_GREEDY; break; } @@ -187,14 +190,20 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, if (p->alloc_mode == SSR) { p->gc_mode = GC_GREEDY; - p->dirty_segmap = dirty_i->dirty_segmap[type]; + p->dirty_bitmap = dirty_i->dirty_segmap[type]; p->max_search = dirty_i->nr_dirty[type]; p->ofs_unit = 1; } else { p->gc_mode = select_gc_type(sbi, gc_type); - p->dirty_segmap = dirty_i->dirty_segmap[DIRTY]; - p->max_search = dirty_i->nr_dirty[DIRTY]; p->ofs_unit = sbi->segs_per_sec; + if (__is_large_section(sbi)) { + p->dirty_bitmap = dirty_i->dirty_secmap; + p->max_search = count_bits(p->dirty_bitmap, + 0, MAIN_SECS(sbi)); + } else { + p->dirty_bitmap = dirty_i->dirty_segmap[DIRTY]; + p->max_search = dirty_i->nr_dirty[DIRTY]; + } } /* @@ -202,7 +211,7 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type, * foreground GC and urgent GC cases. */ if (gc_type != FG_GC && - (sbi->gc_mode != GC_URGENT) && + (sbi->gc_mode != GC_URGENT_HIGH) && p->max_search > sbi->max_victim_search) p->max_search = sbi->max_victim_search; @@ -321,6 +330,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, unsigned int secno, last_victim; unsigned int last_segment; unsigned int nsearched = 0; + int ret = 0; mutex_lock(&dirty_i->seglist_lock); last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec; @@ -332,12 +342,19 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, p.min_cost = get_max_cost(sbi, &p); if (*result != NULL_SEGNO) { - if (get_valid_blocks(sbi, *result, false) && - !sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) + if (!get_valid_blocks(sbi, *result, false)) { + ret = -ENODATA; + goto out; + } + + if (sec_usage_check(sbi, GET_SEC_FROM_SEG(sbi, *result))) + ret = -EBUSY; + else p.min_segno = *result; goto out; } + ret = -ENODATA; if (p.max_search == 0) goto out; @@ -365,10 +382,14 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, } while (1) { - unsigned long cost; - unsigned int segno; - - segno = find_next_bit(p.dirty_segmap, last_segment, p.offset); + unsigned long cost, *dirty_bitmap; + unsigned int unit_no, segno; + + dirty_bitmap = p.dirty_bitmap; + unit_no = find_next_bit(dirty_bitmap, + last_segment / p.ofs_unit, + p.offset / p.ofs_unit); + segno = unit_no * p.ofs_unit; if (segno >= last_segment) { if (sm->last_victim[p.gc_mode]) { last_segment = @@ -381,14 +402,7 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, } p.offset = segno + p.ofs_unit; - if (p.ofs_unit > 1) { - p.offset -= segno % p.ofs_unit; - nsearched += count_bits(p.dirty_segmap, - p.offset - p.ofs_unit, - p.ofs_unit); - } else { - nsearched++; - } + nsearched++; #ifdef CONFIG_F2FS_CHECK_FS /* @@ -421,9 +435,10 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi, next: if (nsearched >= p.max_search) { if (!sm->last_victim[p.gc_mode] && segno <= last_victim) - sm->last_victim[p.gc_mode] = last_victim + 1; + sm->last_victim[p.gc_mode] = + last_victim + p.ofs_unit; else - sm->last_victim[p.gc_mode] = segno + 1; + sm->last_victim[p.gc_mode] = segno + p.ofs_unit; sm->last_victim[p.gc_mode] %= (MAIN_SECS(sbi) * sbi->segs_per_sec); break; @@ -440,6 +455,7 @@ got_result: else set_bit(secno, dirty_i->victim_secmap); } + ret = 0; } out: 
@@ -449,7 +465,7 @@ out: prefree_segments(sbi), free_segments(sbi)); mutex_unlock(&dirty_i->seglist_lock); - return (p.min_segno == NULL_SEGNO) ? 0 : 1; + return ret; } static const struct victim_selection default_v_ops = { @@ -833,8 +849,10 @@ static int move_data_block(struct inode *inode, block_t bidx, mpage = f2fs_grab_cache_page(META_MAPPING(fio.sbi), fio.old_blkaddr, false); - if (!mpage) + if (!mpage) { + err = -ENOMEM; goto up_out; + } fio.encrypted_page = mpage; @@ -859,7 +877,7 @@ static int move_data_block(struct inode *inode, block_t bidx, } f2fs_allocate_data_block(fio.sbi, NULL, fio.old_blkaddr, &newaddr, - &sum, CURSEG_COLD_DATA, NULL, false); + &sum, CURSEG_COLD_DATA, NULL); fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(fio.sbi), newaddr, FGP_LOCK | FGP_CREAT, GFP_NOFS); @@ -1333,10 +1351,9 @@ gc_more: ret = -EINVAL; goto stop; } - if (!__get_victim(sbi, &segno, gc_type)) { - ret = -ENODATA; + ret = __get_victim(sbi, &segno, gc_type); + if (ret) goto stop; - } seg_freed = do_garbage_collect(sbi, segno, &gc_list, gc_type); if (gc_type == FG_GC && seg_freed == sbi->segs_per_sec) @@ -1434,7 +1451,7 @@ static int free_segment_range(struct f2fs_sb_info *sbi, /* Move out cursegs from the target range */ for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++) - allocate_segment_for_resize(sbi, type, start, end); + f2fs_allocate_segment_for_resize(sbi, type, start, end); /* do GC to move out valid blocks in the range */ for (segno = start; segno <= end; segno += sbi->segs_per_sec) { diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c index dbade310dc79..102df444f623 100644 --- a/fs/f2fs/inline.c +++ b/fs/f2fs/inline.c @@ -12,6 +12,7 @@ #include "f2fs.h" #include "node.h" +#include <trace/events/f2fs.h> bool f2fs_may_inline_data(struct inode *inode) { @@ -253,7 +254,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page) return 0; } -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) +int f2fs_recover_inline_data(struct inode *inode, struct page *npage) { struct f2fs_sb_info *sbi = F2FS_I_SB(inode); struct f2fs_inode *ri = NULL; @@ -275,7 +276,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage) ri && (ri->i_inline & F2FS_INLINE_DATA)) { process_inline: ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_wait_on_page_writeback(ipage, NODE, true, true); @@ -288,21 +290,25 @@ process_inline: set_page_dirty(ipage); f2fs_put_page(ipage, 1); - return true; + return 1; } if (f2fs_has_inline_data(inode)) { ipage = f2fs_get_node_page(sbi, inode->i_ino); - f2fs_bug_on(sbi, IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); f2fs_truncate_inline_inode(inode, ipage, 0); clear_inode_flag(inode, FI_INLINE_DATA); f2fs_put_page(ipage, 1); } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) { - if (f2fs_truncate_blocks(inode, 0, false)) - return false; + int ret; + + ret = f2fs_truncate_blocks(inode, 0, false); + if (ret) + return ret; goto process_inline; } - return false; + return 0; } struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir, @@ -776,6 +782,7 @@ int f2fs_inline_data_fiemap(struct inode *inode, byteaddr += (char *)inline_data_addr(inode, ipage) - (char *)F2FS_INODE(ipage); err = fiemap_fill_next_extent(fieinfo, start, byteaddr, ilen, flags); + trace_f2fs_fiemap(inode, start, byteaddr, ilen, flags, err); out: f2fs_put_page(ipage, 1); return err; diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c index 
44582a4db513..66969ae852b9 100644 --- a/fs/f2fs/inode.c +++ b/fs/f2fs/inode.c @@ -367,8 +367,7 @@ static int do_read_inode(struct inode *inode) fi->i_pino = le32_to_cpu(ri->i_pino); fi->i_dir_level = ri->i_dir_level; - if (f2fs_init_extent_tree(inode, &ri->i_ext)) - set_page_dirty(node_page); + f2fs_init_extent_tree(inode, node_page); get_inline_info(inode, ri); @@ -402,6 +401,7 @@ static int do_read_inode(struct inode *inode) /* try to recover cold bit for non-dir inode */ if (!S_ISDIR(inode->i_mode) && !is_cold_node(node_page)) { + f2fs_wait_on_page_writeback(node_page, NODE, true, true); set_cold_node(node_page, false); set_page_dirty(node_page); } diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index e94e02c6580a..84e4bbc1a64d 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -569,15 +569,17 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) trace_f2fs_unlink_enter(dir, dentry); - if (unlikely(f2fs_cp_error(sbi))) - return -EIO; + if (unlikely(f2fs_cp_error(sbi))) { + err = -EIO; + goto fail; + } err = dquot_initialize(dir); if (err) - return err; + goto fail; err = dquot_initialize(inode); if (err) - return err; + goto fail; de = f2fs_find_entry(dir, &dentry->d_name, &page); if (!de) { @@ -600,7 +602,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. Eventually we'll want avoid * invalidating the dentries here, alongside with returning the - * negative dentries at f2fs_lookup(), when it is better + * negative dentries at f2fs_lookup(), when it is better * supported by the VFS for the CI case. */ if (IS_CASEFOLDED(dir)) @@ -1285,7 +1287,7 @@ static const char *f2fs_encrypted_get_link(struct dentry *dentry, } const struct inode_operations f2fs_encrypted_symlink_inode_operations = { - .get_link = f2fs_encrypted_get_link, + .get_link = f2fs_encrypted_get_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, .listxattr = f2fs_listxattr, @@ -1311,7 +1313,7 @@ const struct inode_operations f2fs_dir_inode_operations = { }; const struct inode_operations f2fs_symlink_inode_operations = { - .get_link = f2fs_get_link, + .get_link = f2fs_get_link, .getattr = f2fs_getattr, .setattr = f2fs_setattr, .listxattr = f2fs_listxattr, @@ -1319,7 +1321,7 @@ const struct inode_operations f2fs_symlink_inode_operations = { const struct inode_operations f2fs_special_inode_operations = { .getattr = f2fs_getattr, - .setattr = f2fs_setattr, + .setattr = f2fs_setattr, .get_acl = f2fs_get_acl, .set_acl = f2fs_set_acl, .listxattr = f2fs_listxattr, diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index 03e24df1c84f..cb1b5b61a1da 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c @@ -618,10 +618,10 @@ pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs) switch (dn->max_level) { case 3: base += 2 * indirect_blks; - /* fall through */ + fallthrough; case 2: base += 2 * direct_blks; - /* fall through */ + fallthrough; case 1: base += direct_index; break; @@ -1041,8 +1041,10 @@ int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from) trace_f2fs_truncate_inode_blocks_enter(inode, from); level = get_node_path(inode, from, offset, noffset); - if (level < 0) + if (level < 0) { + trace_f2fs_truncate_inode_blocks_exit(inode, level); return level; + } page = f2fs_get_node_page(sbi, inode->i_ino); if (IS_ERR(page)) { @@ -1726,7 +1728,7 @@ continue_unlock: set_dentry_mark(page, f2fs_need_dentry_mark(sbi, ino)); } - /* may be written by other thread */ + /* may be written by other 
thread */ if (!PageDirty(page)) set_page_dirty(page); } @@ -1814,12 +1816,11 @@ static bool flush_dirty_inode(struct page *page) return true; } -int f2fs_flush_inline_data(struct f2fs_sb_info *sbi) +void f2fs_flush_inline_data(struct f2fs_sb_info *sbi) { pgoff_t index = 0; struct pagevec pvec; int nr_pages; - int ret = 0; pagevec_init(&pvec); @@ -1858,7 +1859,6 @@ continue_unlock: pagevec_release(&pvec); cond_resched(); } - return ret; } int f2fs_sync_node_pages(struct f2fs_sb_info *sbi, @@ -1924,8 +1924,12 @@ continue_unlock: goto continue_unlock; } - /* flush inline_data, if it's async context. */ - if (do_balance && is_inline_node(page)) { + /* flush inline_data/inode, if it's async context. */ + if (!do_balance) + goto write_node; + + /* flush inline_data */ + if (is_inline_node(page)) { clear_inline_node(page); unlock_page(page); flush_inline_data(sbi, ino_of_node(page)); @@ -1938,7 +1942,7 @@ continue_unlock: if (flush_dirty_inode(page)) goto lock_node; } - +write_node: f2fs_wait_on_page_writeback(page, NODE, true, true); if (!clear_page_dirty_for_io(page)) @@ -2097,7 +2101,7 @@ const struct address_space_operations f2fs_node_aops = { .invalidatepage = f2fs_invalidate_page, .releasepage = f2fs_release_page, #ifdef CONFIG_MIGRATION - .migratepage = f2fs_migrate_page, + .migratepage = f2fs_migrate_page, #endif }; @@ -2108,7 +2112,7 @@ static struct free_nid *__lookup_free_nid_list(struct f2fs_nm_info *nm_i, } static int __insert_free_nid(struct f2fs_sb_info *sbi, - struct free_nid *i, enum nid_state state) + struct free_nid *i) { struct f2fs_nm_info *nm_i = NM_I(sbi); @@ -2116,10 +2120,8 @@ static int __insert_free_nid(struct f2fs_sb_info *sbi, if (err) return err; - f2fs_bug_on(sbi, state != i->state); - nm_i->nid_cnt[state]++; - if (state == FREE_NID) - list_add_tail(&i->list, &nm_i->free_nid_list); + nm_i->nid_cnt[FREE_NID]++; + list_add_tail(&i->list, &nm_i->free_nid_list); return 0; } @@ -2241,7 +2243,7 @@ static bool add_free_nid(struct f2fs_sb_info *sbi, } } ret = true; - err = __insert_free_nid(sbi, i, FREE_NID); + err = __insert_free_nid(sbi, i); err_out: if (update) { update_free_nid_bitmap(sbi, nid, ret, build); @@ -2371,6 +2373,9 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi, if (unlikely(nid >= nm_i->max_nid)) nid = 0; + if (unlikely(nid % NAT_ENTRY_PER_BLOCK)) + nid = NAT_BLOCK_OFFSET(nid) * NAT_ENTRY_PER_BLOCK; + /* Enough entries */ if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK) return 0; @@ -2572,7 +2577,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink) return nr - nr_shrink; } -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page) { void *src_addr, *dst_addr; size_t inline_size; @@ -2580,7 +2585,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) struct f2fs_inode *ri; ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino); - f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage)); + if (IS_ERR(ipage)) + return PTR_ERR(ipage); ri = F2FS_INODE(page); if (ri->i_inline & F2FS_INLINE_XATTR) { @@ -2599,6 +2605,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page) update_inode: f2fs_update_inode(inode, ipage); f2fs_put_page(ipage, 1); + return 0; } int f2fs_recover_xattr_data(struct inode *inode, struct page *page) diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index ae5310f02e7f..4f12ade6410a 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -544,7 +544,9 @@ static int 
do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, /* step 1: recover xattr */ if (IS_INODE(page)) { - f2fs_recover_inline_xattr(inode, page); + err = f2fs_recover_inline_xattr(inode, page); + if (err) + goto out; } else if (f2fs_has_xattr_block(ofs_of_node(page))) { err = f2fs_recover_xattr_data(inode, page); if (!err) @@ -553,8 +555,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode, } /* step 2: recover inline data */ - if (f2fs_recover_inline_data(inode, page)) + err = f2fs_recover_inline_data(inode, page); + if (err) { + if (err == 1) + err = 0; goto out; + } /* step 3: recover data indices */ start = f2fs_start_bidx_of_node(ofs_of_node(page), inode); @@ -742,7 +748,7 @@ next: f2fs_put_page(page, 1); } if (!err) - f2fs_allocate_new_segments(sbi, NO_CHECK_TYPE); + f2fs_allocate_new_segments(sbi); return err; } diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 196f31503511..e247a5ef3713 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c @@ -174,7 +174,7 @@ bool f2fs_need_SSR(struct f2fs_sb_info *sbi) if (f2fs_lfs_mode(sbi)) return false; - if (sbi->gc_mode == GC_URGENT) + if (sbi->gc_mode == GC_URGENT_HIGH) return true; if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) return true; @@ -796,6 +796,18 @@ static void __locate_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, } if (!test_and_set_bit(segno, dirty_i->dirty_segmap[t])) dirty_i->nr_dirty[t]++; + + if (__is_large_section(sbi)) { + unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); + block_t valid_blocks = + get_valid_blocks(sbi, segno, true); + + f2fs_bug_on(sbi, unlikely(!valid_blocks || + valid_blocks == BLKS_PER_SEC(sbi))); + + if (!IS_CURSEC(sbi, secno)) + set_bit(secno, dirty_i->dirty_secmap); + } } } @@ -803,6 +815,7 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, enum dirty_type dirty_type) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); + block_t valid_blocks; if (test_and_clear_bit(segno, dirty_i->dirty_segmap[dirty_type])) dirty_i->nr_dirty[dirty_type]--; @@ -814,13 +827,26 @@ static void __remove_dirty_segment(struct f2fs_sb_info *sbi, unsigned int segno, if (test_and_clear_bit(segno, dirty_i->dirty_segmap[t])) dirty_i->nr_dirty[t]--; - if (get_valid_blocks(sbi, segno, true) == 0) { + valid_blocks = get_valid_blocks(sbi, segno, true); + if (valid_blocks == 0) { clear_bit(GET_SEC_FROM_SEG(sbi, segno), dirty_i->victim_secmap); #ifdef CONFIG_F2FS_CHECK_FS clear_bit(segno, SIT_I(sbi)->invalid_segmap); #endif } + if (__is_large_section(sbi)) { + unsigned int secno = GET_SEC_FROM_SEG(sbi, segno); + + if (!valid_blocks || + valid_blocks == BLKS_PER_SEC(sbi)) { + clear_bit(secno, dirty_i->dirty_secmap); + return; + } + + if (!IS_CURSEC(sbi, secno)) + set_bit(secno, dirty_i->dirty_secmap); + } } } @@ -1733,7 +1759,7 @@ static int issue_discard_thread(void *data) continue; } - if (sbi->gc_mode == GC_URGENT) + if (sbi->gc_mode == GC_URGENT_HIGH) __init_discard_policy(sbi, &dpolicy, DPOLICY_FORCE, 1); sb_start_intwrite(sbi->sb); @@ -2140,7 +2166,7 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del) new_vblocks = se->valid_blocks + del; offset = GET_BLKOFF_FROM_SEG0(sbi, blkaddr); - f2fs_bug_on(sbi, (new_vblocks >> (sizeof(unsigned short) << 3) || + f2fs_bug_on(sbi, (new_vblocks < 0 || (new_vblocks > sbi->blocks_per_seg))); se->valid_blocks = new_vblocks; @@ -2605,7 +2631,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) bool reversed = false; /* f2fs_need_SSR() already forces 
to do this */ - if (v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) { + if (!v_ops->get_victim(sbi, &segno, BG_GC, type, SSR)) { curseg->next_segno = segno; return 1; } @@ -2632,7 +2658,7 @@ static int get_ssr_segment(struct f2fs_sb_info *sbi, int type) for (; cnt-- > 0; reversed ? i-- : i++) { if (i == type) continue; - if (v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) { + if (!v_ops->get_victim(sbi, &segno, BG_GC, i, SSR)) { curseg->next_segno = segno; return 1; } @@ -2674,7 +2700,7 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi, stat_inc_seg_type(sbi, curseg); } -void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, +void f2fs_allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type, unsigned int start, unsigned int end) { struct curseg_info *curseg = CURSEG_I(sbi, type); @@ -2707,28 +2733,35 @@ unlock: up_read(&SM_I(sbi)->curseg_lock); } -void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi, int type) +static void __allocate_new_segment(struct f2fs_sb_info *sbi, int type) { - struct curseg_info *curseg; + struct curseg_info *curseg = CURSEG_I(sbi, type); unsigned int old_segno; - int i; - down_write(&SIT_I(sbi)->sentry_lock); + if (!curseg->next_blkoff && + !get_valid_blocks(sbi, curseg->segno, false) && + !get_ckpt_valid_blocks(sbi, curseg->segno)) + return; - for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) { - if (type != NO_CHECK_TYPE && i != type) - continue; + old_segno = curseg->segno; + SIT_I(sbi)->s_ops->allocate_segment(sbi, type, true); + locate_dirty_segment(sbi, old_segno); +} - curseg = CURSEG_I(sbi, i); - if (type == NO_CHECK_TYPE || curseg->next_blkoff || - get_valid_blocks(sbi, curseg->segno, false) || - get_ckpt_valid_blocks(sbi, curseg->segno)) { - old_segno = curseg->segno; - SIT_I(sbi)->s_ops->allocate_segment(sbi, i, true); - locate_dirty_segment(sbi, old_segno); - } - } +void f2fs_allocate_new_segment(struct f2fs_sb_info *sbi, int type) +{ + down_write(&SIT_I(sbi)->sentry_lock); + __allocate_new_segment(sbi, type); + up_write(&SIT_I(sbi)->sentry_lock); +} + +void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) +{ + int i; + down_write(&SIT_I(sbi)->sentry_lock); + for (i = CURSEG_HOT_DATA; i <= CURSEG_COLD_DATA; i++) + __allocate_new_segment(sbi, i); up_write(&SIT_I(sbi)->sentry_lock); } @@ -3089,7 +3122,7 @@ static int __get_segment_type(struct f2fs_io_info *fio) void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, block_t old_blkaddr, block_t *new_blkaddr, struct f2fs_summary *sum, int type, - struct f2fs_io_info *fio, bool add_list) + struct f2fs_io_info *fio) { struct sit_info *sit_i = SIT_I(sbi); struct curseg_info *curseg = CURSEG_I(sbi, type); @@ -3107,14 +3140,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, type = CURSEG_COLD_DATA; } - /* - * We need to wait for node_write to avoid block allocation during - * checkpoint. This can only happen to quota writes which can cause - * the below discard race condition. 
- */ - if (IS_DATASEG(type)) - down_write(&sbi->node_write); - down_read(&SM_I(sbi)->curseg_lock); mutex_lock(&curseg->curseg_mutex); @@ -3165,7 +3190,7 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, if (F2FS_IO_ALIGNED(sbi)) fio->retry = false; - if (add_list) { + if (fio) { struct f2fs_bio_info *io; INIT_LIST_HEAD(&fio->list); @@ -3180,9 +3205,6 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page, up_read(&SM_I(sbi)->curseg_lock); - if (IS_DATASEG(type)) - up_write(&sbi->node_write); - if (put_pin_sem) up_read(&sbi->pin_sem); } @@ -3217,7 +3239,7 @@ static void do_write_page(struct f2fs_summary *sum, struct f2fs_io_info *fio) down_read(&fio->sbi->io_order_lock); reallocate: f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr, - &fio->new_blkaddr, sum, type, fio, true); + &fio->new_blkaddr, sum, type, fio); if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO) invalidate_mapping_pages(META_MAPPING(fio->sbi), fio->old_blkaddr, fio->old_blkaddr); @@ -4293,8 +4315,9 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi) { struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); struct free_segmap_info *free_i = FREE_I(sbi); - unsigned int segno = 0, offset = 0; - unsigned short valid_blocks; + unsigned int segno = 0, offset = 0, secno; + block_t valid_blocks; + block_t blks_per_sec = BLKS_PER_SEC(sbi); while (1) { /* find dirty segment based on free segmap */ @@ -4313,6 +4336,22 @@ static void init_dirty_segmap(struct f2fs_sb_info *sbi) __locate_dirty_segment(sbi, segno, DIRTY); mutex_unlock(&dirty_i->seglist_lock); } + + if (!__is_large_section(sbi)) + return; + + mutex_lock(&dirty_i->seglist_lock); + for (segno = 0; segno < MAIN_SECS(sbi); segno += blks_per_sec) { + valid_blocks = get_valid_blocks(sbi, segno, true); + secno = GET_SEC_FROM_SEG(sbi, segno); + + if (!valid_blocks || valid_blocks == blks_per_sec) + continue; + if (IS_CURSEC(sbi, secno)) + continue; + set_bit(secno, dirty_i->dirty_secmap); + } + mutex_unlock(&dirty_i->seglist_lock); } static int init_victim_secmap(struct f2fs_sb_info *sbi) @@ -4349,6 +4388,14 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi) return -ENOMEM; } + if (__is_large_section(sbi)) { + bitmap_size = f2fs_bitmap_size(MAIN_SECS(sbi)); + dirty_i->dirty_secmap = f2fs_kvzalloc(sbi, + bitmap_size, GFP_KERNEL); + if (!dirty_i->dirty_secmap) + return -ENOMEM; + } + init_dirty_segmap(sbi); return init_victim_secmap(sbi); } @@ -4775,6 +4822,12 @@ static void destroy_dirty_segmap(struct f2fs_sb_info *sbi) for (i = 0; i < NR_DIRTY_TYPE; i++) discard_dirty_segmap(sbi, i); + if (__is_large_section(sbi)) { + mutex_lock(&dirty_i->seglist_lock); + kvfree(dirty_i->dirty_secmap); + mutex_unlock(&dirty_i->seglist_lock); + } + destroy_victim_secmap(sbi); SM_I(sbi)->dirty_info = NULL; kvfree(dirty_i); diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h index cba16cca5189..752b177073b2 100644 --- a/fs/f2fs/segment.h +++ b/fs/f2fs/segment.h @@ -166,8 +166,11 @@ enum { struct victim_sel_policy { int alloc_mode; /* LFS or SSR */ int gc_mode; /* GC_CB or GC_GREEDY */ - unsigned long *dirty_segmap; /* dirty segment bitmap */ - unsigned int max_search; /* maximum # of segments to search */ + unsigned long *dirty_bitmap; /* dirty segment/section bitmap */ + unsigned int max_search; /* + * maximum # of segments/sections + * to search + */ unsigned int offset; /* last scanned bitmap offset */ unsigned int ofs_unit; /* bitmap search unit */ unsigned int min_cost; /* minimum cost */ @@ -184,7 +187,7 @@ struct 
seg_entry { unsigned char *cur_valid_map_mir; /* mirror of current valid bitmap */ #endif /* - * # of valid blocks and the validity bitmap stored in the the last + * # of valid blocks and the validity bitmap stored in the last * checkpoint pack. This information is used by the SSR mode. */ unsigned char *ckpt_valid_map; /* validity bitmap of blocks last cp */ @@ -266,6 +269,7 @@ enum dirty_type { struct dirty_seglist_info { const struct victim_selection *v_ops; /* victim selction operation */ unsigned long *dirty_segmap[NR_DIRTY_TYPE]; + unsigned long *dirty_secmap; struct mutex seglist_lock; /* lock for segment bitmaps */ int nr_dirty[NR_DIRTY_TYPE]; /* # of dirty segments */ unsigned long *victim_secmap; /* background GC victims */ diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 20e56b0fa46a..dfa072fa8081 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -138,6 +138,7 @@ enum { Opt_alloc, Opt_fsync, Opt_test_dummy_encryption, + Opt_inlinecrypt, Opt_checkpoint_disable, Opt_checkpoint_disable_cap, Opt_checkpoint_disable_cap_perc, @@ -204,6 +205,7 @@ static match_table_t f2fs_tokens = { {Opt_fsync, "fsync_mode=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption=%s"}, {Opt_test_dummy_encryption, "test_dummy_encryption"}, + {Opt_inlinecrypt, "inlinecrypt"}, {Opt_checkpoint_disable, "checkpoint=disable"}, {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"}, {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"}, @@ -348,7 +350,7 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype, set_opt(sbi, QUOTA); return 0; errout: - kvfree(qname); + kfree(qname); return ret; } @@ -360,7 +362,7 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype) f2fs_err(sbi, "Cannot change journaled quota options when quota turned on"); return -EINVAL; } - kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]); + kfree(F2FS_OPTION(sbi).s_qf_names[qtype]); F2FS_OPTION(sbi).s_qf_names[qtype] = NULL; return 0; } @@ -460,9 +462,12 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) { struct f2fs_sb_info *sbi = F2FS_SB(sb); substring_t args[MAX_OPT_ARGS]; +#ifdef CONFIG_F2FS_FS_COMPRESSION unsigned char (*ext)[F2FS_EXTENSION_LEN]; + int ext_cnt; +#endif char *p, *name; - int arg = 0, ext_cnt; + int arg = 0; kuid_t uid; kgid_t gid; int ret; @@ -494,10 +499,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) } else if (!strcmp(name, "sync")) { F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_SYNC; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_disable_roll_forward: set_opt(sbi, DISABLE_ROLL_FORWARD); @@ -654,17 +659,17 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) if (!strcmp(name, "adaptive")) { if (f2fs_sb_has_blkzoned(sbi)) { f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature"); - kvfree(name); + kfree(name); return -EINVAL; } F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE; } else if (!strcmp(name, "lfs")) { F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_io_size_bits: if (args->from && match_int(args, &arg)) @@ -790,10 +795,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) } else if (!strcmp(name, "fs-based")) { F2FS_OPTION(sbi).whint_mode = WHINT_MODE_FS; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_alloc: name = 
match_strdup(&args[0]); @@ -805,10 +810,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) } else if (!strcmp(name, "reuse")) { F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_REUSE; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_fsync: name = match_strdup(&args[0]); @@ -822,10 +827,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_NOBARRIER; } else { - kvfree(name); + kfree(name); return -EINVAL; } - kvfree(name); + kfree(name); break; case Opt_test_dummy_encryption: ret = f2fs_set_test_dummy_encryption(sb, p, &args[0], @@ -833,6 +838,13 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) if (ret) return ret; break; + case Opt_inlinecrypt: +#ifdef CONFIG_FS_ENCRYPTION_INLINE_CRYPT + sb->s_flags |= SB_INLINECRYPT; +#else + f2fs_info(sbi, "inline encryption not supported"); +#endif + break; case Opt_checkpoint_disable_cap_perc: if (args->from && match_int(args, &arg)) return -EINVAL; @@ -853,6 +865,7 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) case Opt_checkpoint_enable: clear_opt(sbi, DISABLE_CHECKPOINT); break; +#ifdef CONFIG_F2FS_FS_COMPRESSION case Opt_compress_algorithm: if (!f2fs_sb_has_compression(sbi)) { f2fs_err(sbi, "Compression feature if off"); @@ -918,6 +931,13 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount) F2FS_OPTION(sbi).compress_ext_cnt++; kfree(name); break; +#else + case Opt_compress_algorithm: + case Opt_compress_log_size: + case Opt_compress_extension: + f2fs_info(sbi, "compression options not supported"); + break; +#endif default: f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value", p); @@ -1015,6 +1035,8 @@ static struct inode *f2fs_alloc_inode(struct super_block *sb) /* Will be used by directory only */ fi->i_dir_level = F2FS_SB(sb)->dir_level; + fi->ra_offset = -1; + return &fi->vfs_inode; } @@ -1173,6 +1195,9 @@ static void f2fs_put_super(struct super_block *sb) int i; bool dropped; + /* unregister procfs/sysfs entries in advance to avoid race case */ + f2fs_unregister_sysfs(sbi); + f2fs_quota_off_umount(sb); /* prevent remaining shrinker jobs */ @@ -1238,19 +1263,17 @@ static void f2fs_put_super(struct super_block *sb) kvfree(sbi->ckpt); - f2fs_unregister_sysfs(sbi); - sb->s_fs_info = NULL; if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); - kvfree(sbi->raw_super); + kfree(sbi->raw_super); destroy_device_list(sbi); f2fs_destroy_xattr_caches(sbi); mempool_destroy(sbi->write_io_dummy); #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) - kvfree(F2FS_OPTION(sbi).s_qf_names[i]); + kfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif fscrypt_free_dummy_context(&F2FS_OPTION(sbi).dummy_enc_ctx); destroy_percpu_info(sbi); @@ -1259,7 +1282,7 @@ static void f2fs_put_super(struct super_block *sb) #ifdef CONFIG_UNICODE utf8_unload(sbi->s_encoding); #endif - kvfree(sbi); + kfree(sbi); } int f2fs_sync_fs(struct super_block *sb, int sync) @@ -1590,6 +1613,9 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root) fscrypt_show_test_dummy_encryption(seq, ',', sbi->sb); + if (sbi->sb->s_flags & SB_INLINECRYPT) + seq_puts(seq, ",inlinecrypt"); + if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_DEFAULT) seq_printf(seq, ",alloc_mode=%s", "default"); else if (F2FS_OPTION(sbi).alloc_mode == ALLOC_MODE_REUSE) @@ -1605,7 +1631,9 @@ static int f2fs_show_options(struct seq_file 
*seq, struct dentry *root) else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_NOBARRIER) seq_printf(seq, ",fsync_mode=%s", "nobarrier"); +#ifdef CONFIG_F2FS_FS_COMPRESSION f2fs_show_compress_options(seq, sbi->sb); +#endif return 0; } @@ -1624,6 +1652,8 @@ static void default_options(struct f2fs_sb_info *sbi) F2FS_OPTION(sbi).compress_ext_cnt = 0; F2FS_OPTION(sbi).bggc_mode = BGGC_MODE_ON; + sbi->sb->s_flags &= ~SB_INLINECRYPT; + set_opt(sbi, INLINE_XATTR); set_opt(sbi, INLINE_DATA); set_opt(sbi, INLINE_DENTRY); @@ -1754,7 +1784,7 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data) GFP_KERNEL); if (!org_mount_opt.s_qf_names[i]) { for (j = 0; j < i; j++) - kvfree(org_mount_opt.s_qf_names[j]); + kfree(org_mount_opt.s_qf_names[j]); return -ENOMEM; } } else { @@ -1879,7 +1909,7 @@ skip: #ifdef CONFIG_QUOTA /* Release old quota file names */ for (i = 0; i < MAXQUOTAS; i++) - kvfree(org_mount_opt.s_qf_names[i]); + kfree(org_mount_opt.s_qf_names[i]); #endif /* Update the POSIXACL Flag */ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) | @@ -1900,7 +1930,7 @@ restore_opts: #ifdef CONFIG_QUOTA F2FS_OPTION(sbi).s_jquota_fmt = org_mount_opt.s_jquota_fmt; for (i = 0; i < MAXQUOTAS; i++) { - kvfree(F2FS_OPTION(sbi).s_qf_names[i]); + kfree(F2FS_OPTION(sbi).s_qf_names[i]); F2FS_OPTION(sbi).s_qf_names[i] = org_mount_opt.s_qf_names[i]; } #endif @@ -2470,6 +2500,25 @@ static void f2fs_get_ino_and_lblk_bits(struct super_block *sb, *lblk_bits_ret = 8 * sizeof(block_t); } +static int f2fs_get_num_devices(struct super_block *sb) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + + if (f2fs_is_multi_device(sbi)) + return sbi->s_ndevs; + return 1; +} + +static void f2fs_get_devices(struct super_block *sb, + struct request_queue **devs) +{ + struct f2fs_sb_info *sbi = F2FS_SB(sb); + int i; + + for (i = 0; i < sbi->s_ndevs; i++) + devs[i] = bdev_get_queue(FDEV(i).bdev); +} + static const struct fscrypt_operations f2fs_cryptops = { .key_prefix = "f2fs:", .get_context = f2fs_get_context, @@ -2479,6 +2528,8 @@ static const struct fscrypt_operations f2fs_cryptops = { .max_namelen = F2FS_NAME_LEN, .has_stable_inodes = f2fs_has_stable_inodes, .get_ino_and_lblk_bits = f2fs_get_ino_and_lblk_bits, + .get_num_devices = f2fs_get_num_devices, + .get_devices = f2fs_get_devices, }; #endif @@ -3137,7 +3188,7 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi, /* No valid superblock */ if (!*raw_super) - kvfree(super); + kfree(super); else err = 0; @@ -3811,16 +3862,16 @@ free_bio_info: free_options: #ifdef CONFIG_QUOTA for (i = 0; i < MAXQUOTAS; i++) - kvfree(F2FS_OPTION(sbi).s_qf_names[i]); + kfree(F2FS_OPTION(sbi).s_qf_names[i]); #endif fscrypt_free_dummy_context(&F2FS_OPTION(sbi).dummy_enc_ctx); kvfree(options); free_sb_buf: - kvfree(raw_super); + kfree(raw_super); free_sbi: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); - kvfree(sbi); + kfree(sbi); /* give only one another chance */ if (retry_cnt > 0 && skip_recovery) { diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index e877c59b9fdb..88ed9969cc86 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -27,7 +27,7 @@ enum { NM_INFO, /* struct f2fs_nm_info */ F2FS_SBI, /* struct f2fs_sb_info */ #ifdef CONFIG_F2FS_STAT_FS - STAT_INFO, /* struct f2fs_stat_info */ + STAT_INFO, /* struct f2fs_stat_info */ #endif #ifdef CONFIG_F2FS_FAULT_INJECTION FAULT_INFO_RATE, /* struct f2fs_fault_info */ @@ -223,6 +223,13 @@ static ssize_t avg_vblocks_show(struct f2fs_attr *a, } #endif +static ssize_t main_blkaddr_show(struct f2fs_attr *a, + struct 
f2fs_sb_info *sbi, char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%llu\n", + (unsigned long long)MAIN_BLKADDR(sbi)); +} + static ssize_t f2fs_sbi_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { @@ -350,16 +357,20 @@ out: return -EINVAL; if (!strcmp(a->attr.name, "gc_urgent")) { - if (t >= 1) { - sbi->gc_mode = GC_URGENT; + if (t == 0) { + sbi->gc_mode = GC_NORMAL; + } else if (t == 1) { + sbi->gc_mode = GC_URGENT_HIGH; if (sbi->gc_thread) { sbi->gc_thread->gc_wake = 1; wake_up_interruptible_all( &sbi->gc_thread->gc_wait_queue_head); wake_up_discard_thread(sbi, true); } + } else if (t == 2) { + sbi->gc_mode = GC_URGENT_LOW; } else { - sbi->gc_mode = GC_NORMAL; + return -EINVAL; } return count; } @@ -522,7 +533,6 @@ F2FS_RW_ATTR(GC_THREAD, f2fs_gc_kthread, gc_no_gc_sleep_time, no_gc_sleep_time); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_idle, gc_mode); F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_urgent, gc_mode); F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, reclaim_segments, rec_prefree_segments); -F2FS_RW_ATTR(SM_INFO, f2fs_sm_info, main_blkaddr, main_blkaddr); F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, max_small_discards, max_discards); F2FS_RW_ATTR(DCC_INFO, discard_cmd_control, discard_granularity, discard_granularity); F2FS_RW_ATTR(RESERVED_BLOCKS, f2fs_sb_info, reserved_blocks, reserved_blocks); @@ -565,6 +575,7 @@ F2FS_GENERAL_RO_ATTR(current_reserved_blocks); F2FS_GENERAL_RO_ATTR(unusable); F2FS_GENERAL_RO_ATTR(encoding); F2FS_GENERAL_RO_ATTR(mounted_time_sec); +F2FS_GENERAL_RO_ATTR(main_blkaddr); #ifdef CONFIG_F2FS_STAT_FS F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_foreground_calls, cp_count); F2FS_STAT_ATTR(STAT_INFO, f2fs_stat_info, cp_background_calls, bg_cp_count); @@ -706,7 +717,7 @@ static struct kobj_type f2fs_ktype = { }; static struct kset f2fs_kset = { - .kobj = {.ktype = &f2fs_ktype}, + .kobj = {.ktype = &f2fs_ktype}, }; static struct kobj_type f2fs_feat_ktype = { diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c index 865c9fb774fb..9eb0dba851e8 100644 --- a/fs/f2fs/verity.c +++ b/fs/f2fs/verity.c @@ -29,6 +29,8 @@ #include "f2fs.h" #include "xattr.h" +#define F2FS_VERIFY_VER (1) + static inline loff_t f2fs_verity_metadata_pos(const struct inode *inode) { return round_up(inode->i_size, 65536); @@ -152,7 +154,7 @@ static int f2fs_end_enable_verity(struct file *filp, const void *desc, struct inode *inode = file_inode(filp); u64 desc_pos = f2fs_verity_metadata_pos(inode) + merkle_tree_size; struct fsverity_descriptor_location dloc = { - .version = cpu_to_le32(1), + .version = cpu_to_le32(F2FS_VERIFY_VER), .size = cpu_to_le32(desc_size), .pos = cpu_to_le64(desc_pos), }; @@ -199,7 +201,7 @@ static int f2fs_get_verity_descriptor(struct inode *inode, void *buf, F2FS_XATTR_NAME_VERITY, &dloc, sizeof(dloc), NULL); if (res < 0 && res != -ERANGE) return res; - if (res != sizeof(dloc) || dloc.version != cpu_to_le32(1)) { + if (res != sizeof(dloc) || dloc.version != cpu_to_le32(F2FS_VERIFY_VER)) { f2fs_warn(F2FS_I_SB(inode), "unknown verity xattr format"); return -EINVAL; } diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c index 4f6582ef7ee3..1b0736ce0918 100644 --- a/fs/f2fs/xattr.c +++ b/fs/f2fs/xattr.c @@ -175,8 +175,8 @@ const struct xattr_handler f2fs_xattr_trusted_handler = { const struct xattr_handler f2fs_xattr_advise_handler = { .name = F2FS_SYSTEM_ADVISE_NAME, .flags = F2FS_XATTR_INDEX_ADVISE, - .get = f2fs_xattr_advise_get, - .set = f2fs_xattr_advise_set, + .get = f2fs_xattr_advise_get, + .set = f2fs_xattr_advise_set, }; const struct xattr_handler 
f2fs_xattr_security_handler = { diff --git a/fs/fat/Kconfig b/fs/fat/Kconfig index ca31993dcb47..66532a71e8fd 100644 --- a/fs/fat/Kconfig +++ b/fs/fat/Kconfig @@ -41,7 +41,7 @@ config MSDOS_FS they are compressed; to access compressed MSDOS partitions under Linux, you can either use the DOS emulator DOSEMU, described in the DOSEMU-HOWTO, available from - <http://www.tldp.org/docs.html#howto>, or try dmsdosfs in + <https://www.tldp.org/docs.html#howto>, or try dmsdosfs in <ftp://ibiblio.org/pub/Linux/system/filesystems/dosfs/>. If you intend to use dosemu with a non-compressed MSDOS partition, say Y here) and MSDOS floppies. This means that file access becomes diff --git a/fs/fat/dir.c b/fs/fat/dir.c index b4ddf48fa444..c4a274285858 100644 --- a/fs/fat/dir.c +++ b/fs/fat/dir.c @@ -1284,7 +1284,7 @@ int fat_add_entries(struct inode *dir, void *slots, int nr_slots, struct super_block *sb = dir->i_sb; struct msdos_sb_info *sbi = MSDOS_SB(sb); struct buffer_head *bh, *prev, *bhs[3]; /* 32*slots (672bytes) */ - struct msdos_dir_entry *uninitialized_var(de); + struct msdos_dir_entry *de; int err, free_slots, i, nr_bhs; loff_t pos, i_pos; diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index bbfe18c07417..f7e3304b7802 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c @@ -657,6 +657,9 @@ static void fat_ra_init(struct super_block *sb, struct fatent_ra *ra, unsigned long ra_pages = sb->s_bdi->ra_pages; unsigned int reada_blocks; + if (fatent->entry >= ent_limit) + return; + if (ra_pages > sb->s_bdi->io_pages) ra_pages = rounddown(ra_pages, sb->s_bdi->io_pages); reada_blocks = ra_pages << (PAGE_SHIFT - sb->s_blocksize_bits + 1); diff --git a/fs/fat/file.c b/fs/fat/file.c index 42134c58c87e..f9ee27cf4d7c 100644 --- a/fs/fat/file.c +++ b/fs/fat/file.c @@ -25,9 +25,9 @@ static int fat_ioctl_get_attributes(struct inode *inode, u32 __user *user_attr) { u32 attr; - inode_lock(inode); + inode_lock_shared(inode); attr = fat_make_attrs(inode); - inode_unlock(inode); + inode_unlock_shared(inode); return put_user(attr, user_attr); } diff --git a/fs/fcntl.c b/fs/fcntl.c index 2e4c0fa2074b..19ac5baad50f 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c @@ -362,7 +362,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg, case F_OFD_SETLK: case F_OFD_SETLKW: #endif - /* Fallthrough */ + fallthrough; case F_SETLK: case F_SETLKW: if (copy_from_user(&flock, argp, sizeof(flock))) @@ -771,7 +771,7 @@ static void send_sigio_to_task(struct task_struct *p, if (!do_send_sig_info(signum, &si, p, type)) break; } - /* fall-through - fall back on the old plain SIGIO signal */ + fallthrough; /* fall back on the old plain SIGIO signal */ case 0: do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type); } diff --git a/fs/file.c b/fs/file.c index abb8b7081d7a..21c0893f2f1d 100644 --- a/fs/file.c +++ b/fs/file.c @@ -10,6 +10,7 @@ #include <linux/syscalls.h> #include <linux/export.h> #include <linux/fs.h> +#include <linux/kernel.h> #include <linux/mm.h> #include <linux/sched/signal.h> #include <linux/slab.h> @@ -18,6 +19,8 @@ #include <linux/bitops.h> #include <linux/spinlock.h> #include <linux/rcupdate.h> +#include <linux/close_range.h> +#include <net/sock.h> unsigned int sysctl_nr_open __read_mostly = 1024*1024; unsigned int sysctl_nr_open_min = BITS_PER_LONG; @@ -264,12 +267,22 @@ static unsigned int count_open_files(struct fdtable *fdt) return i; } +static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds) +{ + unsigned int count; + + count = count_open_files(fdt); + if (max_fds < NR_OPEN_DEFAULT) + max_fds 
= NR_OPEN_DEFAULT; + return min(count, max_fds); +} + /* * Allocate a new files structure and copy contents from the * passed in files structure. * errorp will be valid only when the returned files_struct is NULL. */ -struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) +struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp) { struct files_struct *newf; struct file **old_fds, **new_fds; @@ -296,7 +309,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); - open_files = count_open_files(old_fdt); + open_files = sane_fdtable_size(old_fdt, max_fds); /* * Check whether we need to allocate a larger fd array and fd set. @@ -327,7 +340,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) */ spin_lock(&oldf->file_lock); old_fdt = files_fdtable(oldf); - open_files = count_open_files(old_fdt); + open_files = sane_fdtable_size(old_fdt, max_fds); } copy_fd_bitmaps(new_fdt, old_fdt, open_files); @@ -613,6 +626,10 @@ void __fd_install(struct files_struct *files, unsigned int fd, rcu_read_unlock_sched(); } +/* + * This consumes the "file" refcount, so callers should treat it + * as if they had called fput(file). + */ void fd_install(unsigned int fd, struct file *file) { __fd_install(current->files, fd, file); @@ -620,12 +637,9 @@ void fd_install(unsigned int fd, struct file *file) EXPORT_SYMBOL(fd_install); -/* - * The same warnings as for __alloc_fd()/__fd_install() apply here... - */ -int __close_fd(struct files_struct *files, unsigned fd) +static struct file *pick_file(struct files_struct *files, unsigned fd) { - struct file *file; + struct file *file = NULL; struct fdtable *fdt; spin_lock(&files->file_lock); @@ -637,15 +651,105 @@ int __close_fd(struct files_struct *files, unsigned fd) goto out_unlock; rcu_assign_pointer(fdt->fd[fd], NULL); __put_unused_fd(files, fd); - spin_unlock(&files->file_lock); - return filp_close(file, files); out_unlock: spin_unlock(&files->file_lock); - return -EBADF; + return file; +} + +/* + * The same warnings as for __alloc_fd()/__fd_install() apply here... + */ +int __close_fd(struct files_struct *files, unsigned fd) +{ + struct file *file; + + file = pick_file(files, fd); + if (!file) + return -EBADF; + + return filp_close(file, files); } EXPORT_SYMBOL(__close_fd); /* for ksys_close() */ +/** + * __close_range() - Close all file descriptors in a given range. + * + * @fd: starting file descriptor to close + * @max_fd: last file descriptor to close + * + * This closes a range of file descriptors. All file descriptors + * from @fd up to and including @max_fd are closed. + */ +int __close_range(unsigned fd, unsigned max_fd, unsigned int flags) +{ + unsigned int cur_max; + struct task_struct *me = current; + struct files_struct *cur_fds = me->files, *fds = NULL; + + if (flags & ~CLOSE_RANGE_UNSHARE) + return -EINVAL; + + if (fd > max_fd) + return -EINVAL; + + rcu_read_lock(); + cur_max = files_fdtable(cur_fds)->max_fds; + rcu_read_unlock(); + + /* cap to last valid index into fdtable */ + cur_max--; + + if (flags & CLOSE_RANGE_UNSHARE) { + int ret; + unsigned int max_unshare_fds = NR_OPEN_MAX; + + /* + * If the requested range is greater than the current maximum, + * we're closing everything so only copy all file descriptors + * beneath the lowest file descriptor. 
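
To make the range semantics of the new __close_range() concrete, here is a minimal userspace sketch (an editorial illustration, not part of this patch): it closes every descriptor from a starting fd upward, capping at the process's descriptor limit much as the kernel caps at the last valid fdtable index. The __NR_close_range probe and the RLIMIT_NOFILE fallback, like the file and function names, are assumptions for portability rather than anything this series mandates.

/* close_range_demo.c - sketch of "close everything from 'first' upward".
 * Uses close_range(2) when the syscall number is available, otherwise a
 * close() loop capped at RLIMIT_NOFILE (a userspace analogue of capping
 * at the fdtable size). Error handling is intentionally minimal. */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/resource.h>
#include <sys/syscall.h>

static void close_from(unsigned int first)
{
#ifdef __NR_close_range
	if (syscall(__NR_close_range, first, ~0U, 0) == 0)
		return;
#endif
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) != 0)
		return;
	for (unsigned int fd = first; fd < rl.rlim_cur; fd++)
		close(fd);
}

int main(void)
{
	int a = open("/dev/null", O_RDONLY);
	int b = open("/dev/null", O_RDONLY);

	printf("opened fds %d and %d\n", a, b);
	close_from(3);			/* keep stdin/stdout/stderr */
	printf("fd %d is %s\n", a,
	       fcntl(a, F_GETFD) == -1 ? "closed" : "still open");
	return 0;
}
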
+ */ + if (max_fd >= cur_max) + max_unshare_fds = fd; + + ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds); + if (ret) + return ret; + + /* + * We used to share our file descriptor table, and have now + * created a private one, make sure we're using it below. + */ + if (fds) + swap(cur_fds, fds); + } + + max_fd = min(max_fd, cur_max); + while (fd <= max_fd) { + struct file *file; + + file = pick_file(cur_fds, fd++); + if (!file) + continue; + + filp_close(file, cur_fds); + cond_resched(); + } + + if (fds) { + /* + * We're done closing the files we were supposed to. Time to install + * the new file descriptor table and drop the old one. + */ + task_lock(me); + me->files = cur_fds; + task_unlock(me); + put_files_struct(fds); + } + + return 0; +} + /* * variant of __close_fd that gets a ref on the file for later fput. * The caller must ensure that filp_close() called on the file, and then @@ -931,6 +1035,62 @@ out_unlock: return err; } +/** + * __receive_fd() - Install received file into file descriptor table + * + * @fd: fd to install into (if negative, a new fd will be allocated) + * @file: struct file that was received from another process + * @ufd: __user pointer to write new fd number to + * @o_flags: the O_* flags to apply to the new fd entry + * + * Installs a received file into the file descriptor table, with appropriate + * checks and count updates. Optionally writes the fd number to userspace, if + * @ufd is non-NULL. + * + * This helper handles its own reference counting of the incoming + * struct file. + * + * Returns newly install fd or -ve on error. + */ +int __receive_fd(int fd, struct file *file, int __user *ufd, unsigned int o_flags) +{ + int new_fd; + int error; + + error = security_file_receive(file); + if (error) + return error; + + if (fd < 0) { + new_fd = get_unused_fd_flags(o_flags); + if (new_fd < 0) + return new_fd; + } else { + new_fd = fd; + } + + if (ufd) { + error = put_user(new_fd, ufd); + if (error) { + if (fd < 0) + put_unused_fd(new_fd); + return error; + } + } + + if (fd < 0) { + fd_install(new_fd, get_file(file)); + } else { + error = replace_fd(new_fd, file, o_flags); + if (error) + return error; + } + + /* Bump the sock usage counts, if any. 
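
__receive_fd() above installs a descriptor received from another process; for context, this is a minimal standalone sketch of the userspace SCM_RIGHTS exchange that reaches that path. The socketpair setup and helper names are illustrative and error handling is trimmed; none of it is kernel API.

/* recv_fd_demo.c - pass a descriptor over an AF_UNIX socketpair. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>

static void send_fd(int sock, int fd)
{
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	char iobuf[1] = { 'x' };
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = iobuf, .iov_len = 1 };

	memset(cbuf, 0, sizeof(cbuf));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_SOCKET;
	cmsg->cmsg_type = SCM_RIGHTS;
	cmsg->cmsg_len = CMSG_LEN(sizeof(int));
	memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
	if (sendmsg(sock, &msg, 0) < 0) { perror("sendmsg"); exit(1); }
}

static int recv_fd(int sock)
{
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	char iobuf[1];
	char cbuf[CMSG_SPACE(sizeof(int))];
	struct iovec iov = { .iov_base = iobuf, .iov_len = 1 };
	int fd;

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);
	if (recvmsg(sock, &msg, 0) < 0) { perror("recvmsg"); exit(1); }
	cmsg = CMSG_FIRSTHDR(&msg);
	memcpy(&fd, CMSG_DATA(cmsg), sizeof(int));
	return fd;	/* the kernel installed a fresh descriptor for us */
}

int main(void)
{
	const char text[] = "wrote through the received descriptor\n";
	int sv[2], fd;

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
		perror("socketpair");
		return 1;
	}
	send_fd(sv[0], STDOUT_FILENO);
	fd = recv_fd(sv[1]);
	write(fd, text, sizeof(text) - 1);
	close(fd);
	return 0;
}
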
*/ + __receive_sock(file); + return new_fd; +} + static int ksys_dup3(unsigned int oldfd, unsigned int newfd, int flags) { int err = -EBADF; @@ -985,7 +1145,7 @@ SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) return ksys_dup3(oldfd, newfd, 0); } -int ksys_dup(unsigned int fildes) +SYSCALL_DEFINE1(dup, unsigned int, fildes) { int ret = -EBADF; struct file *file = fget_raw(fildes); @@ -1000,11 +1160,6 @@ int ksys_dup(unsigned int fildes) return ret; } -SYSCALL_DEFINE1(dup, unsigned int, fildes) -{ - return ksys_dup(fildes); -} - int f_dupfd(unsigned int from, struct file *file, unsigned flags) { int err; diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index a605c3dddabc..149227160ff0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -42,7 +42,6 @@ struct wb_writeback_work { long nr_pages; struct super_block *sb; - unsigned long *older_than_this; enum writeback_sync_modes sync_mode; unsigned int tagged_writepages:1; unsigned int for_kupdate:1; @@ -144,7 +143,9 @@ static void inode_io_list_del_locked(struct inode *inode, struct bdi_writeback *wb) { assert_spin_locked(&wb->list_lock); + assert_spin_locked(&inode->i_lock); + inode->i_state &= ~I_SYNC_QUEUED; list_del_init(&inode->i_io_list); wb_io_lists_depopulated(wb); } @@ -1122,7 +1123,9 @@ void inode_io_list_del(struct inode *inode) struct bdi_writeback *wb; wb = inode_to_wb_and_lock_list(inode); + spin_lock(&inode->i_lock); inode_io_list_del_locked(inode, wb); + spin_unlock(&inode->i_lock); spin_unlock(&wb->list_lock); } EXPORT_SYMBOL(inode_io_list_del); @@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode) * the case then the inode must have been redirtied while it was being written * out and we don't reset its dirtied_when. */ -static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb) { + assert_spin_locked(&inode->i_lock); + if (!list_empty(&wb->b_dirty)) { struct inode *tail; @@ -1182,6 +1187,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) inode->dirtied_when = jiffies; } inode_io_list_move_locked(inode, wb, &wb->b_dirty); + inode->i_state &= ~I_SYNC_QUEUED; +} + +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb) +{ + spin_lock(&inode->i_lock); + redirty_tail_locked(inode, wb); + spin_unlock(&inode->i_lock); } /* @@ -1220,16 +1233,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) #define EXPIRE_DIRTY_ATIME 0x0001 /* - * Move expired (dirtied before work->older_than_this) dirty inodes from + * Move expired (dirtied before dirtied_before) dirty inodes from * @delaying_queue to @dispatch_queue. 
*/ static int move_expired_inodes(struct list_head *delaying_queue, struct list_head *dispatch_queue, - int flags, - struct wb_writeback_work *work) + unsigned long dirtied_before) { - unsigned long *older_than_this = NULL; - unsigned long expire_time; LIST_HEAD(tmp); struct list_head *pos, *node; struct super_block *sb = NULL; @@ -1237,21 +1247,15 @@ static int move_expired_inodes(struct list_head *delaying_queue, int do_sb_sort = 0; int moved = 0; - if ((flags & EXPIRE_DIRTY_ATIME) == 0) - older_than_this = work->older_than_this; - else if (!work->for_sync) { - expire_time = jiffies - (dirtytime_expire_interval * HZ); - older_than_this = &expire_time; - } while (!list_empty(delaying_queue)) { inode = wb_inode(delaying_queue->prev); - if (older_than_this && - inode_dirtied_after(inode, *older_than_this)) + if (inode_dirtied_after(inode, dirtied_before)) break; list_move(&inode->i_io_list, &tmp); moved++; - if (flags & EXPIRE_DIRTY_ATIME) - set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); + spin_lock(&inode->i_lock); + inode->i_state |= I_SYNC_QUEUED; + spin_unlock(&inode->i_lock); if (sb_is_blkdev_sb(inode->i_sb)) continue; if (sb && sb != inode->i_sb) @@ -1289,18 +1293,22 @@ out: * | * +--> dequeue for IO */ -static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) +static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work, + unsigned long dirtied_before) { int moved; + unsigned long time_expire_jif = dirtied_before; assert_spin_locked(&wb->list_lock); list_splice_init(&wb->b_more_io, &wb->b_io); - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before); + if (!work->for_sync) + time_expire_jif = jiffies - dirtytime_expire_interval * HZ; moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, - EXPIRE_DIRTY_ATIME, work); + time_expire_jif); if (moved) wb_io_lists_populated(wb); - trace_writeback_queue_io(wb, work, moved); + trace_writeback_queue_io(wb, work, dirtied_before, moved); } static int write_inode(struct inode *inode, struct writeback_control *wbc) @@ -1394,7 +1402,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * writeback is not making progress due to locked * buffers. Skip this inode for now. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); return; } @@ -1414,7 +1422,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * retrying writeback of the dirty page/inode * that cannot be performed immediately. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } } else if (inode->i_state & I_DIRTY) { /* @@ -1422,10 +1430,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb, * such as delayed allocation during submission or metadata * updates after data IO completion. */ - redirty_tail(inode, wb); + redirty_tail_locked(inode, wb); } else if (inode->i_state & I_DIRTY_TIME) { inode->dirtied_when = jiffies; inode_io_list_move_locked(inode, wb, &wb->b_dirty_time); + inode->i_state &= ~I_SYNC_QUEUED; } else { /* The inode is clean. Remove from writeback lists. 
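
move_expired_inodes() above now takes a single dirtied_before cutoff instead of reading work->older_than_this, and marks moved entries as queued for sync. A self-contained sketch of that rule follows, using a toy singly linked queue ordered oldest first; the struct, field names, and the sync_queued flag are illustrative stand-ins, not the kernel's inode lists or I_SYNC_QUEUED.

/* expire_demo.c - move entries dirtied at or before a cutoff from the
 * delaying queue to the dispatch queue, flagging them as queued. */
#include <stdio.h>
#include <stdbool.h>

struct entry {
	unsigned long dirtied_when;	/* "jiffies" at dirtying time */
	bool sync_queued;		/* analogue of the queued flag */
	struct entry *next;
};

static int move_expired(struct entry **delaying, struct entry **dispatch,
			unsigned long dirtied_before)
{
	int moved = 0;

	while (*delaying) {
		struct entry *e = *delaying;

		/* queue runs oldest..newest; stop at the first young entry */
		if ((long)(e->dirtied_when - dirtied_before) > 0)
			break;
		*delaying = e->next;
		e->sync_queued = true;
		e->next = *dispatch;
		*dispatch = e;
		moved++;
	}
	return moved;
}

int main(void)
{
	struct entry c = { 30, false, NULL };
	struct entry b = { 20, false, &c };
	struct entry a = { 10, false, &b };
	struct entry *delaying = &a, *dispatch = NULL;

	printf("moved %d entries dirtied before t=25\n",
	       move_expired(&delaying, &dispatch, 25));
	return 0;
}
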
*/ inode_io_list_del_locked(inode, wb); @@ -1472,18 +1481,14 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc) spin_lock(&inode->i_lock); dirty = inode->i_state & I_DIRTY; - if (inode->i_state & I_DIRTY_TIME) { - if ((dirty & I_DIRTY_INODE) || - wbc->sync_mode == WB_SYNC_ALL || - unlikely(inode->i_state & I_DIRTY_TIME_EXPIRED) || - unlikely(time_after(jiffies, - (inode->dirtied_time_when + - dirtytime_expire_interval * HZ)))) { - dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; - trace_writeback_lazytime(inode); - } - } else - inode->i_state &= ~I_DIRTY_TIME_EXPIRED; + if ((inode->i_state & I_DIRTY_TIME) && + ((dirty & I_DIRTY_INODE) || + wbc->sync_mode == WB_SYNC_ALL || wbc->for_sync || + time_after(jiffies, inode->dirtied_time_when + + dirtytime_expire_interval * HZ))) { + dirty |= I_DIRTY_TIME; + trace_writeback_lazytime(inode); + } inode->i_state &= ~dirty; /* @@ -1669,8 +1674,8 @@ static long writeback_sb_inodes(struct super_block *sb, */ spin_lock(&inode->i_lock); if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { + redirty_tail_locked(inode, wb); spin_unlock(&inode->i_lock); - redirty_tail(inode, wb); continue; } if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) { @@ -1811,7 +1816,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, blk_start_plug(&plug); spin_lock(&wb->list_lock); if (list_empty(&wb->b_io)) - queue_io(wb, &work); + queue_io(wb, &work, jiffies); __writeback_inodes_wb(wb, &work); spin_unlock(&wb->list_lock); blk_finish_plug(&plug); @@ -1831,7 +1836,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages, * takes longer than a dirty_writeback_interval interval, then leave a * one-second gap. * - * older_than_this takes precedence over nr_to_write. So we'll only write back + * dirtied_before takes precedence over nr_to_write. So we'll only write back * all dirty pages if they are all attached to "old" mappings. */ static long wb_writeback(struct bdi_writeback *wb, @@ -1839,14 +1844,11 @@ static long wb_writeback(struct bdi_writeback *wb, { unsigned long wb_start = jiffies; long nr_pages = work->nr_pages; - unsigned long oldest_jif; + unsigned long dirtied_before = jiffies; struct inode *inode; long progress; struct blk_plug plug; - oldest_jif = jiffies; - work->older_than_this = &oldest_jif; - blk_start_plug(&plug); spin_lock(&wb->list_lock); for (;;) { @@ -1880,14 +1882,14 @@ static long wb_writeback(struct bdi_writeback *wb, * safe. */ if (work->for_kupdate) { - oldest_jif = jiffies - + dirtied_before = jiffies - msecs_to_jiffies(dirty_expire_interval * 10); } else if (work->for_background) - oldest_jif = jiffies; + dirtied_before = jiffies; trace_writeback_start(wb, work); if (list_empty(&wb->b_io)) - queue_io(wb, work); + queue_io(wb, work, dirtied_before); if (work->sb) progress = writeback_sb_inodes(work->sb, wb, work); else @@ -2289,11 +2291,12 @@ void __mark_inode_dirty(struct inode *inode, int flags) inode->i_state |= flags; /* - * If the inode is being synced, just update its dirty state. - * The unlocker will place the inode on the appropriate - * superblock list, based upon its state. + * If the inode is queued for writeback by flush worker, just + * update its dirty state. Once the flush worker is done with + * the inode it will place it on the appropriate superblock + * list, based upon its state. 
*/ - if (inode->i_state & I_SYNC) + if (inode->i_state & I_SYNC_QUEUED) goto out_unlock_inode; /* diff --git a/fs/fs_context.c b/fs/fs_context.c index 7d5c5dd2b1d5..2834d1afa6e8 100644 --- a/fs/fs_context.c +++ b/fs/fs_context.c @@ -521,7 +521,7 @@ static int legacy_parse_param(struct fs_context *fc, struct fs_parameter *param) switch (param->type) { case fs_value_is_string: len = 1 + param->size; - /* Fall through */ + fallthrough; case fs_value_is_flag: len += strlen(param->key); break; diff --git a/fs/fs_struct.c b/fs/fs_struct.c index ca639ed967b7..04b3f5b9c629 100644 --- a/fs/fs_struct.c +++ b/fs/fs_struct.c @@ -117,7 +117,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old) fs->users = 1; fs->in_exec = 0; spin_lock_init(&fs->lock); - seqcount_init(&fs->seq); + seqcount_spinlock_init(&fs->seq, &fs->lock); fs->umask = old->umask; spin_lock(&old->lock); @@ -163,6 +163,6 @@ EXPORT_SYMBOL(current_umask); struct fs_struct init_fs = { .users = 1, .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock), - .seq = SEQCNT_ZERO(init_fs.seq), + .seq = SEQCNT_SPINLOCK_ZERO(init_fs.seq, &init_fs.lock), .umask = 0022, }; diff --git a/fs/fsopen.c b/fs/fsopen.c index 2fa3f241b762..27a890aa493a 100644 --- a/fs/fsopen.c +++ b/fs/fsopen.c @@ -412,7 +412,7 @@ SYSCALL_DEFINE5(fsconfig, break; case FSCONFIG_SET_PATH_EMPTY: lookup_flags = LOOKUP_EMPTY; - /* fallthru */ + fallthrough; case FSCONFIG_SET_PATH: param.type = fs_value_is_filename; param.name = getname_flags(_value, lookup_flags, NULL); diff --git a/fs/fuse/control.c b/fs/fuse/control.c index c23f6f243ad4..a1303ad303ba 100644 --- a/fs/fuse/control.c +++ b/fs/fuse/control.c @@ -120,7 +120,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned uninitialized_var(val); + unsigned val; ssize_t ret; ret = fuse_conn_limit_write(file, buf, count, ppos, &val, @@ -162,7 +162,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned uninitialized_var(val); + unsigned val; struct fuse_conn *fc; ssize_t ret; diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c index 030f094910c3..2cc17816d7b1 100644 --- a/fs/fuse/cuse.c +++ b/fs/fuse/cuse.c @@ -270,7 +270,7 @@ static int cuse_parse_one(char **pp, char *end, char **keyp, char **valp) static int cuse_parse_devinfo(char *p, size_t len, struct cuse_devinfo *devinfo) { char *end = p + len; - char *uninitialized_var(key), *uninitialized_var(val); + char *key, *val; int rc; while (true) { diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 83d917f7e542..6611ef3269a8 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -2987,7 +2987,7 @@ static void fuse_register_polled_file(struct fuse_conn *fc, { spin_lock(&fc->lock); if (RB_EMPTY_NODE(&ff->polled_node)) { - struct rb_node **link, *uninitialized_var(parent); + struct rb_node **link, *parent; link = fuse_find_polled_node(fc, ff->kh, &parent); BUG_ON(*link); diff --git a/fs/fuse/virtio_fs.c b/fs/fuse/virtio_fs.c index 4c4ef5d69298..104f35de5270 100644 --- a/fs/fuse/virtio_fs.c +++ b/fs/fuse/virtio_fs.c @@ -606,8 +606,8 @@ static int virtio_fs_setup_vqs(struct virtio_device *vdev, unsigned int i; int ret = 0; - virtio_cread(vdev, struct virtio_fs_config, num_request_queues, - &fs->num_request_queues); + virtio_cread_le(vdev, struct virtio_fs_config, num_request_queues, + &fs->num_request_queues); if (fs->num_request_queues == 0) return -EINVAL; diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c index 68cd700a2719..d4af283fc888 
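
The fs_context.c and fsopen.c hunks above, like many others in this diff, replace /* fall through */ comments with the fallthrough pseudo-keyword. As a standalone sketch of what that buys: the keyword expands to a compiler attribute, so with -Wimplicit-fallthrough any unannotated drop into the next case is flagged while intentional ones stay silent. The __has_attribute probe below only approximates the kernel's compiler_attributes.h fallback and is an assumption, not a quotation.

/* fallthrough_demo.c - annotating an intentional switch fall-through. */
#include <stdio.h>

#if defined(__has_attribute)
# if __has_attribute(__fallthrough__)
#  define fallthrough __attribute__((__fallthrough__))
# endif
#endif
#ifndef fallthrough
# define fallthrough do {} while (0)	/* comment-era fallback */
#endif

static int classify(int status)
{
	int res = 0;

	switch (status) {
	case 1:
		res = 10;
		fallthrough;	/* deliberately also handled as case 2 */
	case 2:
		res += 1;
		break;
	default:
		res = -1;
		break;
	}
	return res;
}

int main(void)
{
	printf("classify(1)=%d classify(2)=%d classify(3)=%d\n",
	       classify(1), classify(2), classify(3));
	return 0;
}
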
100644 --- a/fs/gfs2/aops.c +++ b/fs/gfs2/aops.c @@ -335,7 +335,7 @@ static int gfs2_write_cache_jdata(struct address_space *mapping, int done = 0; struct pagevec pvec; int nr_pages; - pgoff_t uninitialized_var(writeback_index); + pgoff_t writeback_index; pgoff_t index; pgoff_t end; pgoff_t done_index; diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 6306eaae378b..0f69fbd4af66 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -746,7 +746,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap, } if (n == 0) break; - /* fall through - To branching from existing tree */ + fallthrough; /* To branching from existing tree */ case ALLOC_GROW_DEPTH: if (i > 1 && i < mp->mp_fheight) gfs2_trans_add_meta(ip->i_gl, mp->mp_bh[i-1]); @@ -757,7 +757,7 @@ static int gfs2_iomap_alloc(struct inode *inode, struct iomap *iomap, state = ALLOC_DATA; if (n == 0) break; - /* fall through - To tree complete, adding data blocks */ + fallthrough; /* To tree complete, adding data blocks */ case ALLOC_DATA: BUG_ON(n > dblks); BUG_ON(mp->mp_bh[end_of_metadata] == NULL); @@ -1351,9 +1351,15 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi return ret; } +/* + * NOTE: Never call gfs2_block_zero_range with an open transaction because it + * uses iomap write to perform its actions, which begin their own transactions + * (iomap_begin, page_prepare, etc.) + */ static int gfs2_block_zero_range(struct inode *inode, loff_t from, unsigned int length) { + BUG_ON(current->journal_info); return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops); } @@ -1414,6 +1420,16 @@ static int trunc_start(struct inode *inode, u64 newsize) u64 oldsize = inode->i_size; int error; + if (!gfs2_is_stuffed(ip)) { + unsigned int blocksize = i_blocksize(inode); + unsigned int offs = newsize & (blocksize - 1); + if (offs) { + error = gfs2_block_zero_range(inode, newsize, + blocksize - offs); + if (error) + return error; + } + } if (journaled) error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES); else @@ -1427,19 +1443,10 @@ static int trunc_start(struct inode *inode, u64 newsize) gfs2_trans_add_meta(ip->i_gl, dibh); - if (gfs2_is_stuffed(ip)) { + if (gfs2_is_stuffed(ip)) gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize); - } else { - unsigned int blocksize = i_blocksize(inode); - unsigned int offs = newsize & (blocksize - 1); - if (offs) { - error = gfs2_block_zero_range(inode, newsize, - blocksize - offs); - if (error) - goto out; - } + else ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG; - } i_size_write(inode, newsize); ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); @@ -1761,7 +1768,7 @@ static int punch_hole(struct gfs2_inode *ip, u64 offset, u64 length) u64 lblock = (offset + (1 << bsize_shift) - 1) >> bsize_shift; __u16 start_list[GFS2_MAX_META_HEIGHT]; __u16 __end_list[GFS2_MAX_META_HEIGHT], *end_list = NULL; - unsigned int start_aligned, uninitialized_var(end_aligned); + unsigned int start_aligned, end_aligned; unsigned int strip_h = ip->i_height - 1; u32 btotal = 0; int ret, state; @@ -2448,25 +2455,7 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length) loff_t start, end; int error; - start = round_down(offset, blocksize); - end = round_up(offset + length, blocksize) - 1; - error = filemap_write_and_wait_range(inode->i_mapping, start, end); - if (error) - return error; - - if (gfs2_is_jdata(ip)) - error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA, - GFS2_JTRUNC_REVOKES); - else - error = 
gfs2_trans_begin(sdp, RES_DINODE, 0); - if (error) - return error; - - if (gfs2_is_stuffed(ip)) { - error = stuffed_zero_range(inode, offset, length); - if (error) - goto out; - } else { + if (!gfs2_is_stuffed(ip)) { unsigned int start_off, end_len; start_off = offset & (blocksize - 1); @@ -2489,6 +2478,26 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length) } } + start = round_down(offset, blocksize); + end = round_up(offset + length, blocksize) - 1; + error = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (error) + return error; + + if (gfs2_is_jdata(ip)) + error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA, + GFS2_JTRUNC_REVOKES); + else + error = gfs2_trans_begin(sdp, RES_DINODE, 0); + if (error) + return error; + + if (gfs2_is_stuffed(ip)) { + error = stuffed_zero_range(inode, offset, length); + if (error) + goto out; + } + if (gfs2_is_jdata(ip)) { BUG_ON(!current->journal_info); gfs2_journaled_truncate_range(inode, offset, length); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index bebde537ac8c..b39b339feddc 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -781,39 +781,39 @@ static int gfs2_fsync(struct file *file, loff_t start, loff_t end, return ret ? ret : ret1; } -static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to) +static ssize_t gfs2_file_direct_read(struct kiocb *iocb, struct iov_iter *to, + struct gfs2_holder *gh) { struct file *file = iocb->ki_filp; struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); size_t count = iov_iter_count(to); - struct gfs2_holder gh; ssize_t ret; if (!count) return 0; /* skip atime */ - gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh); - ret = gfs2_glock_nq(&gh); + gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh); + ret = gfs2_glock_nq(gh); if (ret) goto out_uninit; ret = iomap_dio_rw(iocb, to, &gfs2_iomap_ops, NULL, is_sync_kiocb(iocb)); - gfs2_glock_dq(&gh); + gfs2_glock_dq(gh); out_uninit: - gfs2_holder_uninit(&gh); + gfs2_holder_uninit(gh); return ret; } -static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from) +static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from, + struct gfs2_holder *gh) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; struct gfs2_inode *ip = GFS2_I(inode); size_t len = iov_iter_count(from); loff_t offset = iocb->ki_pos; - struct gfs2_holder gh; ssize_t ret; /* @@ -824,8 +824,8 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from) * unfortunately, have the option of only flushing a range like the * VFS does. 
*/ - gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, &gh); - ret = gfs2_glock_nq(&gh); + gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh); + ret = gfs2_glock_nq(gh); if (ret) goto out_uninit; @@ -835,11 +835,12 @@ static ssize_t gfs2_file_direct_write(struct kiocb *iocb, struct iov_iter *from) ret = iomap_dio_rw(iocb, from, &gfs2_iomap_ops, NULL, is_sync_kiocb(iocb)); - + if (ret == -ENOTBLK) + ret = 0; out: - gfs2_glock_dq(&gh); + gfs2_glock_dq(gh); out_uninit: - gfs2_holder_uninit(&gh); + gfs2_holder_uninit(gh); return ret; } @@ -851,7 +852,7 @@ static ssize_t gfs2_file_read_iter(struct kiocb *iocb, struct iov_iter *to) ssize_t ret; if (iocb->ki_flags & IOCB_DIRECT) { - ret = gfs2_file_direct_read(iocb, to); + ret = gfs2_file_direct_read(iocb, to, &gh); if (likely(ret != -ENOTBLK)) return ret; iocb->ki_flags &= ~IOCB_DIRECT; @@ -900,13 +901,12 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct file *file = iocb->ki_filp; struct inode *inode = file_inode(file); struct gfs2_inode *ip = GFS2_I(inode); + struct gfs2_holder gh; ssize_t ret; gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from)); if (iocb->ki_flags & IOCB_APPEND) { - struct gfs2_holder gh; - ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); if (ret) return ret; @@ -930,7 +930,7 @@ static ssize_t gfs2_file_write_iter(struct kiocb *iocb, struct iov_iter *from) struct address_space *mapping = file->f_mapping; ssize_t buffered, ret2; - ret = gfs2_file_direct_write(iocb, from); + ret = gfs2_file_direct_write(iocb, from, &gh); if (ret < 0 || !iov_iter_count(from)) goto out_unlock; diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 8545024a1401..f13b136654ca 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c @@ -790,9 +790,11 @@ static void gfs2_glock_poke(struct gfs2_glock *gl) struct gfs2_holder gh; int error; - error = gfs2_glock_nq_init(gl, LM_ST_SHARED, flags, &gh); + gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh); + error = gfs2_glock_nq(&gh); if (!error) gfs2_glock_dq(&gh); + gfs2_holder_uninit(&gh); } static bool gfs2_try_evict(struct gfs2_glock *gl) @@ -2106,6 +2108,12 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl) *p++ = 'o'; if (test_bit(GLF_BLOCKING, gflags)) *p++ = 'b'; + if (test_bit(GLF_INODE_CREATING, gflags)) + *p++ = 'c'; + if (test_bit(GLF_PENDING_DELETE, gflags)) + *p++ = 'P'; + if (test_bit(GLF_FREEING, gflags)) + *p++ = 'x'; *p = 0; return buf; } diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c index a76e55bc28eb..3763c9ff1406 100644 --- a/fs/gfs2/log.c +++ b/fs/gfs2/log.c @@ -902,6 +902,36 @@ static void empty_ail1_list(struct gfs2_sbd *sdp) } /** + * drain_bd - drain the buf and databuf queue for a failed transaction + * @tr: the transaction to drain + * + * When this is called, we're taking an error exit for a log write that failed + * but since we bypassed the after_commit functions, we need to remove the + * items from the buf and databuf queue. 
+ */ +static void trans_drain(struct gfs2_trans *tr) +{ + struct gfs2_bufdata *bd; + struct list_head *head; + + if (!tr) + return; + + head = &tr->tr_buf; + while (!list_empty(head)) { + bd = list_first_entry(head, struct gfs2_bufdata, bd_list); + list_del_init(&bd->bd_list); + kmem_cache_free(gfs2_bufdata_cachep, bd); + } + head = &tr->tr_databuf; + while (!list_empty(head)) { + bd = list_first_entry(head, struct gfs2_bufdata, bd_list); + list_del_init(&bd->bd_list); + kmem_cache_free(gfs2_bufdata_cachep, bd); + } +} + +/** * gfs2_log_flush - flush incore transaction(s) * @sdp: the filesystem * @gl: The glock structure to flush. If NULL, flush the whole incore log @@ -1005,6 +1035,7 @@ void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl, u32 flags) out: if (gfs2_withdrawn(sdp)) { + trans_drain(tr); /** * If the tr_list is empty, we're withdrawing during a log * flush that targets a transaction, but the transaction was @@ -1092,7 +1123,7 @@ static void log_refund(struct gfs2_sbd *sdp, struct gfs2_trans *tr) * or the total number of used blocks (pinned blocks plus AIL blocks) * is greater than thresh2. * - * At mount time thresh1 is 1/3rd of journal size, thresh2 is 2/3rd of + * At mount time thresh1 is 2/5ths of journal size, thresh2 is 4/5ths of * journal size. * * Returns: errno diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c index cb2a11b458c6..ed1da4323967 100644 --- a/fs/gfs2/lops.c +++ b/fs/gfs2/lops.c @@ -419,7 +419,7 @@ static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd, struct page *page) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); - struct gfs2_log_header_host uninitialized_var(lh); + struct gfs2_log_header_host lh; void *kaddr = kmap_atomic(page); unsigned int offset; bool ret = false; diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c index 4b67d47a7e00..6e173ae378c4 100644 --- a/fs/gfs2/quota.c +++ b/fs/gfs2/quota.c @@ -1599,7 +1599,7 @@ static int gfs2_quota_get_state(struct super_block *sb, struct qc_state *state) case GFS2_QUOTA_ON: state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED; state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED; - /*FALLTHRU*/ + fallthrough; case GFS2_QUOTA_ACCOUNT: state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED | QCI_SYSFILE; diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index 47d0ae158b69..9f4d9e7be839 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -566,6 +566,7 @@ static void gfs2_dirty_inode(struct inode *inode, int flags) ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); if (ret) { fs_err(sdp, "dirty_inode: glock %d\n", ret); + gfs2_dump_glock(NULL, ip->i_gl, true); return; } need_unlock = 1; diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c index a3dfa3aa87ad..6d4bf7ea7b3b 100644 --- a/fs/gfs2/trans.c +++ b/fs/gfs2/trans.c @@ -25,13 +25,28 @@ #include "util.h" #include "trace_gfs2.h" +static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr) +{ + fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip); + fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n", + tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, + test_bit(TR_TOUCHED, &tr->tr_flags)); + fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n", + tr->tr_num_buf_new, tr->tr_num_buf_rm, + tr->tr_num_databuf_new, tr->tr_num_databuf_rm, + tr->tr_num_revoke, tr->tr_num_revoke_rm); +} + int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, unsigned int revokes) { struct gfs2_trans *tr; int error; - BUG_ON(current->journal_info); + if (current->journal_info) { + gfs2_print_trans(sdp, current->journal_info); + 
BUG(); + } BUG_ON(blocks == 0 && revokes == 0); if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) @@ -52,6 +67,7 @@ int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks, tr->tr_reserved += gfs2_struct2blk(sdp, revokes); INIT_LIST_HEAD(&tr->tr_databuf); INIT_LIST_HEAD(&tr->tr_buf); + INIT_LIST_HEAD(&tr->tr_list); INIT_LIST_HEAD(&tr->tr_ail1_list); INIT_LIST_HEAD(&tr->tr_ail2_list); @@ -72,18 +88,6 @@ fail: return error; } -static void gfs2_print_trans(struct gfs2_sbd *sdp, const struct gfs2_trans *tr) -{ - fs_warn(sdp, "Transaction created at: %pSR\n", (void *)tr->tr_ip); - fs_warn(sdp, "blocks=%u revokes=%u reserved=%u touched=%u\n", - tr->tr_blocks, tr->tr_revokes, tr->tr_reserved, - test_bit(TR_TOUCHED, &tr->tr_flags)); - fs_warn(sdp, "Buf %u/%u Databuf %u/%u Revoke %u/%u\n", - tr->tr_num_buf_new, tr->tr_num_buf_rm, - tr->tr_num_databuf_new, tr->tr_num_databuf_rm, - tr->tr_num_revoke, tr->tr_num_revoke_rm); -} - void gfs2_trans_end(struct gfs2_sbd *sdp) { struct gfs2_trans *tr = current->journal_info; diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 2f224b98ee94..f35a37c65e5f 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c @@ -17,6 +17,7 @@ #include <linux/cred.h> #include <linux/uio.h> #include <linux/xattr.h> +#include <linux/blkdev.h> #include "hfs_fs.h" #include "btree.h" diff --git a/fs/hfsplus/unicode.c b/fs/hfsplus/unicode.c index c8d1b2be7854..73342c925a4b 100644 --- a/fs/hfsplus/unicode.c +++ b/fs/hfsplus/unicode.c @@ -398,7 +398,7 @@ int hfsplus_hash_dentry(const struct dentry *dentry, struct qstr *str) astr = str->name; len = str->len; while (len > 0) { - int uninitialized_var(dsize); + int dsize; size = asc2unichar(sb, astr, len, &c); astr += size; len -= size; diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 61eec628805d..0350dc7821bf 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c @@ -195,7 +195,7 @@ reread: switch (sbi->s_vhdr->signature) { case cpu_to_be16(HFSPLUS_VOLHEAD_SIGX): set_bit(HFSPLUS_SB_HFSX, &sbi->flags); - /*FALLTHRU*/ + fallthrough; case cpu_to_be16(HFSPLUS_VOLHEAD_SIG): break; case cpu_to_be16(HFSP_WRAP_MAGIC): diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index ef5313f9c78f..b5c109703daa 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -140,7 +140,7 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) * already been checked by prepare_hugepage_range. If you add * any error returns here, do so after setting VM_HUGETLB, so * is_vm_hugetlb_page tests below unmap_region go the right - * way when do_mmap_pgoff unwinds (may be important on powerpc + * way when do_mmap unwinds (may be important on powerpc * and ia64). */ vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND; @@ -1364,6 +1364,12 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_magic = HUGETLBFS_MAGIC; sb->s_op = &hugetlbfs_ops; sb->s_time_gran = 1; + + /* + * Due to the special and limited functionality of hugetlbfs, it does + * not work well as a stacking filesystem. + */ + sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH; sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx)); if (!sb->s_root) goto out_free; diff --git a/fs/init.c b/fs/init.c new file mode 100644 index 000000000000..e9c320a48cf1 --- /dev/null +++ b/fs/init.c @@ -0,0 +1,265 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Routines that mimic syscalls, but don't use the user address space or file + * descriptors. Only for init/ and related early init code. 
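
For the new fs/init.c introduced here, whose helpers mimic syscalls for early init code without going through user pointers or file descriptors, a rough userspace analogue of the first two helpers (init_mount() and init_chdir()) shows the operations being wrapped. This is only an editorial illustration: the paths are examples, and the mount() call needs root or a mount namespace to actually succeed.

/* initmount_demo.c - userspace analogue of init_mount() + init_chdir(). */
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/mount.h>

int main(void)
{
	if (mount("proc", "/proc", "proc", MS_SILENT, NULL) != 0)
		fprintf(stderr, "mount /proc: %s\n", strerror(errno));
	if (chdir("/") != 0)
		fprintf(stderr, "chdir /: %s\n", strerror(errno));
	return 0;
}
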
+ */ +#include <linux/init.h> +#include <linux/mount.h> +#include <linux/namei.h> +#include <linux/fs.h> +#include <linux/fs_struct.h> +#include <linux/file.h> +#include <linux/init_syscalls.h> +#include <linux/security.h> +#include "internal.h" + +int __init init_mount(const char *dev_name, const char *dir_name, + const char *type_page, unsigned long flags, void *data_page) +{ + struct path path; + int ret; + + ret = kern_path(dir_name, LOOKUP_FOLLOW, &path); + if (ret) + return ret; + ret = path_mount(dev_name, &path, type_page, flags, data_page); + path_put(&path); + return ret; +} + +int __init init_umount(const char *name, int flags) +{ + int lookup_flags = LOOKUP_MOUNTPOINT; + struct path path; + int ret; + + if (!(flags & UMOUNT_NOFOLLOW)) + lookup_flags |= LOOKUP_FOLLOW; + ret = kern_path(name, lookup_flags, &path); + if (ret) + return ret; + return path_umount(&path, flags); +} + +int __init init_chdir(const char *filename) +{ + struct path path; + int error; + + error = kern_path(filename, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); + if (error) + return error; + error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); + if (!error) + set_fs_pwd(current->fs, &path); + path_put(&path); + return error; +} + +int __init init_chroot(const char *filename) +{ + struct path path; + int error; + + error = kern_path(filename, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); + if (error) + return error; + error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_CHDIR); + if (error) + goto dput_and_out; + error = -EPERM; + if (!ns_capable(current_user_ns(), CAP_SYS_CHROOT)) + goto dput_and_out; + error = security_path_chroot(&path); + if (error) + goto dput_and_out; + set_fs_root(current->fs, &path); +dput_and_out: + path_put(&path); + return error; +} + +int __init init_chown(const char *filename, uid_t user, gid_t group, int flags) +{ + int lookup_flags = (flags & AT_SYMLINK_NOFOLLOW) ? 0 : LOOKUP_FOLLOW; + struct path path; + int error; + + error = kern_path(filename, lookup_flags, &path); + if (error) + return error; + error = mnt_want_write(path.mnt); + if (!error) { + error = chown_common(&path, user, group); + mnt_drop_write(path.mnt); + } + path_put(&path); + return error; +} + +int __init init_chmod(const char *filename, umode_t mode) +{ + struct path path; + int error; + + error = kern_path(filename, LOOKUP_FOLLOW, &path); + if (error) + return error; + error = chmod_common(&path, mode); + path_put(&path); + return error; +} + +int __init init_eaccess(const char *filename) +{ + struct path path; + int error; + + error = kern_path(filename, LOOKUP_FOLLOW, &path); + if (error) + return error; + error = inode_permission(d_inode(path.dentry), MAY_ACCESS); + path_put(&path); + return error; +} + +int __init init_stat(const char *filename, struct kstat *stat, int flags) +{ + int lookup_flags = (flags & AT_SYMLINK_NOFOLLOW) ? 
0 : LOOKUP_FOLLOW; + struct path path; + int error; + + error = kern_path(filename, lookup_flags, &path); + if (error) + return error; + error = vfs_getattr(&path, stat, STATX_BASIC_STATS, + flags | AT_NO_AUTOMOUNT); + path_put(&path); + return error; +} + +int __init init_mknod(const char *filename, umode_t mode, unsigned int dev) +{ + struct dentry *dentry; + struct path path; + int error; + + if (S_ISFIFO(mode) || S_ISSOCK(mode)) + dev = 0; + else if (!(S_ISBLK(mode) || S_ISCHR(mode))) + return -EINVAL; + + dentry = kern_path_create(AT_FDCWD, filename, &path, 0); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + + if (!IS_POSIXACL(path.dentry->d_inode)) + mode &= ~current_umask(); + error = security_path_mknod(&path, dentry, mode, dev); + if (!error) + error = vfs_mknod(path.dentry->d_inode, dentry, mode, + new_decode_dev(dev)); + done_path_create(&path, dentry); + return error; +} + +int __init init_link(const char *oldname, const char *newname) +{ + struct dentry *new_dentry; + struct path old_path, new_path; + int error; + + error = kern_path(oldname, 0, &old_path); + if (error) + return error; + + new_dentry = kern_path_create(AT_FDCWD, newname, &new_path, 0); + error = PTR_ERR(new_dentry); + if (IS_ERR(new_dentry)) + goto out; + + error = -EXDEV; + if (old_path.mnt != new_path.mnt) + goto out_dput; + error = may_linkat(&old_path); + if (unlikely(error)) + goto out_dput; + error = security_path_link(old_path.dentry, &new_path, new_dentry); + if (error) + goto out_dput; + error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry, + NULL); +out_dput: + done_path_create(&new_path, new_dentry); +out: + path_put(&old_path); + return error; +} + +int __init init_symlink(const char *oldname, const char *newname) +{ + struct dentry *dentry; + struct path path; + int error; + + dentry = kern_path_create(AT_FDCWD, newname, &path, 0); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + error = security_path_symlink(&path, dentry, oldname); + if (!error) + error = vfs_symlink(path.dentry->d_inode, dentry, oldname); + done_path_create(&path, dentry); + return error; +} + +int __init init_unlink(const char *pathname) +{ + return do_unlinkat(AT_FDCWD, getname_kernel(pathname)); +} + +int __init init_mkdir(const char *pathname, umode_t mode) +{ + struct dentry *dentry; + struct path path; + int error; + + dentry = kern_path_create(AT_FDCWD, pathname, &path, LOOKUP_DIRECTORY); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); + if (!IS_POSIXACL(path.dentry->d_inode)) + mode &= ~current_umask(); + error = security_path_mkdir(&path, dentry, mode); + if (!error) + error = vfs_mkdir(path.dentry->d_inode, dentry, mode); + done_path_create(&path, dentry); + return error; +} + +int __init init_rmdir(const char *pathname) +{ + return do_rmdir(AT_FDCWD, getname_kernel(pathname)); +} + +int __init init_utimes(char *filename, struct timespec64 *ts) +{ + struct path path; + int error; + + error = kern_path(filename, 0, &path); + if (error) + return error; + error = vfs_utimes(&path, ts); + path_put(&path); + return error; +} + +int __init init_dup(struct file *file) +{ + int fd; + + fd = get_unused_fd_flags(0); + if (fd < 0) + return fd; + fd_install(fd, get_file(file)); + return 0; +} diff --git a/fs/internal.h b/fs/internal.h index 9b863a7bd708..10517ece4516 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -23,7 +23,9 @@ struct user_namespace; extern void __init bdev_cache_init(void); extern int __sync_blockdev(struct block_device *bdev, int wait); - +void iterate_bdevs(void (*)(struct block_device 
*, void *), void *); +void emergency_thaw_bdev(struct super_block *sb); +void bd_forget(struct inode *inode); #else static inline void bdev_cache_init(void) { @@ -33,7 +35,18 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) { return 0; } -#endif +static inline void iterate_bdevs(void (*f)(struct block_device *, void *), + void *arg) +{ +} +static inline int emergency_thaw_bdev(struct super_block *sb) +{ + return 0; +} +static inline void bd_forget(struct inode *inode) +{ +} +#endif /* CONFIG_BLOCK */ /* * buffer.c @@ -62,15 +75,9 @@ extern int filename_lookup(int dfd, struct filename *name, unsigned flags, struct path *path, struct path *root); extern int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, unsigned int, struct path *); -long do_mknodat(int dfd, const char __user *filename, umode_t mode, - unsigned int dev); -long do_mkdirat(int dfd, const char __user *pathname, umode_t mode); -long do_rmdir(int dfd, const char __user *pathname); +long do_rmdir(int dfd, struct filename *name); long do_unlinkat(int dfd, struct filename *name); -long do_symlinkat(const char __user *oldname, int newdfd, - const char __user *newname); -int do_linkat(int olddfd, const char __user *oldname, int newdfd, - const char __user *newname, int flags); +int may_linkat(struct path *link); /* * namespace.c @@ -89,6 +96,11 @@ extern int __mnt_want_write_file(struct file *); extern void __mnt_drop_write_file(struct file *); extern void dissolve_on_fput(struct vfsmount *); + +int path_mount(const char *dev_name, struct path *path, + const char *type_page, unsigned long flags, void *data_page); +int path_umount(struct path *path, int flags); + /* * fs_struct.c */ @@ -126,10 +138,10 @@ extern struct open_how build_open_how(int flags, umode_t mode); extern int build_open_flags(const struct open_how *how, struct open_flags *op); long do_sys_ftruncate(unsigned int fd, loff_t length, int small); -int do_fchmodat(int dfd, const char __user *filename, umode_t mode); +int chmod_common(const struct path *path, umode_t mode); int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group, int flag); - +int chown_common(const struct path *path, uid_t user, gid_t group); extern int vfs_open(const struct path *, struct file *); /* diff --git a/fs/io-wq.c b/fs/io-wq.c index 47c5f3aeb460..414beb543883 100644 --- a/fs/io-wq.c +++ b/fs/io-wq.c @@ -462,6 +462,7 @@ static void io_impersonate_work(struct io_worker *worker, io_wq_switch_mm(worker, work); if (worker->cur_creds != work->creds) io_wq_switch_creds(worker, work); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->fsize; } static void io_assign_current_work(struct io_worker *worker, @@ -489,7 +490,6 @@ static void io_worker_handle_work(struct io_worker *worker) do { struct io_wq_work *work; - unsigned int hash; get_next: /* * If we got some work, mark us as busy. If we didn't, but @@ -512,6 +512,7 @@ get_next: /* handle a whole dependent link */ do { struct io_wq_work *old_work, *next_hashed, *linked; + unsigned int hash = io_get_work_hash(work); next_hashed = wq_next_work(work); io_impersonate_work(worker, work); @@ -522,10 +523,8 @@ get_next: if (test_bit(IO_WQ_BIT_CANCEL, &wq->state)) work->flags |= IO_WQ_WORK_CANCEL; - hash = io_get_work_hash(work); - linked = old_work = work; - wq->do_work(&linked); - linked = (old_work == linked) ? 
NULL : linked; + old_work = work; + linked = wq->do_work(work); work = next_hashed; if (!work && linked && !io_wq_is_hashed(linked)) { @@ -542,8 +541,6 @@ get_next: spin_lock_irq(&wqe->lock); wqe->hash_map &= ~BIT_ULL(hash); wqe->flags &= ~IO_WQE_FLAG_STALLED; - /* dependent work is not hashed */ - hash = -1U; /* skip unnecessary unlock-lock wqe->lock */ if (!work) goto get_next; @@ -781,8 +778,7 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe) struct io_wq_work *old_work = work; work->flags |= IO_WQ_WORK_CANCEL; - wq->do_work(&work); - work = (work == old_work) ? NULL : work; + work = wq->do_work(work); wq->free_work(old_work); } while (work); } @@ -929,6 +925,24 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data) return match->nr_running && !match->cancel_all; } +static inline void io_wqe_remove_pending(struct io_wqe *wqe, + struct io_wq_work *work, + struct io_wq_work_node *prev) +{ + unsigned int hash = io_get_work_hash(work); + struct io_wq_work *prev_work = NULL; + + if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) { + if (prev) + prev_work = container_of(prev, struct io_wq_work, list); + if (prev_work && io_get_work_hash(prev_work) == hash) + wqe->hash_tail[hash] = prev_work; + else + wqe->hash_tail[hash] = NULL; + } + wq_list_del(&wqe->work_list, &work->list, prev); +} + static void io_wqe_cancel_pending_work(struct io_wqe *wqe, struct io_cb_cancel_data *match) { @@ -942,8 +956,7 @@ retry: work = container_of(node, struct io_wq_work, list); if (!match->fn(work, match->data)) continue; - - wq_list_del(&wqe->work_list, node, prev); + io_wqe_remove_pending(wqe, work, prev); spin_unlock_irqrestore(&wqe->lock, flags); io_run_cancel(work, wqe); match->nr_pending++; diff --git a/fs/io-wq.h b/fs/io-wq.h index 071f1a997800..ddaf9614cf9b 100644 --- a/fs/io-wq.h +++ b/fs/io-wq.h @@ -5,10 +5,10 @@ struct io_wq; enum { IO_WQ_WORK_CANCEL = 1, - IO_WQ_WORK_HASHED = 4, - IO_WQ_WORK_UNBOUND = 32, - IO_WQ_WORK_NO_CANCEL = 256, - IO_WQ_WORK_CONCURRENT = 512, + IO_WQ_WORK_HASHED = 2, + IO_WQ_WORK_UNBOUND = 4, + IO_WQ_WORK_NO_CANCEL = 8, + IO_WQ_WORK_CONCURRENT = 16, IO_WQ_HASH_SHIFT = 24, /* upper 8 bits are used for hash key */ }; @@ -89,6 +89,7 @@ struct io_wq_work { struct mm_struct *mm; const struct cred *creds; struct fs_struct *fs; + unsigned long fsize; unsigned flags; }; @@ -101,7 +102,7 @@ static inline struct io_wq_work *wq_next_work(struct io_wq_work *work) } typedef void (free_work_fn)(struct io_wq_work *); -typedef void (io_wq_work_fn)(struct io_wq_work **); +typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *); struct io_wq_data { struct user_struct *user; diff --git a/fs/io_uring.c b/fs/io_uring.c index 32b0064f806e..3790c7fe9fee 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -78,6 +78,7 @@ #include <linux/fs_struct.h> #include <linux/splice.h> #include <linux/task_work.h> +#include <linux/pagemap.h> #define CREATE_TRACE_POINTS #include <trace/events/io_uring.h> @@ -226,7 +227,7 @@ struct io_ring_ctx { struct { unsigned int flags; unsigned int compat: 1; - unsigned int account_mem: 1; + unsigned int limit_mem: 1; unsigned int cq_overflow_flushed: 1; unsigned int drain_next: 1; unsigned int eventfd_async: 1; @@ -319,12 +320,12 @@ struct io_ring_ctx { spinlock_t completion_lock; /* - * ->poll_list is protected by the ctx->uring_lock for + * ->iopoll_list is protected by the ctx->uring_lock for * io_uring instances that don't use IORING_SETUP_SQPOLL. 
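The locking rule this comment states is worth a concrete sketch. A minimal illustration, assuming a non-SQPOLL context (example_reap_iopoll is a hypothetical name; it essentially mirrors what io_iopoll_try_reap_events() does further down in this diff):

static void example_reap_iopoll(struct io_ring_ctx *ctx)
{
	unsigned int nr_events = 0;

	/* ctx->uring_lock serialises ->iopoll_list access for !SQPOLL users */
	mutex_lock(&ctx->uring_lock);
	if (!list_empty(&ctx->iopoll_list))
		io_do_iopoll(ctx, &nr_events, 0);
	mutex_unlock(&ctx->uring_lock);
}
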
* For SQPOLL, only the single threaded io_sq_thread() will * manipulate the list, hence no extra locking is needed there. */ - struct list_head poll_list; + struct list_head iopoll_list; struct hlist_head *cancel_hash; unsigned cancel_hash_bits; bool poll_multi_file; @@ -395,6 +396,7 @@ struct io_timeout { int flags; u32 off; u32 target_seq; + struct list_head list; }; struct io_rw { @@ -413,7 +415,7 @@ struct io_connect { struct io_sr_msg { struct file *file; union { - struct user_msghdr __user *msg; + struct user_msghdr __user *umsg; void __user *buf; }; int msg_flags; @@ -486,6 +488,12 @@ struct io_statx { struct statx __user *buffer; }; +struct io_completion { + struct file *file; + struct list_head list; + int cflags; +}; + struct io_async_connect { struct sockaddr_storage address; }; @@ -500,9 +508,10 @@ struct io_async_msghdr { struct io_async_rw { struct iovec fast_iov[UIO_FASTIOV]; - struct iovec *iov; - ssize_t nr_segs; - ssize_t size; + const struct iovec *free_iovec; + struct iov_iter iter; + size_t bytes_done; + struct wait_page_queue wpq; }; struct io_async_ctx { @@ -523,23 +532,17 @@ enum { REQ_F_BUFFER_SELECT_BIT = IOSQE_BUFFER_SELECT_BIT, REQ_F_LINK_HEAD_BIT, - REQ_F_LINK_NEXT_BIT, REQ_F_FAIL_LINK_BIT, REQ_F_INFLIGHT_BIT, REQ_F_CUR_POS_BIT, REQ_F_NOWAIT_BIT, REQ_F_LINK_TIMEOUT_BIT, - REQ_F_TIMEOUT_BIT, REQ_F_ISREG_BIT, - REQ_F_MUST_PUNT_BIT, - REQ_F_TIMEOUT_NOSEQ_BIT, REQ_F_COMP_LOCKED_BIT, REQ_F_NEED_CLEANUP_BIT, - REQ_F_OVERFLOW_BIT, REQ_F_POLLED_BIT, REQ_F_BUFFER_SELECTED_BIT, REQ_F_NO_FILE_TABLE_BIT, - REQ_F_QUEUE_TIMEOUT_BIT, REQ_F_WORK_INITIALIZED_BIT, REQ_F_TASK_PINNED_BIT, @@ -563,8 +566,6 @@ enum { /* head of a link */ REQ_F_LINK_HEAD = BIT(REQ_F_LINK_HEAD_BIT), - /* already grabbed next link */ - REQ_F_LINK_NEXT = BIT(REQ_F_LINK_NEXT_BIT), /* fail rest of links */ REQ_F_FAIL_LINK = BIT(REQ_F_FAIL_LINK_BIT), /* on inflight list */ @@ -575,28 +576,18 @@ enum { REQ_F_NOWAIT = BIT(REQ_F_NOWAIT_BIT), /* has linked timeout */ REQ_F_LINK_TIMEOUT = BIT(REQ_F_LINK_TIMEOUT_BIT), - /* timeout request */ - REQ_F_TIMEOUT = BIT(REQ_F_TIMEOUT_BIT), /* regular file */ REQ_F_ISREG = BIT(REQ_F_ISREG_BIT), - /* must be punted even for NONBLOCK */ - REQ_F_MUST_PUNT = BIT(REQ_F_MUST_PUNT_BIT), - /* no timeout sequence */ - REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT), /* completion under lock */ REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT), /* needs cleanup */ REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT), - /* in overflow list */ - REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT), /* already went through poll handler */ REQ_F_POLLED = BIT(REQ_F_POLLED_BIT), /* buffer already selected */ REQ_F_BUFFER_SELECTED = BIT(REQ_F_BUFFER_SELECTED_BIT), /* doesn't need file table for this request */ REQ_F_NO_FILE_TABLE = BIT(REQ_F_NO_FILE_TABLE_BIT), - /* needs to queue linked timeout */ - REQ_F_QUEUE_TIMEOUT = BIT(REQ_F_QUEUE_TIMEOUT_BIT), /* io_wq_work is initialized */ REQ_F_WORK_INITIALIZED = BIT(REQ_F_WORK_INITIALIZED_BIT), /* req->task is refcounted */ @@ -606,7 +597,6 @@ enum { struct async_poll { struct io_poll_iocb poll; struct io_poll_iocb *double_poll; - struct io_wq_work work; }; /* @@ -635,51 +625,54 @@ struct io_kiocb { struct io_splice splice; struct io_provide_buf pbuf; struct io_statx statx; + /* use only after cleaning per-op data, see io_clean_op() */ + struct io_completion compl; }; struct io_async_ctx *io; - int cflags; u8 opcode; /* polled IO has completed */ u8 iopoll_completed; u16 buf_index; + u32 result; - struct io_ring_ctx *ctx; - struct list_head list; - unsigned 
int flags; - refcount_t refs; - struct task_struct *task; - unsigned long fsize; - u64 user_data; - u32 result; - u32 sequence; - - struct list_head link_list; + struct io_ring_ctx *ctx; + unsigned int flags; + refcount_t refs; + struct task_struct *task; + u64 user_data; - struct list_head inflight_entry; + struct list_head link_list; - struct percpu_ref *fixed_file_refs; + /* + * 1. used with ctx->iopoll_list with reads/writes + * 2. to track reqs with ->files (see io_op_def::file_table) + */ + struct list_head inflight_entry; + + struct percpu_ref *fixed_file_refs; + struct callback_head task_work; + /* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */ + struct hlist_node hash_node; + struct async_poll *apoll; + struct io_wq_work work; +}; - union { - /* - * Only commands that never go async can use the below fields, - * obviously. Right now only IORING_OP_POLL_ADD uses them, and - * async armed poll handlers for regular commands. The latter - * restore the work, if needed. - */ - struct { - struct callback_head task_work; - struct hlist_node hash_node; - struct async_poll *apoll; - }; - struct io_wq_work work; - }; +struct io_defer_entry { + struct list_head list; + struct io_kiocb *req; + u32 seq; }; -#define IO_PLUG_THRESHOLD 2 #define IO_IOPOLL_BATCH 8 +struct io_comp_state { + unsigned int nr; + struct list_head list; + struct io_ring_ctx *ctx; +}; + struct io_submit_state { struct blk_plug plug; @@ -690,12 +683,16 @@ struct io_submit_state { unsigned int free_reqs; /* + * Batch completion logic + */ + struct io_comp_state comp; + + /* * File reference cache */ struct file *file; unsigned int fd; unsigned int has_refs; - unsigned int used_refs; unsigned int ios_left; }; @@ -723,6 +720,7 @@ struct io_op_def { unsigned pollout : 1; /* op supports buffer selection */ unsigned buffer_select : 1; + unsigned needs_fsize : 1; }; static const struct io_op_def io_op_defs[] = { @@ -742,6 +740,7 @@ static const struct io_op_def io_op_defs[] = { .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, + .needs_fsize = 1, }, [IORING_OP_FSYNC] = { .needs_file = 1, @@ -756,6 +755,7 @@ static const struct io_op_def io_op_defs[] = { .hash_reg_file = 1, .unbound_nonreg_file = 1, .pollout = 1, + .needs_fsize = 1, }, [IORING_OP_POLL_ADD] = { .needs_file = 1, @@ -808,6 +808,7 @@ static const struct io_op_def io_op_defs[] = { }, [IORING_OP_FALLOCATE] = { .needs_file = 1, + .needs_fsize = 1, }, [IORING_OP_OPENAT] = { .file_table = 1, @@ -839,6 +840,7 @@ static const struct io_op_def io_op_defs[] = { .needs_file = 1, .unbound_nonreg_file = 1, .pollout = 1, + .needs_fsize = 1, }, [IORING_OP_FADVISE] = { .needs_file = 1, @@ -881,22 +883,38 @@ static const struct io_op_def io_op_defs[] = { }, }; -static void io_wq_submit_work(struct io_wq_work **workptr); +enum io_mem_account { + ACCT_LOCKED, + ACCT_PINNED, +}; + +static void __io_complete_rw(struct io_kiocb *req, long res, long res2, + struct io_comp_state *cs); static void io_cqring_fill_event(struct io_kiocb *req, long res); static void io_put_req(struct io_kiocb *req); +static void io_double_put_req(struct io_kiocb *req); static void __io_double_put_req(struct io_kiocb *req); static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req); +static void __io_queue_linked_timeout(struct io_kiocb *req); static void io_queue_linked_timeout(struct io_kiocb *req); static int __io_sqe_files_update(struct io_ring_ctx *ctx, struct io_uring_files_update *ip, unsigned nr_args); -static int io_grab_files(struct io_kiocb *req); -static void 
io_complete_rw_common(struct kiocb *kiocb, long res); -static void io_cleanup_req(struct io_kiocb *req); +static int io_prep_work_files(struct io_kiocb *req); +static void __io_clean_op(struct io_kiocb *req); static int io_file_get(struct io_submit_state *state, struct io_kiocb *req, int fd, struct file **out_file, bool fixed); static void __io_queue_sqe(struct io_kiocb *req, - const struct io_uring_sqe *sqe); + const struct io_uring_sqe *sqe, + struct io_comp_state *cs); +static void io_file_put_work(struct work_struct *work); + +static ssize_t io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct iov_iter *iter, + bool needs_lock); +static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, + const struct iovec *fast_iov, + struct iov_iter *iter, bool force); static struct kmem_cache *req_cachep; @@ -923,6 +941,13 @@ static void io_get_req_task(struct io_kiocb *req) req->flags |= REQ_F_TASK_PINNED; } +static inline void io_clean_op(struct io_kiocb *req) +{ + if (req->flags & (REQ_F_NEED_CLEANUP | REQ_F_BUFFER_SELECTED | + REQ_F_INFLIGHT)) + __io_clean_op(req); +} + /* not idempotent -- it doesn't clear REQ_F_TASK_PINNED */ static void __io_put_req_task(struct io_kiocb *req) { @@ -930,7 +955,41 @@ static void __io_put_req_task(struct io_kiocb *req) put_task_struct(req->task); } -static void io_file_put_work(struct work_struct *work); +static void io_sq_thread_drop_mm(void) +{ + struct mm_struct *mm = current->mm; + + if (mm) { + kthread_unuse_mm(mm); + mmput(mm); + } +} + +static int __io_sq_thread_acquire_mm(struct io_ring_ctx *ctx) +{ + if (!current->mm) { + if (unlikely(!(ctx->flags & IORING_SETUP_SQPOLL) || + !mmget_not_zero(ctx->sqo_mm))) + return -EFAULT; + kthread_use_mm(ctx->sqo_mm); + } + + return 0; +} + +static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx, + struct io_kiocb *req) +{ + if (!io_op_defs[req->opcode].needs_mm) + return 0; + return __io_sq_thread_acquire_mm(ctx); +} + +static inline void req_set_fail_links(struct io_kiocb *req) +{ + if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) + req->flags |= REQ_F_FAIL_LINK; +} /* * Note: must call io_req_init_async() for the first time you @@ -957,6 +1016,11 @@ static void io_ring_ctx_ref_free(struct percpu_ref *ref) complete(&ctx->ref_comp); } +static inline bool io_is_timeout_noseq(struct io_kiocb *req) +{ + return !req->timeout.off; +} + static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) { struct io_ring_ctx *ctx; @@ -1000,7 +1064,7 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) mutex_init(&ctx->uring_lock); init_waitqueue_head(&ctx->wait); spin_lock_init(&ctx->completion_lock); - INIT_LIST_HEAD(&ctx->poll_list); + INIT_LIST_HEAD(&ctx->iopoll_list); INIT_LIST_HEAD(&ctx->defer_list); INIT_LIST_HEAD(&ctx->timeout_list); init_waitqueue_head(&ctx->inflight_wait); @@ -1017,18 +1081,14 @@ err: return NULL; } -static inline bool __req_need_defer(struct io_kiocb *req) +static bool req_need_defer(struct io_kiocb *req, u32 seq) { - struct io_ring_ctx *ctx = req->ctx; + if (unlikely(req->flags & REQ_F_IO_DRAIN)) { + struct io_ring_ctx *ctx = req->ctx; - return req->sequence != ctx->cached_cq_tail + return seq != ctx->cached_cq_tail + atomic_read(&ctx->cached_cq_overflow); -} - -static inline bool req_need_defer(struct io_kiocb *req) -{ - if (unlikely(req->flags & REQ_F_IO_DRAIN)) - return __req_need_defer(req); + } return false; } @@ -1046,31 +1106,16 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx) } } -static 
inline void io_req_work_grab_env(struct io_kiocb *req, - const struct io_op_def *def) -{ - if (!req->work.mm && def->needs_mm) { - mmgrab(current->mm); - req->work.mm = current->mm; - } - if (!req->work.creds) - req->work.creds = get_current_cred(); - if (!req->work.fs && def->needs_fs) { - spin_lock(&current->fs->lock); - if (!current->fs->in_exec) { - req->work.fs = current->fs; - req->work.fs->users++; - } else { - req->work.flags |= IO_WQ_WORK_CANCEL; - } - spin_unlock(&current->fs->lock); - } -} - -static inline void io_req_work_drop_env(struct io_kiocb *req) +/* + * Returns true if we need to defer file table putting. This can only happen + * from the error path with REQ_F_COMP_LOCKED set. + */ +static bool io_req_clean_work(struct io_kiocb *req) { if (!(req->flags & REQ_F_WORK_INITIALIZED)) - return; + return false; + + req->flags &= ~REQ_F_WORK_INITIALIZED; if (req->work.mm) { mmdrop(req->work.mm); @@ -1083,45 +1128,84 @@ static inline void io_req_work_drop_env(struct io_kiocb *req) if (req->work.fs) { struct fs_struct *fs = req->work.fs; + if (req->flags & REQ_F_COMP_LOCKED) + return true; + spin_lock(&req->work.fs->lock); if (--fs->users) fs = NULL; spin_unlock(&req->work.fs->lock); if (fs) free_fs_struct(fs); + req->work.fs = NULL; } + + return false; } -static inline void io_prep_async_work(struct io_kiocb *req, - struct io_kiocb **link) +static void io_prep_async_work(struct io_kiocb *req) { const struct io_op_def *def = &io_op_defs[req->opcode]; io_req_init_async(req); if (req->flags & REQ_F_ISREG) { - if (def->hash_reg_file) + if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL)) io_wq_hash_work(&req->work, file_inode(req->file)); } else { if (def->unbound_nonreg_file) req->work.flags |= IO_WQ_WORK_UNBOUND; } + if (!req->work.mm && def->needs_mm) { + mmgrab(current->mm); + req->work.mm = current->mm; + } + if (!req->work.creds) + req->work.creds = get_current_cred(); + if (!req->work.fs && def->needs_fs) { + spin_lock(&current->fs->lock); + if (!current->fs->in_exec) { + req->work.fs = current->fs; + req->work.fs->users++; + } else { + req->work.flags |= IO_WQ_WORK_CANCEL; + } + spin_unlock(&current->fs->lock); + } + if (def->needs_fsize) + req->work.fsize = rlimit(RLIMIT_FSIZE); + else + req->work.fsize = RLIM_INFINITY; +} - io_req_work_grab_env(req, def); +static void io_prep_async_link(struct io_kiocb *req) +{ + struct io_kiocb *cur; - *link = io_prep_linked_timeout(req); + io_prep_async_work(req); + if (req->flags & REQ_F_LINK_HEAD) + list_for_each_entry(cur, &req->link_list, link_list) + io_prep_async_work(cur); } -static inline void io_queue_async_work(struct io_kiocb *req) +static struct io_kiocb *__io_queue_async_work(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - struct io_kiocb *link; - - io_prep_async_work(req, &link); + struct io_kiocb *link = io_prep_linked_timeout(req); trace_io_uring_queue_async_work(ctx, io_wq_is_hashed(&req->work), req, &req->work, req->flags); io_wq_enqueue(ctx->io_wq, &req->work); + return link; +} + +static void io_queue_async_work(struct io_kiocb *req) +{ + struct io_kiocb *link; + + /* init ->work of the whole link before punting */ + io_prep_async_link(req); + link = __io_queue_async_work(req); if (link) io_queue_linked_timeout(link); @@ -1133,8 +1217,9 @@ static void io_kill_timeout(struct io_kiocb *req) ret = hrtimer_try_to_cancel(&req->io->timeout.timer); if (ret != -1) { - atomic_inc(&req->ctx->cq_timeouts); - list_del_init(&req->list); + atomic_set(&req->ctx->cq_timeouts, + atomic_read(&req->ctx->cq_timeouts) + 1); +
list_del_init(&req->timeout.list); req->flags |= REQ_F_COMP_LOCKED; io_cqring_fill_event(req, 0); io_put_req(req); @@ -1146,7 +1231,7 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx) struct io_kiocb *req, *tmp; spin_lock_irq(&ctx->completion_lock); - list_for_each_entry_safe(req, tmp, &ctx->timeout_list, list) + list_for_each_entry_safe(req, tmp, &ctx->timeout_list, timeout.list) io_kill_timeout(req); spin_unlock_irq(&ctx->completion_lock); } @@ -1154,13 +1239,22 @@ static void io_kill_timeouts(struct io_ring_ctx *ctx) static void __io_queue_deferred(struct io_ring_ctx *ctx) { do { - struct io_kiocb *req = list_first_entry(&ctx->defer_list, - struct io_kiocb, list); + struct io_defer_entry *de = list_first_entry(&ctx->defer_list, + struct io_defer_entry, list); + struct io_kiocb *link; - if (req_need_defer(req)) + if (req_need_defer(de->req, de->seq)) break; - list_del_init(&req->list); - io_queue_async_work(req); + list_del_init(&de->list); + /* punt-init is done before queueing for defer */ + link = __io_queue_async_work(de->req); + if (link) { + __io_queue_linked_timeout(link); + /* drop submission reference */ + link->flags |= REQ_F_COMP_LOCKED; + io_put_req(link); + } + kfree(de); } while (!list_empty(&ctx->defer_list)); } @@ -1168,15 +1262,15 @@ static void io_flush_timeouts(struct io_ring_ctx *ctx) { while (!list_empty(&ctx->timeout_list)) { struct io_kiocb *req = list_first_entry(&ctx->timeout_list, - struct io_kiocb, list); + struct io_kiocb, timeout.list); - if (req->flags & REQ_F_TIMEOUT_NOSEQ) + if (io_is_timeout_noseq(req)) break; if (req->timeout.target_seq != ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts)) break; - list_del_init(&req->list); + list_del_init(&req->timeout.list); io_kill_timeout(req); } } @@ -1229,6 +1323,15 @@ static void io_cqring_ev_posted(struct io_ring_ctx *ctx) eventfd_signal(ctx->cq_ev_fd, 1); } +static void io_cqring_mark_overflow(struct io_ring_ctx *ctx) +{ + if (list_empty(&ctx->cq_overflow_list)) { + clear_bit(0, &ctx->sq_check_overflow); + clear_bit(0, &ctx->cq_check_overflow); + ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; + } +} + /* Returns true if there are no backlogged entries after the flush */ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) { @@ -1259,13 +1362,12 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) break; req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb, - list); - list_move(&req->list, &list); - req->flags &= ~REQ_F_OVERFLOW; + compl.list); + list_move(&req->compl.list, &list); if (cqe) { WRITE_ONCE(cqe->user_data, req->user_data); WRITE_ONCE(cqe->res, req->result); - WRITE_ONCE(cqe->flags, req->cflags); + WRITE_ONCE(cqe->flags, req->compl.cflags); } else { WRITE_ONCE(ctx->rings->cq_overflow, atomic_inc_return(&ctx->cached_cq_overflow)); @@ -1273,17 +1375,14 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force) } io_commit_cqring(ctx); - if (cqe) { - clear_bit(0, &ctx->sq_check_overflow); - clear_bit(0, &ctx->cq_check_overflow); - ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; - } + io_cqring_mark_overflow(ctx); + spin_unlock_irqrestore(&ctx->completion_lock, flags); io_cqring_ev_posted(ctx); while (!list_empty(&list)) { - req = list_first_entry(&list, struct io_kiocb, list); - list_del(&req->list); + req = list_first_entry(&list, struct io_kiocb, compl.list); + list_del(&req->compl.list); io_put_req(req); } @@ -1316,11 +1415,11 @@ static void __io_cqring_fill_event(struct io_kiocb *req, long res, long cflags) 
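A note on the io_defer_entry consumer shown a little earlier in __io_queue_deferred(): the producer side is not visible in this excerpt, but a rough sketch of it, written as if it lived in fs/io_uring.c, looks like the following (example_queue_defer is a hypothetical name, and the sequence value is assumed to come from a helper that snapshots the submission sequence):

static int example_queue_defer(struct io_ring_ctx *ctx, struct io_kiocb *req,
			       u32 seq)
{
	struct io_defer_entry *de;

	de = kmalloc(sizeof(*de), GFP_KERNEL);
	if (!de)
		return -ENOMEM;

	de->req = req;
	de->seq = seq;
	spin_lock_irq(&ctx->completion_lock);
	list_add_tail(&de->list, &ctx->defer_list);
	spin_unlock_irq(&ctx->completion_lock);
	return -EIOCBQUEUED;
}
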
set_bit(0, &ctx->cq_check_overflow); ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW; } - req->flags |= REQ_F_OVERFLOW; - refcount_inc(&req->refs); + io_clean_op(req); req->result = res; - req->cflags = cflags; - list_add_tail(&req->list, &ctx->cq_overflow_list); + req->compl.cflags = cflags; + refcount_inc(&req->refs); + list_add_tail(&req->compl.list, &ctx->cq_overflow_list); } } @@ -1329,7 +1428,7 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res) __io_cqring_fill_event(req, res, 0); } -static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags) +static void io_cqring_add_event(struct io_kiocb *req, long res, long cflags) { struct io_ring_ctx *ctx = req->ctx; unsigned long flags; @@ -1342,9 +1441,52 @@ static void __io_cqring_add_event(struct io_kiocb *req, long res, long cflags) io_cqring_ev_posted(ctx); } -static void io_cqring_add_event(struct io_kiocb *req, long res) +static void io_submit_flush_completions(struct io_comp_state *cs) +{ + struct io_ring_ctx *ctx = cs->ctx; + + spin_lock_irq(&ctx->completion_lock); + while (!list_empty(&cs->list)) { + struct io_kiocb *req; + + req = list_first_entry(&cs->list, struct io_kiocb, compl.list); + list_del(&req->compl.list); + __io_cqring_fill_event(req, req->result, req->compl.cflags); + if (!(req->flags & REQ_F_LINK_HEAD)) { + req->flags |= REQ_F_COMP_LOCKED; + io_put_req(req); + } else { + spin_unlock_irq(&ctx->completion_lock); + io_put_req(req); + spin_lock_irq(&ctx->completion_lock); + } + } + io_commit_cqring(ctx); + spin_unlock_irq(&ctx->completion_lock); + + io_cqring_ev_posted(ctx); + cs->nr = 0; +} + +static void __io_req_complete(struct io_kiocb *req, long res, unsigned cflags, + struct io_comp_state *cs) { - __io_cqring_add_event(req, res, 0); + if (!cs) { + io_cqring_add_event(req, res, cflags); + io_put_req(req); + } else { + io_clean_op(req); + req->result = res; + req->compl.cflags = cflags; + list_add_tail(&req->compl.list, &cs->list); + if (++cs->nr >= 32) + io_submit_flush_completions(cs); + } +} + +static void io_req_complete(struct io_kiocb *req, long res) +{ + __io_req_complete(req, res, 0, NULL); } static inline bool io_is_fallback_req(struct io_kiocb *req) @@ -1370,11 +1512,7 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx, gfp_t gfp = GFP_KERNEL | __GFP_NOWARN; struct io_kiocb *req; - if (!state) { - req = kmem_cache_alloc(req_cachep, gfp); - if (unlikely(!req)) - goto fallback; - } else if (!state->free_reqs) { + if (!state->free_reqs) { size_t sz; int ret; @@ -1412,83 +1550,61 @@ static inline void io_put_file(struct io_kiocb *req, struct file *file, fput(file); } -static void __io_req_aux_free(struct io_kiocb *req) +static bool io_dismantle_req(struct io_kiocb *req) { - if (req->flags & REQ_F_NEED_CLEANUP) - io_cleanup_req(req); + io_clean_op(req); - kfree(req->io); + if (req->io) + kfree(req->io); if (req->file) io_put_file(req, req->file, (req->flags & REQ_F_FIXED_FILE)); - __io_put_req_task(req); - io_req_work_drop_env(req); + + return io_req_clean_work(req); } -static void __io_free_req(struct io_kiocb *req) +static void __io_free_req_finish(struct io_kiocb *req) { - __io_req_aux_free(req); - - if (req->flags & REQ_F_INFLIGHT) { - struct io_ring_ctx *ctx = req->ctx; - unsigned long flags; - - spin_lock_irqsave(&ctx->inflight_lock, flags); - list_del(&req->inflight_entry); - if (waitqueue_active(&ctx->inflight_wait)) - wake_up(&ctx->inflight_wait); - spin_unlock_irqrestore(&ctx->inflight_lock, flags); - } + struct io_ring_ctx *ctx = req->ctx; - 
percpu_ref_put(&req->ctx->refs); + __io_put_req_task(req); if (likely(!io_is_fallback_req(req))) kmem_cache_free(req_cachep, req); else - clear_bit_unlock(0, (unsigned long *) &req->ctx->fallback_req); + clear_bit_unlock(0, (unsigned long *) &ctx->fallback_req); + percpu_ref_put(&ctx->refs); } -struct req_batch { - void *reqs[IO_IOPOLL_BATCH]; - int to_free; - int need_iter; -}; - -static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb) +static void io_req_task_file_table_put(struct callback_head *cb) { - if (!rb->to_free) - return; - if (rb->need_iter) { - int i, inflight = 0; - unsigned long flags; + struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + struct fs_struct *fs = req->work.fs; - for (i = 0; i < rb->to_free; i++) { - struct io_kiocb *req = rb->reqs[i]; + spin_lock(&req->work.fs->lock); + if (--fs->users) + fs = NULL; + spin_unlock(&req->work.fs->lock); + if (fs) + free_fs_struct(fs); + req->work.fs = NULL; + __io_free_req_finish(req); +} - if (req->flags & REQ_F_INFLIGHT) - inflight++; - __io_req_aux_free(req); - } - if (!inflight) - goto do_free; +static void __io_free_req(struct io_kiocb *req) +{ + if (!io_dismantle_req(req)) { + __io_free_req_finish(req); + } else { + int ret; - spin_lock_irqsave(&ctx->inflight_lock, flags); - for (i = 0; i < rb->to_free; i++) { - struct io_kiocb *req = rb->reqs[i]; + init_task_work(&req->task_work, io_req_task_file_table_put); + ret = task_work_add(req->task, &req->task_work, TWA_RESUME); + if (unlikely(ret)) { + struct task_struct *tsk; - if (req->flags & REQ_F_INFLIGHT) { - list_del(&req->inflight_entry); - if (!--inflight) - break; - } + tsk = io_wq_get_task(req->ctx->io_wq); + task_work_add(tsk, &req->task_work, 0); } - spin_unlock_irqrestore(&ctx->inflight_lock, flags); - - if (waitqueue_active(&ctx->inflight_wait)) - wake_up(&ctx->inflight_wait); } -do_free: - kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs); - percpu_ref_put_many(&ctx->refs, rb->to_free); - rb->to_free = rb->need_iter = 0; } static bool io_link_cancel_timeout(struct io_kiocb *req) @@ -1508,53 +1624,68 @@ static bool io_link_cancel_timeout(struct io_kiocb *req) return false; } -static void io_req_link_next(struct io_kiocb *req, struct io_kiocb **nxtptr) +static bool __io_kill_linked_timeout(struct io_kiocb *req) +{ + struct io_kiocb *link; + bool wake_ev; + + if (list_empty(&req->link_list)) + return false; + link = list_first_entry(&req->link_list, struct io_kiocb, link_list); + if (link->opcode != IORING_OP_LINK_TIMEOUT) + return false; + + list_del_init(&link->link_list); + link->flags |= REQ_F_COMP_LOCKED; + wake_ev = io_link_cancel_timeout(link); + req->flags &= ~REQ_F_LINK_TIMEOUT; + return wake_ev; +} + +static void io_kill_linked_timeout(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - bool wake_ev = false; + bool wake_ev; - /* Already got next link */ - if (req->flags & REQ_F_LINK_NEXT) - return; + if (!(req->flags & REQ_F_COMP_LOCKED)) { + unsigned long flags; + + spin_lock_irqsave(&ctx->completion_lock, flags); + wake_ev = __io_kill_linked_timeout(req); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else { + wake_ev = __io_kill_linked_timeout(req); + } + + if (wake_ev) + io_cqring_ev_posted(ctx); +} + +static struct io_kiocb *io_req_link_next(struct io_kiocb *req) +{ + struct io_kiocb *nxt; /* * The list should never be empty when we are called here. But could * potentially happen if the chain is messed up, check to be on the * safe side. 
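For readers coming from userspace: the link_list walked by io_req_link_next() is the chain an application builds by setting IOSQE_IO_LINK on consecutive SQEs. A minimal liburing-flavoured sketch (liburing helper names, not part of this diff; submit_linked_pair is hypothetical and error handling is omitted):

#include <liburing.h>

/* assumes an already-initialised ring and open file descriptors */
static void submit_linked_pair(struct io_uring *ring, int in_fd, int out_fd,
			       char *buf, unsigned int len)
{
	struct io_uring_sqe *sqe;

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_read(sqe, in_fd, buf, len, 0);
	sqe->flags |= IOSQE_IO_LINK;	/* the write below runs only if this read succeeds */

	sqe = io_uring_get_sqe(ring);
	io_uring_prep_write(sqe, out_fd, buf, len, 0);

	io_uring_submit(ring);
}
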
*/ - while (!list_empty(&req->link_list)) { - struct io_kiocb *nxt = list_first_entry(&req->link_list, - struct io_kiocb, link_list); - - if (unlikely((req->flags & REQ_F_LINK_TIMEOUT) && - (nxt->flags & REQ_F_TIMEOUT))) { - list_del_init(&nxt->link_list); - wake_ev |= io_link_cancel_timeout(nxt); - req->flags &= ~REQ_F_LINK_TIMEOUT; - continue; - } - - list_del_init(&req->link_list); - if (!list_empty(&nxt->link_list)) - nxt->flags |= REQ_F_LINK_HEAD; - *nxtptr = nxt; - break; - } + if (unlikely(list_empty(&req->link_list))) + return NULL; - req->flags |= REQ_F_LINK_NEXT; - if (wake_ev) - io_cqring_ev_posted(ctx); + nxt = list_first_entry(&req->link_list, struct io_kiocb, link_list); + list_del_init(&req->link_list); + if (!list_empty(&nxt->link_list)) + nxt->flags |= REQ_F_LINK_HEAD; + return nxt; } /* * Called if REQ_F_LINK_HEAD is set, and we fail the head request */ -static void io_fail_links(struct io_kiocb *req) +static void __io_fail_links(struct io_kiocb *req) { struct io_ring_ctx *ctx = req->ctx; - unsigned long flags; - - spin_lock_irqsave(&ctx->completion_lock, flags); while (!list_empty(&req->link_list)) { struct io_kiocb *link = list_first_entry(&req->link_list, @@ -1563,25 +1694,38 @@ static void io_fail_links(struct io_kiocb *req) list_del_init(&link->link_list); trace_io_uring_fail_link(req, link); - if ((req->flags & REQ_F_LINK_TIMEOUT) && - link->opcode == IORING_OP_LINK_TIMEOUT) { - io_link_cancel_timeout(link); - } else { - io_cqring_fill_event(link, -ECANCELED); - __io_double_put_req(link); - } + io_cqring_fill_event(link, -ECANCELED); + link->flags |= REQ_F_COMP_LOCKED; + __io_double_put_req(link); req->flags &= ~REQ_F_LINK_TIMEOUT; } io_commit_cqring(ctx); - spin_unlock_irqrestore(&ctx->completion_lock, flags); io_cqring_ev_posted(ctx); } -static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt) +static void io_fail_links(struct io_kiocb *req) { - if (likely(!(req->flags & REQ_F_LINK_HEAD))) - return; + struct io_ring_ctx *ctx = req->ctx; + + if (!(req->flags & REQ_F_COMP_LOCKED)) { + unsigned long flags; + + spin_lock_irqsave(&ctx->completion_lock, flags); + __io_fail_links(req); + spin_unlock_irqrestore(&ctx->completion_lock, flags); + } else { + __io_fail_links(req); + } + + io_cqring_ev_posted(ctx); +} + +static struct io_kiocb *__io_req_find_next(struct io_kiocb *req) +{ + req->flags &= ~REQ_F_LINK_HEAD; + if (req->flags & REQ_F_LINK_TIMEOUT) + io_kill_linked_timeout(req); /* * If LINK is set, we have dependent requests in this chain. If we @@ -1589,62 +1733,191 @@ static void io_req_find_next(struct io_kiocb *req, struct io_kiocb **nxt) * dependencies to the next request. In case of failure, fail the rest * of the chain. */ - if (req->flags & REQ_F_FAIL_LINK) { - io_fail_links(req); - } else if ((req->flags & (REQ_F_LINK_TIMEOUT | REQ_F_COMP_LOCKED)) == - REQ_F_LINK_TIMEOUT) { - struct io_ring_ctx *ctx = req->ctx; - unsigned long flags; + if (likely(!(req->flags & REQ_F_FAIL_LINK))) + return io_req_link_next(req); + io_fail_links(req); + return NULL; +} - /* - * If this is a timeout link, we could be racing with the - * timeout timer. Grab the completion lock for this case to - * protect against that. 
- */ - spin_lock_irqsave(&ctx->completion_lock, flags); - io_req_link_next(req, nxt); - spin_unlock_irqrestore(&ctx->completion_lock, flags); +static struct io_kiocb *io_req_find_next(struct io_kiocb *req) +{ + if (likely(!(req->flags & REQ_F_LINK_HEAD))) + return NULL; + return __io_req_find_next(req); +} + +static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb, + bool twa_signal_ok) +{ + struct task_struct *tsk = req->task; + struct io_ring_ctx *ctx = req->ctx; + int ret, notify; + + /* + * SQPOLL kernel thread doesn't need notification, just a wakeup. For + * all other cases, use TWA_SIGNAL unconditionally to ensure we're + * processing task_work. There's no reliable way to tell if TWA_RESUME + * will do the job. + */ + notify = 0; + if (!(ctx->flags & IORING_SETUP_SQPOLL) && twa_signal_ok) + notify = TWA_SIGNAL; + + ret = task_work_add(tsk, cb, notify); + if (!ret) + wake_up_process(tsk); + + return ret; +} + +static void __io_req_task_cancel(struct io_kiocb *req, int error) +{ + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->completion_lock); + io_cqring_fill_event(req, error); + io_commit_cqring(ctx); + spin_unlock_irq(&ctx->completion_lock); + + io_cqring_ev_posted(ctx); + req_set_fail_links(req); + io_double_put_req(req); +} + +static void io_req_task_cancel(struct callback_head *cb) +{ + struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + + __io_req_task_cancel(req, -ECANCELED); +} + +static void __io_req_task_submit(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + if (!__io_sq_thread_acquire_mm(ctx)) { + mutex_lock(&ctx->uring_lock); + __io_queue_sqe(req, NULL, NULL); + mutex_unlock(&ctx->uring_lock); } else { - io_req_link_next(req, nxt); + __io_req_task_cancel(req, -EFAULT); } } -static void io_free_req(struct io_kiocb *req) +static void io_req_task_submit(struct callback_head *cb) { - struct io_kiocb *nxt = NULL; + struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + struct io_ring_ctx *ctx = req->ctx; - io_req_find_next(req, &nxt); - __io_free_req(req); + __io_req_task_submit(req); + percpu_ref_put(&ctx->refs); +} + +static void io_req_task_queue(struct io_kiocb *req) +{ + int ret; + + init_task_work(&req->task_work, io_req_task_submit); + percpu_ref_get(&req->ctx->refs); + + ret = io_req_task_work_add(req, &req->task_work, true); + if (unlikely(ret)) { + struct task_struct *tsk; + + init_task_work(&req->task_work, io_req_task_cancel); + tsk = io_wq_get_task(req->ctx->io_wq); + task_work_add(tsk, &req->task_work, 0); + wake_up_process(tsk); + } +} + +static void io_queue_next(struct io_kiocb *req) +{ + struct io_kiocb *nxt = io_req_find_next(req); if (nxt) - io_queue_async_work(nxt); + io_req_task_queue(nxt); } -static void io_wq_assign_next(struct io_wq_work **workptr, struct io_kiocb *nxt) +static void io_free_req(struct io_kiocb *req) { - struct io_kiocb *link; - const struct io_op_def *def = &io_op_defs[nxt->opcode]; + io_queue_next(req); + __io_free_req(req); +} - if ((nxt->flags & REQ_F_ISREG) && def->hash_reg_file) - io_wq_hash_work(&nxt->work, file_inode(nxt->file)); +struct req_batch { + void *reqs[IO_IOPOLL_BATCH]; + int to_free; - *workptr = &nxt->work; - link = io_prep_linked_timeout(nxt); - if (link) - nxt->flags |= REQ_F_QUEUE_TIMEOUT; + struct task_struct *task; + int task_refs; +}; + +static inline void io_init_req_batch(struct req_batch *rb) +{ + rb->to_free = 0; + rb->task_refs = 0; + rb->task = NULL; +} + +static void __io_req_free_batch_flush(struct io_ring_ctx 
*ctx, + struct req_batch *rb) +{ + kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs); + percpu_ref_put_many(&ctx->refs, rb->to_free); + rb->to_free = 0; +} + +static void io_req_free_batch_finish(struct io_ring_ctx *ctx, + struct req_batch *rb) +{ + if (rb->to_free) + __io_req_free_batch_flush(ctx, rb); + if (rb->task) { + put_task_struct_many(rb->task, rb->task_refs); + rb->task = NULL; + } +} + +static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req) +{ + if (unlikely(io_is_fallback_req(req))) { + io_free_req(req); + return; + } + if (req->flags & REQ_F_LINK_HEAD) + io_queue_next(req); + + if (req->flags & REQ_F_TASK_PINNED) { + if (req->task != rb->task) { + if (rb->task) + put_task_struct_many(rb->task, rb->task_refs); + rb->task = req->task; + rb->task_refs = 0; + } + rb->task_refs++; + req->flags &= ~REQ_F_TASK_PINNED; + } + + WARN_ON_ONCE(io_dismantle_req(req)); + rb->reqs[rb->to_free++] = req; + if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs))) + __io_req_free_batch_flush(req->ctx, rb); } /* * Drop reference to request, return next in chain (if there is one) if this * was the last reference to this request. */ -__attribute__((nonnull)) -static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr) +static struct io_kiocb *io_put_req_find_next(struct io_kiocb *req) { + struct io_kiocb *nxt = NULL; + if (refcount_dec_and_test(&req->refs)) { - io_req_find_next(req, nxtptr); + nxt = io_req_find_next(req); __io_free_req(req); } + return nxt; } static void io_put_req(struct io_kiocb *req) @@ -1653,24 +1926,20 @@ static void io_put_req(struct io_kiocb *req) io_free_req(req); } -static void io_steal_work(struct io_kiocb *req, - struct io_wq_work **workptr) +static struct io_wq_work *io_steal_work(struct io_kiocb *req) { + struct io_kiocb *nxt; + /* - * It's in an io-wq worker, so there always should be at least - * one reference, which will be dropped in io_put_work() just - * after the current handler returns. - * - * It also means, that if the counter dropped to 1, then there is - * no asynchronous users left, so it's safe to steal the next work. + * A ref is owned by io-wq in which context we're. So, if that's the + * last one, it's safe to steal next work. False negatives are Ok, + * it just will be re-punted async in io_put_work() */ - if (refcount_read(&req->refs) == 1) { - struct io_kiocb *nxt = NULL; + if (refcount_read(&req->refs) != 1) + return NULL; - io_req_find_next(req, &nxt); - if (nxt) - io_wq_assign_next(workptr, nxt); - } + nxt = io_req_find_next(req); + return nxt ? 
&nxt->work : NULL; } /* @@ -1720,31 +1989,34 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx) return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; } -static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req) +static unsigned int io_put_kbuf(struct io_kiocb *req, struct io_buffer *kbuf) { - if ((req->flags & REQ_F_LINK_HEAD) || io_is_fallback_req(req)) - return false; - - if (req->file || req->io) - rb->need_iter++; + unsigned int cflags; - rb->reqs[rb->to_free++] = req; - if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs))) - io_free_req_many(req->ctx, rb); - return true; + cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT; + cflags |= IORING_CQE_F_BUFFER; + req->flags &= ~REQ_F_BUFFER_SELECTED; + kfree(kbuf); + return cflags; } -static int io_put_kbuf(struct io_kiocb *req) +static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) { struct io_buffer *kbuf; - int cflags; kbuf = (struct io_buffer *) (unsigned long) req->rw.addr; - cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT; - cflags |= IORING_CQE_F_BUFFER; - req->rw.addr = 0; - kfree(kbuf); - return cflags; + return io_put_kbuf(req, kbuf); +} + +static inline bool io_run_task_work(void) +{ + if (current->task_works) { + __set_current_state(TASK_RUNNING); + task_work_run(); + return true; + } + + return false; } static void io_iopoll_queue(struct list_head *again) @@ -1752,18 +2024,9 @@ static void io_iopoll_queue(struct list_head *again) struct io_kiocb *req; do { - req = list_first_entry(again, struct io_kiocb, list); - list_del(&req->list); - - /* shouldn't happen unless io_uring is dying, cancel reqs */ - if (unlikely(!current->mm)) { - io_complete_rw_common(&req->rw.kiocb, -EAGAIN); - io_put_req(req); - continue; - } - - refcount_inc(&req->refs); - io_queue_async_work(req); + req = list_first_entry(again, struct io_kiocb, inflight_entry); + list_del(&req->inflight_entry); + __io_complete_rw(req, -EAGAIN, 0, NULL); } while (!list_empty(again)); } @@ -1780,33 +2043,33 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events, /* order with ->result store in io_complete_rw_iopoll() */ smp_rmb(); - rb.to_free = rb.need_iter = 0; + io_init_req_batch(&rb); while (!list_empty(done)) { int cflags = 0; - req = list_first_entry(done, struct io_kiocb, list); + req = list_first_entry(done, struct io_kiocb, inflight_entry); if (READ_ONCE(req->result) == -EAGAIN) { + req->result = 0; req->iopoll_completed = 0; - list_move_tail(&req->list, &again); + list_move_tail(&req->inflight_entry, &again); continue; } - list_del(&req->list); + list_del(&req->inflight_entry); if (req->flags & REQ_F_BUFFER_SELECTED) - cflags = io_put_kbuf(req); + cflags = io_put_rw_kbuf(req); __io_cqring_fill_event(req, req->result, cflags); (*nr_events)++; - if (refcount_dec_and_test(&req->refs) && - !io_req_multi_free(&rb, req)) - io_free_req(req); + if (refcount_dec_and_test(&req->refs)) + io_req_free_batch(&rb, req); } io_commit_cqring(ctx); if (ctx->flags & IORING_SETUP_SQPOLL) io_cqring_ev_posted(ctx); - io_free_req_many(ctx, &rb); + io_req_free_batch_finish(ctx, &rb); if (!list_empty(&again)) io_iopoll_queue(&again); @@ -1827,7 +2090,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, spin = !ctx->poll_multi_file && *nr_events < min; ret = 0; - list_for_each_entry_safe(req, tmp, &ctx->poll_list, list) { + list_for_each_entry_safe(req, tmp, &ctx->iopoll_list, inflight_entry) { struct kiocb *kiocb = &req->rw.kiocb; /* @@ -1836,7 +2099,7 @@ static int 
io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, * and complete those lists first, if we have entries there. */ if (READ_ONCE(req->iopoll_completed)) { - list_move_tail(&req->list, &done); + list_move_tail(&req->inflight_entry, &done); continue; } if (!list_empty(&done)) @@ -1846,6 +2109,10 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, if (ret < 0) break; + /* iopoll may have completed current req */ + if (READ_ONCE(req->iopoll_completed)) + list_move_tail(&req->inflight_entry, &done); + if (ret && spin) spin = false; ret = 0; @@ -1865,13 +2132,13 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events, static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, long min) { - while (!list_empty(&ctx->poll_list) && !need_resched()) { + while (!list_empty(&ctx->iopoll_list) && !need_resched()) { int ret; ret = io_do_iopoll(ctx, nr_events, min); if (ret < 0) return ret; - if (!min || *nr_events >= min) + if (*nr_events >= min) return 0; } @@ -1882,29 +2149,37 @@ static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events, * We can't just wait for polled events to come to us, we have to actively * find and complete them. */ -static void io_iopoll_reap_events(struct io_ring_ctx *ctx) +static void io_iopoll_try_reap_events(struct io_ring_ctx *ctx) { if (!(ctx->flags & IORING_SETUP_IOPOLL)) return; mutex_lock(&ctx->uring_lock); - while (!list_empty(&ctx->poll_list)) { + while (!list_empty(&ctx->iopoll_list)) { unsigned int nr_events = 0; - io_iopoll_getevents(ctx, &nr_events, 1); + io_do_iopoll(ctx, &nr_events, 0); + /* let it sleep and repeat later if can't complete a request */ + if (nr_events == 0) + break; /* * Ensure we allow local-to-the-cpu processing to take place, * in this case we need to ensure that we reap all events. + * Also let task_work, etc. to progress by releasing the mutex */ - cond_resched(); + if (need_resched()) { + mutex_unlock(&ctx->uring_lock); + cond_resched(); + mutex_lock(&ctx->uring_lock); + } } mutex_unlock(&ctx->uring_lock); } -static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, - long min) +static int io_iopoll_check(struct io_ring_ctx *ctx, long min) { + unsigned int nr_events = 0; int iters = 0, ret = 0; /* @@ -1914,8 +2189,6 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, */ mutex_lock(&ctx->uring_lock); do { - int tmin = 0; - /* * Don't enter poll loop if we already have events pending. 
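The polling pass driven by io_do_iopoll() above ultimately asks the lower layer to reap completions for each pending request. A hedged sketch of that single step (example_poll_one is a hypothetical helper; it assumes the ->iopoll() file operation of this era, int (*iopoll)(struct kiocb *, bool)):

static int example_poll_one(struct io_kiocb *req, bool spin)
{
	struct kiocb *kiocb = &req->rw.kiocb;

	if (!kiocb->ki_filp->f_op->iopoll)
		return -EOPNOTSUPP;
	/* ask the block layer (or other provider) to reap this request */
	return kiocb->ki_filp->f_op->iopoll(kiocb, spin);
}
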
* If we do, we can potentially be spinning for commands that @@ -1936,17 +2209,15 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events, */ if (!(++iters & 7)) { mutex_unlock(&ctx->uring_lock); + io_run_task_work(); mutex_lock(&ctx->uring_lock); } - if (*nr_events < min) - tmin = min - *nr_events; - - ret = io_iopoll_getevents(ctx, nr_events, tmin); + ret = io_iopoll_getevents(ctx, &nr_events, min); if (ret <= 0) break; ret = 0; - } while (min && !*nr_events && !need_resched()); + } while (min && !nr_events && !need_resched()); mutex_unlock(&ctx->uring_lock); return ret; @@ -1966,13 +2237,8 @@ static void kiocb_end_write(struct io_kiocb *req) file_end_write(req->file); } -static inline void req_set_fail_links(struct io_kiocb *req) -{ - if ((req->flags & (REQ_F_LINK | REQ_F_HARDLINK)) == REQ_F_LINK) - req->flags |= REQ_F_FAIL_LINK; -} - -static void io_complete_rw_common(struct kiocb *kiocb, long res) +static void io_complete_rw_common(struct kiocb *kiocb, long res, + struct io_comp_state *cs) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); int cflags = 0; @@ -1983,16 +2249,89 @@ static void io_complete_rw_common(struct kiocb *kiocb, long res) if (res != req->result) req_set_fail_links(req); if (req->flags & REQ_F_BUFFER_SELECTED) - cflags = io_put_kbuf(req); - __io_cqring_add_event(req, res, cflags); + cflags = io_put_rw_kbuf(req); + __io_req_complete(req, res, cflags, cs); +} + +#ifdef CONFIG_BLOCK +static bool io_resubmit_prep(struct io_kiocb *req, int error) +{ + struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; + ssize_t ret = -ECANCELED; + struct iov_iter iter; + int rw; + + if (error) { + ret = error; + goto end_req; + } + + switch (req->opcode) { + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_READ: + rw = READ; + break; + case IORING_OP_WRITEV: + case IORING_OP_WRITE_FIXED: + case IORING_OP_WRITE: + rw = WRITE; + break; + default: + printk_once(KERN_WARNING "io_uring: bad opcode in resubmit %d\n", + req->opcode); + goto end_req; + } + + ret = io_import_iovec(rw, req, &iovec, &iter, false); + if (ret < 0) + goto end_req; + ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false); + if (!ret) + return true; + kfree(iovec); +end_req: + req_set_fail_links(req); + io_req_complete(req, ret); + return false; +} +#endif + +static bool io_rw_reissue(struct io_kiocb *req, long res) +{ +#ifdef CONFIG_BLOCK + umode_t mode = file_inode(req->file)->i_mode; + int ret; + + if (!S_ISBLK(mode) && !S_ISREG(mode)) + return false; + if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker()) + return false; + + ret = io_sq_thread_acquire_mm(req->ctx, req); + + if (io_resubmit_prep(req, ret)) { + refcount_inc(&req->refs); + io_queue_async_work(req); + return true; + } + +#endif + return false; +} + +static void __io_complete_rw(struct io_kiocb *req, long res, long res2, + struct io_comp_state *cs) +{ + if (!io_rw_reissue(req, res)) + io_complete_rw_common(&req->rw.kiocb, res, cs); } static void io_complete_rw(struct kiocb *kiocb, long res, long res2) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); - io_complete_rw_common(kiocb, res); - io_put_req(req); + __io_complete_rw(req, res, res2, NULL); } static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2) @@ -2026,13 +2365,13 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * how we do polling eventually, not spinning if we're on potentially * different devices. 
*/ - if (list_empty(&ctx->poll_list)) { + if (list_empty(&ctx->iopoll_list)) { ctx->poll_multi_file = false; } else if (!ctx->poll_multi_file) { struct io_kiocb *list_req; - list_req = list_first_entry(&ctx->poll_list, struct io_kiocb, - list); + list_req = list_first_entry(&ctx->iopoll_list, struct io_kiocb, + inflight_entry); if (list_req->file != req->file) ctx->poll_multi_file = true; } @@ -2042,9 +2381,9 @@ static void io_iopoll_req_issued(struct io_kiocb *req) * it to the front so we find it first. */ if (READ_ONCE(req->iopoll_completed)) - list_add(&req->list, &ctx->poll_list); + list_add(&req->inflight_entry, &ctx->iopoll_list); else - list_add_tail(&req->list, &ctx->poll_list); + list_add_tail(&req->inflight_entry, &ctx->iopoll_list); if ((ctx->flags & IORING_SETUP_SQPOLL) && wq_has_sleeper(&ctx->sqo_wait)) @@ -2053,10 +2392,8 @@ static void io_iopoll_req_issued(struct io_kiocb *req) static void __io_state_file_put(struct io_submit_state *state) { - int diff = state->has_refs - state->used_refs; - - if (diff) - fput_many(state->file, diff); + if (state->has_refs) + fput_many(state->file, state->has_refs); state->file = NULL; } @@ -2078,7 +2415,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd) if (state->file) { if (state->fd == fd) { - state->used_refs++; + state->has_refs--; state->ios_left--; return state->file; } @@ -2089,12 +2426,20 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd) return NULL; state->fd = fd; - state->has_refs = state->ios_left; - state->used_refs = 1; state->ios_left--; + state->has_refs = state->ios_left; return state->file; } +static bool io_bdev_nowait(struct block_device *bdev) +{ +#ifdef CONFIG_BLOCK + return !bdev || queue_is_mq(bdev_get_queue(bdev)); +#else + return true; +#endif +} + /* * If we tracked the file through the SCM inflight mechanism, we could support * any file. For now, just ensure that anything potentially problematic is done @@ -2104,10 +2449,19 @@ static bool io_file_supports_async(struct file *file, int rw) { umode_t mode = file_inode(file)->i_mode; - if (S_ISBLK(mode) || S_ISCHR(mode) || S_ISSOCK(mode)) - return true; - if (S_ISREG(mode) && file->f_op != &io_uring_fops) + if (S_ISBLK(mode)) { + if (io_bdev_nowait(file->f_inode->i_bdev)) + return true; + return false; + } + if (S_ISCHR(mode) || S_ISSOCK(mode)) return true; + if (S_ISREG(mode)) { + if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) && + file->f_op != &io_uring_fops) + return true; + return false; + } /* any ->read/write should understand O_NONBLOCK */ if (file->f_flags & O_NONBLOCK) @@ -2158,6 +2512,9 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (kiocb->ki_flags & IOCB_NOWAIT) req->flags |= REQ_F_NOWAIT; + if (kiocb->ki_flags & IOCB_DIRECT) + io_get_req_task(req); + if (force_nonblock) kiocb->ki_flags |= IOCB_NOWAIT; @@ -2168,8 +2525,8 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe, kiocb->ki_flags |= IOCB_HIPRI; kiocb->ki_complete = io_complete_rw_iopoll; - req->result = 0; req->iopoll_completed = 0; + io_get_req_task(req); } else { if (kiocb->ki_flags & IOCB_HIPRI) return -EINVAL; @@ -2197,20 +2554,29 @@ static inline void io_rw_done(struct kiocb *kiocb, ssize_t ret) * IO with EINTR. 
*/ ret = -EINTR; - /* fall through */ + fallthrough; default: kiocb->ki_complete(kiocb, ret, 0); } } -static void kiocb_done(struct kiocb *kiocb, ssize_t ret) +static void kiocb_done(struct kiocb *kiocb, ssize_t ret, + struct io_comp_state *cs) { struct io_kiocb *req = container_of(kiocb, struct io_kiocb, rw.kiocb); + /* add previously done IO, if any */ + if (req->io && req->io->rw.bytes_done > 0) { + if (ret < 0) + ret = req->io->rw.bytes_done; + else + ret += req->io->rw.bytes_done; + } + if (req->flags & REQ_F_CUR_POS) req->file->f_pos = kiocb->ki_pos; if (ret >= 0 && kiocb->ki_complete == io_complete_rw) - io_complete_rw(kiocb, ret, 0); + __io_complete_rw(req, ret, 0, cs); else io_rw_done(kiocb, ret); } @@ -2429,9 +2795,9 @@ static ssize_t io_iov_buffer_select(struct io_kiocb *req, struct iovec *iov, return __io_iov_buffer_select(req, iov, needs_lock); } -static ssize_t io_import_iovec(int rw, struct io_kiocb *req, - struct iovec **iovec, struct iov_iter *iter, - bool needs_lock) +static ssize_t __io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct iov_iter *iter, + bool needs_lock) { void __user *buf = u64_to_user_ptr(req->rw.addr); size_t sqe_len = req->rw.len; @@ -2451,10 +2817,8 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req, if (opcode == IORING_OP_READ || opcode == IORING_OP_WRITE) { if (req->flags & REQ_F_BUFFER_SELECT) { buf = io_rw_buffer_select(req, &sqe_len, needs_lock); - if (IS_ERR(buf)) { - *iovec = NULL; + if (IS_ERR(buf)) return PTR_ERR(buf); - } req->rw.len = sqe_len; } @@ -2463,16 +2827,6 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req, return ret < 0 ? ret : sqe_len; } - if (req->io) { - struct io_async_rw *iorw = &req->io->rw; - - *iovec = iorw->iov; - iov_iter_init(iter, rw, *iovec, iorw->nr_segs, iorw->size); - if (iorw->iov == iorw->fast_iov) - *iovec = NULL; - return iorw->size; - } - if (req->flags & REQ_F_BUFFER_SELECT) { ret = io_iov_buffer_select(req, *iovec, needs_lock); if (!ret) { @@ -2492,6 +2846,21 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req, return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter); } +static ssize_t io_import_iovec(int rw, struct io_kiocb *req, + struct iovec **iovec, struct iov_iter *iter, + bool needs_lock) +{ + if (!req->io) + return __io_import_iovec(rw, req, iovec, iter, needs_lock); + *iovec = NULL; + return iov_iter_count(&req->io->rw.iter); +} + +static inline loff_t *io_kiocb_ppos(struct kiocb *kiocb) +{ + return kiocb->ki_filp->f_mode & FMODE_STREAM ? NULL : &kiocb->ki_pos; +} + /* * For files that don't have ->read_iter() and ->write_iter(), handle them * by looping over ->read() or ->write() manually. 
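The comment above is the whole idea behind loop_rw_iter(), whose changes follow. As a rough, self-contained illustration of the same technique (simplified, not the function's actual body; example_loop_read is a hypothetical name):

static ssize_t example_loop_read(struct file *file, struct iov_iter *iter,
				 loff_t *ppos)
{
	ssize_t done = 0;

	while (iov_iter_count(iter)) {
		struct iovec iov = iov_iter_iovec(iter);
		ssize_t nr;

		/* feed one segment at a time to the old-style file op */
		nr = file->f_op->read(file, iov.iov_base, iov.iov_len, ppos);
		if (nr < 0)
			return done ? done : nr;

		done += nr;
		iov_iter_advance(iter, nr);
		if ((size_t)nr < iov.iov_len)
			break;		/* short read: stop early */
	}
	return done;
}
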
@@ -2527,10 +2896,10 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, if (rw == READ) { nr = file->f_op->read(file, iovec.iov_base, - iovec.iov_len, &kiocb->ki_pos); + iovec.iov_len, io_kiocb_ppos(kiocb)); } else { nr = file->f_op->write(file, iovec.iov_base, - iovec.iov_len, &kiocb->ki_pos); + iovec.iov_len, io_kiocb_ppos(kiocb)); } if (iov_iter_is_bvec(iter)) @@ -2550,19 +2919,30 @@ static ssize_t loop_rw_iter(int rw, struct file *file, struct kiocb *kiocb, return ret; } -static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size, - struct iovec *iovec, struct iovec *fast_iov, - struct iov_iter *iter) +static void io_req_map_rw(struct io_kiocb *req, const struct iovec *iovec, + const struct iovec *fast_iov, struct iov_iter *iter) { - req->io->rw.nr_segs = iter->nr_segs; - req->io->rw.size = io_size; - req->io->rw.iov = iovec; - if (!req->io->rw.iov) { - req->io->rw.iov = req->io->rw.fast_iov; - if (req->io->rw.iov != fast_iov) - memcpy(req->io->rw.iov, fast_iov, + struct io_async_rw *rw = &req->io->rw; + + memcpy(&rw->iter, iter, sizeof(*iter)); + rw->free_iovec = NULL; + rw->bytes_done = 0; + /* can only be fixed buffers, no need to do anything */ + if (iter->type == ITER_BVEC) + return; + if (!iovec) { + unsigned iov_off = 0; + + rw->iter.iov = rw->fast_iov; + if (iter->iov != fast_iov) { + iov_off = iter->iov - fast_iov; + rw->iter.iov += iov_off; + } + if (rw->fast_iov != fast_iov) + memcpy(rw->fast_iov + iov_off, fast_iov + iov_off, sizeof(struct iovec) * iter->nr_segs); } else { + rw->free_iovec = iovec; req->flags |= REQ_F_NEED_CLEANUP; } } @@ -2581,26 +2961,41 @@ static int io_alloc_async_ctx(struct io_kiocb *req) return __io_alloc_async_ctx(req); } -static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size, - struct iovec *iovec, struct iovec *fast_iov, - struct iov_iter *iter) +static int io_setup_async_rw(struct io_kiocb *req, const struct iovec *iovec, + const struct iovec *fast_iov, + struct iov_iter *iter, bool force) { - if (!io_op_defs[req->opcode].async_ctx) + if (!force && !io_op_defs[req->opcode].async_ctx) return 0; if (!req->io) { if (__io_alloc_async_ctx(req)) return -ENOMEM; - io_req_map_rw(req, io_size, iovec, fast_iov, iter); + io_req_map_rw(req, iovec, fast_iov, iter); } return 0; } +static inline int io_rw_prep_async(struct io_kiocb *req, int rw, + bool force_nonblock) +{ + struct io_async_rw *iorw = &req->io->rw; + struct iovec *iov; + ssize_t ret; + + iorw->iter.iov = iov = iorw->fast_iov; + ret = __io_import_iovec(rw, req, &iov, &iorw->iter, !force_nonblock); + if (unlikely(ret < 0)) + return ret; + + iorw->iter.iov = iov; + io_req_map_rw(req, iorw->iter.iov, iorw->fast_iov, &iorw->iter); + return 0; +} + static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, bool force_nonblock) { - struct io_async_ctx *io; - struct iov_iter iter; ssize_t ret; ret = io_prep_rw(req, sqe, force_nonblock); @@ -2613,75 +3008,206 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, /* either don't need iovec imported or already have it */ if (!req->io || req->flags & REQ_F_NEED_CLEANUP) return 0; + return io_rw_prep_async(req, READ, force_nonblock); +} - io = req->io; - io->rw.iov = io->rw.fast_iov; - req->io = NULL; - ret = io_import_iovec(READ, req, &io->rw.iov, &iter, !force_nonblock); - req->io = io; - if (ret < 0) - return ret; +/* + * This is our waitqueue callback handler, registered through lock_page_async() + * when we initially tried to do the IO with the iocb armed our 
waitqueue. + * This gets called when the page is unlocked, and we generally expect that to + * happen when the page IO is completed and the page is now uptodate. This will + * queue a task_work based retry of the operation, attempting to copy the data + * again. If the latter fails because the page was NOT uptodate, then we will + * do a thread based blocking retry of the operation. That's the unexpected + * slow path. + */ +static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, + int sync, void *arg) +{ + struct wait_page_queue *wpq; + struct io_kiocb *req = wait->private; + struct wait_page_key *key = arg; + int ret; - io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter); - return 0; + wpq = container_of(wait, struct wait_page_queue, wait); + + if (!wake_page_match(wpq, key)) + return 0; + + list_del_init(&wait->entry); + + init_task_work(&req->task_work, io_req_task_submit); + percpu_ref_get(&req->ctx->refs); + + /* submit ref gets dropped, acquire a new one */ + refcount_inc(&req->refs); + ret = io_req_task_work_add(req, &req->task_work, true); + if (unlikely(ret)) { + struct task_struct *tsk; + + /* queue just for cancelation */ + init_task_work(&req->task_work, io_req_task_cancel); + tsk = io_wq_get_task(req->ctx->io_wq); + task_work_add(tsk, &req->task_work, 0); + wake_up_process(tsk); + } + return 1; } -static int io_read(struct io_kiocb *req, bool force_nonblock) +/* + * This controls whether a given IO request should be armed for async page + * based retry. If we return false here, the request is handed to the async + * worker threads for retry. If we're doing buffered reads on a regular file, + * we prepare a private wait_page_queue entry and retry the operation. This + * will either succeed because the page is now uptodate and unlocked, or it + * will register a callback when the page is unlocked at IO completion. Through + * that callback, io_uring uses task_work to setup a retry of the operation. + * That retry will attempt the buffered read again. The retry will generally + * succeed, or in rare cases where it fails, we then fall back to using the + * async worker threads for a blocking retry. 
+ */ +static bool io_rw_should_retry(struct io_kiocb *req) +{ + struct wait_page_queue *wait = &req->io->rw.wpq; + struct kiocb *kiocb = &req->rw.kiocb; + + /* never retry for NOWAIT, we just complete with -EAGAIN */ + if (req->flags & REQ_F_NOWAIT) + return false; + + /* Only for buffered IO */ + if (kiocb->ki_flags & (IOCB_DIRECT | IOCB_HIPRI)) + return false; + + /* + * just use poll if we can, and don't attempt if the fs doesn't + * support callback based unlocks + */ + if (file_can_poll(req->file) || !(req->file->f_mode & FMODE_BUF_RASYNC)) + return false; + + wait->wait.func = io_async_buf_func; + wait->wait.private = req; + wait->wait.flags = 0; + INIT_LIST_HEAD(&wait->wait.entry); + kiocb->ki_flags |= IOCB_WAITQ; + kiocb->ki_waitq = wait; + + io_get_req_task(req); + return true; +} + +static int io_iter_do_read(struct io_kiocb *req, struct iov_iter *iter) +{ + if (req->file->f_op->read_iter) + return call_read_iter(req->file, &req->rw.kiocb, iter); + else if (req->file->f_op->read) + return loop_rw_iter(READ, req->file, &req->rw.kiocb, iter); + else + return -EINVAL; +} + +static int io_read(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; - struct iov_iter iter; + struct iov_iter __iter, *iter = &__iter; + ssize_t io_size, ret, ret2; size_t iov_count; - ssize_t io_size, ret; - ret = io_import_iovec(READ, req, &iovec, &iter, !force_nonblock); + if (req->io) + iter = &req->io->rw.iter; + + ret = io_import_iovec(READ, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iov_count = iov_iter_count(iter); + io_size = ret; + req->result = io_size; + ret = 0; /* Ensure we clear previously set non-block flag */ if (!force_nonblock) kiocb->ki_flags &= ~IOCB_NOWAIT; - req->result = 0; - io_size = ret; - if (req->flags & REQ_F_LINK_HEAD) - req->result = io_size; - - /* - * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so - * we know to async punt it even if it was opened O_NONBLOCK - */ + /* If the file doesn't support async, just async punt */ if (force_nonblock && !io_file_supports_async(req->file, READ)) goto copy_iov; - iov_count = iov_iter_count(&iter); - ret = rw_verify_area(READ, req->file, &kiocb->ki_pos, iov_count); + ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count); + if (unlikely(ret)) + goto out_free; + + ret = io_iter_do_read(req, iter); + if (!ret) { - ssize_t ret2; + goto done; + } else if (ret == -EIOCBQUEUED) { + ret = 0; + goto out_free; + } else if (ret == -EAGAIN) { + /* IOPOLL retry should happen for io-wq threads */ + if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL)) + goto done; + /* no retry on NONBLOCK marked file */ + if (req->file->f_flags & O_NONBLOCK) + goto done; + /* some cases will consume bytes even on error returns */ + iov_iter_revert(iter, iov_count - iov_iter_count(iter)); + ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); + if (ret) + goto out_free; + return -EAGAIN; + } else if (ret < 0) { + /* make sure -ERESTARTSYS -> -EINTR is done */ + goto done; + } - if (req->file->f_op->read_iter) - ret2 = call_read_iter(req->file, kiocb, &iter); - else - ret2 = loop_rw_iter(READ, req->file, kiocb, &iter); + /* read it all, or we did blocking attempt. no retry. 
*/ + if (!iov_iter_count(iter) || !force_nonblock || + (req->file->f_flags & O_NONBLOCK)) + goto done; - /* Catch -EAGAIN return for forced non-blocking submission */ - if (!force_nonblock || ret2 != -EAGAIN) { - kiocb_done(kiocb, ret2); - } else { + io_size -= ret; copy_iov: - ret = io_setup_async_rw(req, io_size, iovec, - inline_vecs, &iter); - if (ret) - goto out_free; - /* any defer here is final, must blocking retry */ - if (!(req->flags & REQ_F_NOWAIT) && - !file_can_poll(req->file)) - req->flags |= REQ_F_MUST_PUNT; - return -EAGAIN; - } + ret2 = io_setup_async_rw(req, iovec, inline_vecs, iter, true); + if (ret2) { + ret = ret2; + goto out_free; + } + /* it's copied and will be cleaned with ->io */ + iovec = NULL; + /* now use our persistent iterator, if we aren't already */ + iter = &req->io->rw.iter; +retry: + req->io->rw.bytes_done += ret; + /* if we can retry, do so with the callbacks armed */ + if (!io_rw_should_retry(req)) { + kiocb->ki_flags &= ~IOCB_WAITQ; + return -EAGAIN; } + + /* + * Now retry read with the IOCB_WAITQ parts set in the iocb. If we + * get -EIOCBQUEUED, then we'll get a notification when the desired + * page gets unlocked. We can also get a partial read here, and if we + * do, then just retry at the new offset. + */ + ret = io_iter_do_read(req, iter); + if (ret == -EIOCBQUEUED) { + ret = 0; + goto out_free; + } else if (ret > 0 && ret < io_size) { + /* we got some bytes, but not all. retry. */ + goto retry; + } +done: + kiocb_done(kiocb, ret, cs); + ret = 0; out_free: - if (!(req->flags & REQ_F_NEED_CLEANUP)) + /* it's reportedly faster than delegating the null check to kfree() */ + if (iovec) kfree(iovec); return ret; } @@ -2689,8 +3215,6 @@ out_free: static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, bool force_nonblock) { - struct io_async_ctx *io; - struct iov_iter iter; ssize_t ret; ret = io_prep_rw(req, sqe, force_nonblock); @@ -2700,49 +3224,36 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (unlikely(!(req->file->f_mode & FMODE_WRITE))) return -EBADF; - req->fsize = rlimit(RLIMIT_FSIZE); - /* either don't need iovec imported or already have it */ if (!req->io || req->flags & REQ_F_NEED_CLEANUP) return 0; - - io = req->io; - io->rw.iov = io->rw.fast_iov; - req->io = NULL; - ret = io_import_iovec(WRITE, req, &io->rw.iov, &iter, !force_nonblock); - req->io = io; - if (ret < 0) - return ret; - - io_req_map_rw(req, ret, io->rw.iov, io->rw.fast_iov, &iter); - return 0; + return io_rw_prep_async(req, WRITE, force_nonblock); } -static int io_write(struct io_kiocb *req, bool force_nonblock) +static int io_write(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs; struct kiocb *kiocb = &req->rw.kiocb; - struct iov_iter iter; + struct iov_iter __iter, *iter = &__iter; size_t iov_count; - ssize_t ret, io_size; + ssize_t ret, ret2, io_size; + + if (req->io) + iter = &req->io->rw.iter; - ret = io_import_iovec(WRITE, req, &iovec, &iter, !force_nonblock); + ret = io_import_iovec(WRITE, req, &iovec, iter, !force_nonblock); if (ret < 0) return ret; + iov_count = iov_iter_count(iter); + io_size = ret; + req->result = io_size; /* Ensure we clear previously set non-block flag */ if (!force_nonblock) req->rw.kiocb.ki_flags &= ~IOCB_NOWAIT; - req->result = 0; - io_size = ret; - if (req->flags & REQ_F_LINK_HEAD) - req->result = io_size; - - /* - * If the file doesn't support async, mark it as REQ_F_MUST_PUNT so - * we know to 
async punt it even if it was opened O_NONBLOCK - */ + /* If the file doesn't support async, just async punt */ if (force_nonblock && !io_file_supports_async(req->file, WRITE)) goto copy_iov; @@ -2751,60 +3262,58 @@ static int io_write(struct io_kiocb *req, bool force_nonblock) (req->flags & REQ_F_ISREG)) goto copy_iov; - iov_count = iov_iter_count(&iter); - ret = rw_verify_area(WRITE, req->file, &kiocb->ki_pos, iov_count); - if (!ret) { - ssize_t ret2; - - /* - * Open-code file_start_write here to grab freeze protection, - * which will be released by another thread in - * io_complete_rw(). Fool lockdep by telling it the lock got - * released so that it doesn't complain about the held lock when - * we return to userspace. - */ - if (req->flags & REQ_F_ISREG) { - __sb_start_write(file_inode(req->file)->i_sb, - SB_FREEZE_WRITE, true); - __sb_writers_release(file_inode(req->file)->i_sb, - SB_FREEZE_WRITE); - } - kiocb->ki_flags |= IOCB_WRITE; - - if (!force_nonblock) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize; + ret = rw_verify_area(WRITE, req->file, io_kiocb_ppos(kiocb), iov_count); + if (unlikely(ret)) + goto out_free; - if (req->file->f_op->write_iter) - ret2 = call_write_iter(req->file, kiocb, &iter); - else - ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter); + /* + * Open-code file_start_write here to grab freeze protection, + * which will be released by another thread in + * io_complete_rw(). Fool lockdep by telling it the lock got + * released so that it doesn't complain about the held lock when + * we return to userspace. + */ + if (req->flags & REQ_F_ISREG) { + __sb_start_write(file_inode(req->file)->i_sb, + SB_FREEZE_WRITE, true); + __sb_writers_release(file_inode(req->file)->i_sb, + SB_FREEZE_WRITE); + } + kiocb->ki_flags |= IOCB_WRITE; - if (!force_nonblock) - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; + if (req->file->f_op->write_iter) + ret2 = call_write_iter(req->file, kiocb, iter); + else if (req->file->f_op->write) + ret2 = loop_rw_iter(WRITE, req->file, kiocb, iter); + else + ret2 = -EINVAL; - /* - * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just - * retry them without IOCB_NOWAIT. - */ - if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) - ret2 = -EAGAIN; - if (!force_nonblock || ret2 != -EAGAIN) { - kiocb_done(kiocb, ret2); - } else { + /* + * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just + * retry them without IOCB_NOWAIT. 
+ */ + if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT)) + ret2 = -EAGAIN; + /* no retry on NONBLOCK marked file */ + if (ret2 == -EAGAIN && (req->file->f_flags & O_NONBLOCK)) + goto done; + if (!force_nonblock || ret2 != -EAGAIN) { + /* IOPOLL retry should happen for io-wq threads */ + if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN) + goto copy_iov; +done: + kiocb_done(kiocb, ret2, cs); + } else { copy_iov: - ret = io_setup_async_rw(req, io_size, iovec, - inline_vecs, &iter); - if (ret) - goto out_free; - /* any defer here is final, must blocking retry */ - if (!(req->flags & REQ_F_NOWAIT) && - !file_can_poll(req->file)) - req->flags |= REQ_F_MUST_PUNT; + /* some cases will consume bytes even on error returns */ + iov_iter_revert(iter, iov_count - iov_iter_count(iter)); + ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); + if (!ret) return -EAGAIN; - } } out_free: - if (!(req->flags & REQ_F_NEED_CLEANUP)) + /* it's reportedly faster than delegating the null check to kfree() */ + if (iovec) kfree(iovec); return ret; } @@ -2870,10 +3379,9 @@ static int io_tee(struct io_kiocb *req, bool force_nonblock) io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); req->flags &= ~REQ_F_NEED_CLEANUP; - io_cqring_add_event(req, ret); if (ret != sp->len) req_set_fail_links(req); - io_put_req(req); + io_req_complete(req, ret); return 0; } @@ -2907,25 +3415,23 @@ static int io_splice(struct io_kiocb *req, bool force_nonblock) io_put_file(req, in, (sp->flags & SPLICE_F_FD_IN_FIXED)); req->flags &= ~REQ_F_NEED_CLEANUP; - io_cqring_add_event(req, ret); if (ret != sp->len) req_set_fail_links(req); - io_put_req(req); + io_req_complete(req, ret); return 0; } /* * IORING_OP_NOP just posts a completion event, nothing else. */ -static int io_nop(struct io_kiocb *req) +static int io_nop(struct io_kiocb *req, struct io_comp_state *cs) { struct io_ring_ctx *ctx = req->ctx; if (unlikely(ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; - io_cqring_add_event(req, 0); - io_put_req(req); + __io_req_complete(req, 0, 0, cs); return 0; } @@ -2964,8 +3470,7 @@ static int io_fsync(struct io_kiocb *req, bool force_nonblock) req->sync.flags & IORING_FSYNC_DATASYNC); if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + io_req_complete(req, ret); return 0; } @@ -2980,7 +3485,6 @@ static int io_fallocate_prep(struct io_kiocb *req, req->sync.off = READ_ONCE(sqe->off); req->sync.len = READ_ONCE(sqe->addr); req->sync.mode = READ_ONCE(sqe->len); - req->fsize = rlimit(RLIMIT_FSIZE); return 0; } @@ -2991,15 +3495,11 @@ static int io_fallocate(struct io_kiocb *req, bool force_nonblock) /* fallocate always requiring blocking context */ if (force_nonblock) return -EAGAIN; - - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = req->fsize; ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off, req->sync.len); - current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + io_req_complete(req, ret); return 0; } @@ -3095,8 +3595,7 @@ err: req->flags &= ~REQ_F_NEED_CLEANUP; if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + io_req_complete(req, ret); return 0; } @@ -3150,7 +3649,8 @@ static int __io_remove_buffers(struct io_ring_ctx *ctx, struct io_buffer *buf, return i; } -static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock) +static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock, + struct 
io_comp_state *cs) { struct io_provide_buf *p = &req->pbuf; struct io_ring_ctx *ctx = req->ctx; @@ -3169,8 +3669,7 @@ static int io_remove_buffers(struct io_kiocb *req, bool force_nonblock) io_ring_submit_lock(ctx, !force_nonblock); if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; } @@ -3228,7 +3727,8 @@ static int io_add_buffers(struct io_provide_buf *pbuf, struct io_buffer **head) return i ? i : -ENOMEM; } -static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock) +static int io_provide_buffers(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { struct io_provide_buf *p = &req->pbuf; struct io_ring_ctx *ctx = req->ctx; @@ -3257,8 +3757,7 @@ out: io_ring_submit_unlock(ctx, !force_nonblock); if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; } @@ -3289,7 +3788,8 @@ static int io_epoll_ctl_prep(struct io_kiocb *req, #endif } -static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock) +static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { #if defined(CONFIG_EPOLL) struct io_epoll *ie = &req->epoll; @@ -3301,8 +3801,7 @@ static int io_epoll_ctl(struct io_kiocb *req, bool force_nonblock) if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; #else return -EOPNOTSUPP; @@ -3338,8 +3837,7 @@ static int io_madvise(struct io_kiocb *req, bool force_nonblock) ret = do_madvise(ma->addr, ma->len, ma->advice); if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + io_req_complete(req, ret); return 0; #else return -EOPNOTSUPP; @@ -3378,8 +3876,7 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock) ret = vfs_fadvise(req->file, fa->offset, fa->len, fa->advice); if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + io_req_complete(req, ret); return 0; } @@ -3418,8 +3915,7 @@ static int io_statx(struct io_kiocb *req, bool force_nonblock) if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + io_req_complete(req, ret); return 0; } @@ -3450,7 +3946,8 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_close(struct io_kiocb *req, bool force_nonblock) +static int io_close(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { struct io_close *close = &req->close; int ret; @@ -3464,8 +3961,10 @@ static int io_close(struct io_kiocb *req, bool force_nonblock) /* if the file has a flush method, be safe and punt to async */ if (close->put_file->f_op->flush && force_nonblock) { + /* was never set, but play safe */ + req->flags &= ~REQ_F_NOWAIT; /* avoid grabbing files - we don't need the files */ - req->flags |= REQ_F_NO_FILE_TABLE | REQ_F_MUST_PUNT; + req->flags |= REQ_F_NO_FILE_TABLE; return -EAGAIN; } @@ -3473,10 +3972,9 @@ static int io_close(struct io_kiocb *req, bool force_nonblock) ret = filp_close(close->put_file, req->work.files); if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); fput(close->put_file); close->put_file = NULL; - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; } @@ -3510,8 +4008,7 @@ static int io_sync_file_range(struct io_kiocb *req, bool force_nonblock) req->sync.flags); if (ret < 0) req_set_fail_links(req); - 
io_cqring_add_event(req, ret); - io_put_req(req); + io_req_complete(req, ret); return 0; } @@ -3531,6 +4028,15 @@ static int io_setup_async_msg(struct io_kiocb *req, return -EAGAIN; } +static int io_sendmsg_copy_hdr(struct io_kiocb *req, + struct io_async_msghdr *iomsg) +{ + iomsg->iov = iomsg->fast_iov; + iomsg->msg.msg_name = &iomsg->addr; + return sendmsg_copy_msghdr(&iomsg->msg, req->sr_msg.umsg, + req->sr_msg.msg_flags, &iomsg->iov); +} + static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_sr_msg *sr = &req->sr_msg; @@ -3541,7 +4047,7 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EINVAL; sr->msg_flags = READ_ONCE(sqe->msg_flags); - sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr)); + sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); #ifdef CONFIG_COMPAT @@ -3555,136 +4061,126 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (req->flags & REQ_F_NEED_CLEANUP) return 0; - io->msg.msg.msg_name = &io->msg.addr; - io->msg.iov = io->msg.fast_iov; - ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags, - &io->msg.iov); + ret = io_sendmsg_copy_hdr(req, &io->msg); if (!ret) req->flags |= REQ_F_NEED_CLEANUP; return ret; } -static int io_sendmsg(struct io_kiocb *req, bool force_nonblock) +static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { - struct io_async_msghdr *kmsg = NULL; + struct io_async_msghdr iomsg, *kmsg; struct socket *sock; + unsigned flags; int ret; sock = sock_from_file(req->file, &ret); - if (sock) { - struct io_async_ctx io; - unsigned flags; - - if (req->io) { - kmsg = &req->io->msg; - kmsg->msg.msg_name = &req->io->msg.addr; - /* if iov is set, it's allocated already */ - if (!kmsg->iov) - kmsg->iov = kmsg->fast_iov; - kmsg->msg.msg_iter.iov = kmsg->iov; - } else { - struct io_sr_msg *sr = &req->sr_msg; - - kmsg = &io.msg; - kmsg->msg.msg_name = &io.msg.addr; + if (unlikely(!sock)) + return ret; - io.msg.iov = io.msg.fast_iov; - ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg, - sr->msg_flags, &io.msg.iov); - if (ret) - return ret; - } + if (req->io) { + kmsg = &req->io->msg; + kmsg->msg.msg_name = &req->io->msg.addr; + /* if iov is set, it's allocated already */ + if (!kmsg->iov) + kmsg->iov = kmsg->fast_iov; + kmsg->msg.msg_iter.iov = kmsg->iov; + } else { + ret = io_sendmsg_copy_hdr(req, &iomsg); + if (ret) + return ret; + kmsg = &iomsg; + } - flags = req->sr_msg.msg_flags; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) - flags |= MSG_DONTWAIT; + flags = req->sr_msg.msg_flags; + if (flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= MSG_DONTWAIT; - ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); - if (force_nonblock && ret == -EAGAIN) - return io_setup_async_msg(req, kmsg); - if (ret == -ERESTARTSYS) - ret = -EINTR; - } + ret = __sys_sendmsg_sock(sock, &kmsg->msg, flags); + if (force_nonblock && ret == -EAGAIN) + return io_setup_async_msg(req, kmsg); + if (ret == -ERESTARTSYS) + ret = -EINTR; - if (kmsg && kmsg->iov != kmsg->fast_iov) + if (kmsg->iov != kmsg->fast_iov) kfree(kmsg->iov); req->flags &= ~REQ_F_NEED_CLEANUP; - io_cqring_add_event(req, ret); if (ret < 0) req_set_fail_links(req); - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; } -static int io_send(struct io_kiocb *req, bool force_nonblock) +static int io_send(struct io_kiocb *req, bool force_nonblock, + struct 
io_comp_state *cs) { + struct io_sr_msg *sr = &req->sr_msg; + struct msghdr msg; + struct iovec iov; struct socket *sock; + unsigned flags; int ret; sock = sock_from_file(req->file, &ret); - if (sock) { - struct io_sr_msg *sr = &req->sr_msg; - struct msghdr msg; - struct iovec iov; - unsigned flags; + if (unlikely(!sock)) + return ret; - ret = import_single_range(WRITE, sr->buf, sr->len, &iov, - &msg.msg_iter); - if (ret) - return ret; + ret = import_single_range(WRITE, sr->buf, sr->len, &iov, &msg.msg_iter); + if (unlikely(ret)) + return ret; - msg.msg_name = NULL; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_namelen = 0; + msg.msg_name = NULL; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_namelen = 0; - flags = req->sr_msg.msg_flags; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) - flags |= MSG_DONTWAIT; + flags = req->sr_msg.msg_flags; + if (flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= MSG_DONTWAIT; - msg.msg_flags = flags; - ret = sock_sendmsg(sock, &msg); - if (force_nonblock && ret == -EAGAIN) - return -EAGAIN; - if (ret == -ERESTARTSYS) - ret = -EINTR; - } + msg.msg_flags = flags; + ret = sock_sendmsg(sock, &msg); + if (force_nonblock && ret == -EAGAIN) + return -EAGAIN; + if (ret == -ERESTARTSYS) + ret = -EINTR; - io_cqring_add_event(req, ret); if (ret < 0) req_set_fail_links(req); - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; } -static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io) +static int __io_recvmsg_copy_hdr(struct io_kiocb *req, + struct io_async_msghdr *iomsg) { struct io_sr_msg *sr = &req->sr_msg; struct iovec __user *uiov; size_t iov_len; int ret; - ret = __copy_msghdr_from_user(&io->msg.msg, sr->msg, &io->msg.uaddr, - &uiov, &iov_len); + ret = __copy_msghdr_from_user(&iomsg->msg, sr->umsg, + &iomsg->uaddr, &uiov, &iov_len); if (ret) return ret; if (req->flags & REQ_F_BUFFER_SELECT) { if (iov_len > 1) return -EINVAL; - if (copy_from_user(io->msg.iov, uiov, sizeof(*uiov))) + if (copy_from_user(iomsg->iov, uiov, sizeof(*uiov))) return -EFAULT; - sr->len = io->msg.iov[0].iov_len; - iov_iter_init(&io->msg.msg.msg_iter, READ, io->msg.iov, 1, + sr->len = iomsg->iov[0].iov_len; + iov_iter_init(&iomsg->msg.msg_iter, READ, iomsg->iov, 1, sr->len); - io->msg.iov = NULL; + iomsg->iov = NULL; } else { ret = import_iovec(READ, uiov, iov_len, UIO_FASTIOV, - &io->msg.iov, &io->msg.msg.msg_iter); + &iomsg->iov, &iomsg->msg.msg_iter); if (ret > 0) ret = 0; } @@ -3694,7 +4190,7 @@ static int __io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io) #ifdef CONFIG_COMPAT static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, - struct io_async_ctx *io) + struct io_async_msghdr *iomsg) { struct compat_msghdr __user *msg_compat; struct io_sr_msg *sr = &req->sr_msg; @@ -3703,8 +4199,8 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, compat_size_t len; int ret; - msg_compat = (struct compat_msghdr __user *) sr->msg; - ret = __get_compat_msghdr(&io->msg.msg, msg_compat, &io->msg.uaddr, + msg_compat = (struct compat_msghdr __user *) sr->umsg; + ret = __get_compat_msghdr(&iomsg->msg, msg_compat, &iomsg->uaddr, &ptr, &len); if (ret) return ret; @@ -3721,12 +4217,12 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, return -EFAULT; if (clen < 0) return -EINVAL; - sr->len = io->msg.iov[0].iov_len; - io->msg.iov = NULL; + sr->len = iomsg->iov[0].iov_len; + iomsg->iov = NULL; } else { ret = 
compat_import_iovec(READ, uiov, len, UIO_FASTIOV, - &io->msg.iov, - &io->msg.msg.msg_iter); + &iomsg->iov, + &iomsg->msg.msg_iter); if (ret < 0) return ret; } @@ -3735,40 +4231,40 @@ static int __io_compat_recvmsg_copy_hdr(struct io_kiocb *req, } #endif -static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_ctx *io) +static int io_recvmsg_copy_hdr(struct io_kiocb *req, + struct io_async_msghdr *iomsg) { - io->msg.msg.msg_name = &io->msg.addr; - io->msg.iov = io->msg.fast_iov; + iomsg->msg.msg_name = &iomsg->addr; + iomsg->iov = iomsg->fast_iov; #ifdef CONFIG_COMPAT if (req->ctx->compat) - return __io_compat_recvmsg_copy_hdr(req, io); + return __io_compat_recvmsg_copy_hdr(req, iomsg); #endif - return __io_recvmsg_copy_hdr(req, io); + return __io_recvmsg_copy_hdr(req, iomsg); } static struct io_buffer *io_recv_buffer_select(struct io_kiocb *req, - int *cflags, bool needs_lock) + bool needs_lock) { struct io_sr_msg *sr = &req->sr_msg; struct io_buffer *kbuf; - if (!(req->flags & REQ_F_BUFFER_SELECT)) - return NULL; - kbuf = io_buffer_select(req, &sr->len, sr->bgid, sr->kbuf, needs_lock); if (IS_ERR(kbuf)) return kbuf; sr->kbuf = kbuf; req->flags |= REQ_F_BUFFER_SELECTED; - - *cflags = kbuf->bid << IORING_CQE_BUFFER_SHIFT; - *cflags |= IORING_CQE_F_BUFFER; return kbuf; } +static inline unsigned int io_put_recv_kbuf(struct io_kiocb *req) +{ + return io_put_kbuf(req, req->sr_msg.kbuf); +} + static int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { @@ -3780,7 +4276,7 @@ static int io_recvmsg_prep(struct io_kiocb *req, return -EINVAL; sr->msg_flags = READ_ONCE(sqe->msg_flags); - sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr)); + sr->umsg = u64_to_user_ptr(READ_ONCE(sqe->addr)); sr->len = READ_ONCE(sqe->len); sr->bgid = READ_ONCE(sqe->buf_group); @@ -3795,133 +4291,123 @@ static int io_recvmsg_prep(struct io_kiocb *req, if (req->flags & REQ_F_NEED_CLEANUP) return 0; - ret = io_recvmsg_copy_hdr(req, io); + ret = io_recvmsg_copy_hdr(req, &io->msg); if (!ret) req->flags |= REQ_F_NEED_CLEANUP; return ret; } -static int io_recvmsg(struct io_kiocb *req, bool force_nonblock) +static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { - struct io_async_msghdr *kmsg = NULL; + struct io_async_msghdr iomsg, *kmsg; struct socket *sock; + struct io_buffer *kbuf; + unsigned flags; int ret, cflags = 0; sock = sock_from_file(req->file, &ret); - if (sock) { - struct io_buffer *kbuf; - struct io_async_ctx io; - unsigned flags; - - if (req->io) { - kmsg = &req->io->msg; - kmsg->msg.msg_name = &req->io->msg.addr; - /* if iov is set, it's allocated already */ - if (!kmsg->iov) - kmsg->iov = kmsg->fast_iov; - kmsg->msg.msg_iter.iov = kmsg->iov; - } else { - kmsg = &io.msg; - kmsg->msg.msg_name = &io.msg.addr; + if (unlikely(!sock)) + return ret; - ret = io_recvmsg_copy_hdr(req, &io); - if (ret) - return ret; - } + if (req->io) { + kmsg = &req->io->msg; + kmsg->msg.msg_name = &req->io->msg.addr; + /* if iov is set, it's allocated already */ + if (!kmsg->iov) + kmsg->iov = kmsg->fast_iov; + kmsg->msg.msg_iter.iov = kmsg->iov; + } else { + ret = io_recvmsg_copy_hdr(req, &iomsg); + if (ret) + return ret; + kmsg = &iomsg; + } - kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock); - if (IS_ERR(kbuf)) { + if (req->flags & REQ_F_BUFFER_SELECT) { + kbuf = io_recv_buffer_select(req, !force_nonblock); + if (IS_ERR(kbuf)) return PTR_ERR(kbuf); - } else if (kbuf) { - kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr); - 
iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov, - 1, req->sr_msg.len); - } + kmsg->fast_iov[0].iov_base = u64_to_user_ptr(kbuf->addr); + iov_iter_init(&kmsg->msg.msg_iter, READ, kmsg->iov, + 1, req->sr_msg.len); + } - flags = req->sr_msg.msg_flags; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) - flags |= MSG_DONTWAIT; + flags = req->sr_msg.msg_flags; + if (flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= MSG_DONTWAIT; - ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.msg, - kmsg->uaddr, flags); - if (force_nonblock && ret == -EAGAIN) { - ret = io_setup_async_msg(req, kmsg); - if (ret != -EAGAIN) - kfree(kbuf); - return ret; - } - if (ret == -ERESTARTSYS) - ret = -EINTR; - if (kbuf) - kfree(kbuf); - } + ret = __sys_recvmsg_sock(sock, &kmsg->msg, req->sr_msg.umsg, + kmsg->uaddr, flags); + if (force_nonblock && ret == -EAGAIN) + return io_setup_async_msg(req, kmsg); + if (ret == -ERESTARTSYS) + ret = -EINTR; - if (kmsg && kmsg->iov != kmsg->fast_iov) + if (req->flags & REQ_F_BUFFER_SELECTED) + cflags = io_put_recv_kbuf(req); + if (kmsg->iov != kmsg->fast_iov) kfree(kmsg->iov); req->flags &= ~REQ_F_NEED_CLEANUP; - __io_cqring_add_event(req, ret, cflags); if (ret < 0) req_set_fail_links(req); - io_put_req(req); + __io_req_complete(req, ret, cflags, cs); return 0; } -static int io_recv(struct io_kiocb *req, bool force_nonblock) +static int io_recv(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { - struct io_buffer *kbuf = NULL; + struct io_buffer *kbuf; + struct io_sr_msg *sr = &req->sr_msg; + struct msghdr msg; + void __user *buf = sr->buf; struct socket *sock; + struct iovec iov; + unsigned flags; int ret, cflags = 0; sock = sock_from_file(req->file, &ret); - if (sock) { - struct io_sr_msg *sr = &req->sr_msg; - void __user *buf = sr->buf; - struct msghdr msg; - struct iovec iov; - unsigned flags; + if (unlikely(!sock)) + return ret; - kbuf = io_recv_buffer_select(req, &cflags, !force_nonblock); + if (req->flags & REQ_F_BUFFER_SELECT) { + kbuf = io_recv_buffer_select(req, !force_nonblock); if (IS_ERR(kbuf)) return PTR_ERR(kbuf); - else if (kbuf) - buf = u64_to_user_ptr(kbuf->addr); + buf = u64_to_user_ptr(kbuf->addr); + } - ret = import_single_range(READ, buf, sr->len, &iov, - &msg.msg_iter); - if (ret) { - kfree(kbuf); - return ret; - } + ret = import_single_range(READ, buf, sr->len, &iov, &msg.msg_iter); + if (unlikely(ret)) + goto out_free; - req->flags |= REQ_F_NEED_CLEANUP; - msg.msg_name = NULL; - msg.msg_control = NULL; - msg.msg_controllen = 0; - msg.msg_namelen = 0; - msg.msg_iocb = NULL; - msg.msg_flags = 0; - - flags = req->sr_msg.msg_flags; - if (flags & MSG_DONTWAIT) - req->flags |= REQ_F_NOWAIT; - else if (force_nonblock) - flags |= MSG_DONTWAIT; - - ret = sock_recvmsg(sock, &msg, flags); - if (force_nonblock && ret == -EAGAIN) - return -EAGAIN; - if (ret == -ERESTARTSYS) - ret = -EINTR; - } + msg.msg_name = NULL; + msg.msg_control = NULL; + msg.msg_controllen = 0; + msg.msg_namelen = 0; + msg.msg_iocb = NULL; + msg.msg_flags = 0; - kfree(kbuf); - req->flags &= ~REQ_F_NEED_CLEANUP; - __io_cqring_add_event(req, ret, cflags); + flags = req->sr_msg.msg_flags; + if (flags & MSG_DONTWAIT) + req->flags |= REQ_F_NOWAIT; + else if (force_nonblock) + flags |= MSG_DONTWAIT; + + ret = sock_recvmsg(sock, &msg, flags); + if (force_nonblock && ret == -EAGAIN) + return -EAGAIN; + if (ret == -ERESTARTSYS) + ret = -EINTR; +out_free: + if (req->flags & REQ_F_BUFFER_SELECTED) + cflags = 
io_put_recv_kbuf(req); if (ret < 0) req_set_fail_links(req); - io_put_req(req); + __io_req_complete(req, ret, cflags, cs); return 0; } @@ -3941,7 +4427,8 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return 0; } -static int io_accept(struct io_kiocb *req, bool force_nonblock) +static int io_accept(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { struct io_accept *accept = &req->accept; unsigned int file_flags = force_nonblock ? O_NONBLOCK : 0; @@ -3960,8 +4447,7 @@ static int io_accept(struct io_kiocb *req, bool force_nonblock) ret = -EINTR; req_set_fail_links(req); } - io_cqring_add_event(req, ret); - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; } @@ -3985,7 +4471,8 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) &io->connect.address); } -static int io_connect(struct io_kiocb *req, bool force_nonblock) +static int io_connect(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { struct io_async_ctx __io, *io; unsigned file_flags; @@ -4021,8 +4508,7 @@ static int io_connect(struct io_kiocb *req, bool force_nonblock) out: if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; } #else /* !CONFIG_NET */ @@ -4031,12 +4517,14 @@ static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EOPNOTSUPP; } -static int io_sendmsg(struct io_kiocb *req, bool force_nonblock) +static int io_sendmsg(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { return -EOPNOTSUPP; } -static int io_send(struct io_kiocb *req, bool force_nonblock) +static int io_send(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { return -EOPNOTSUPP; } @@ -4047,12 +4535,14 @@ static int io_recvmsg_prep(struct io_kiocb *req, return -EOPNOTSUPP; } -static int io_recvmsg(struct io_kiocb *req, bool force_nonblock) +static int io_recvmsg(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { return -EOPNOTSUPP; } -static int io_recv(struct io_kiocb *req, bool force_nonblock) +static int io_recv(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { return -EOPNOTSUPP; } @@ -4062,7 +4552,8 @@ static int io_accept_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EOPNOTSUPP; } -static int io_accept(struct io_kiocb *req, bool force_nonblock) +static int io_accept(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { return -EOPNOTSUPP; } @@ -4072,7 +4563,8 @@ static int io_connect_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return -EOPNOTSUPP; } -static int io_connect(struct io_kiocb *req, bool force_nonblock) +static int io_connect(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { return -EOPNOTSUPP; } @@ -4084,33 +4576,10 @@ struct io_poll_table { int error; }; -static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb) -{ - struct task_struct *tsk = req->task; - struct io_ring_ctx *ctx = req->ctx; - int ret, notify = TWA_RESUME; - - /* - * SQPOLL kernel thread doesn't need notification, just a wakeup. - * If we're not using an eventfd, then TWA_RESUME is always fine, - * as we won't have dependencies between request completions for - * other kernel wait conditions. 
- */ - if (ctx->flags & IORING_SETUP_SQPOLL) - notify = 0; - else if (ctx->cq_ev_fd) - notify = TWA_SIGNAL; - - ret = task_work_add(tsk, cb, notify); - if (!ret) - wake_up_process(tsk); - return ret; -} - static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, __poll_t mask, task_work_func_t func) { - struct task_struct *tsk; + bool twa_signal_ok; int ret; /* for instances that support it check for an event match first: */ @@ -4121,17 +4590,28 @@ static int __io_async_wake(struct io_kiocb *req, struct io_poll_iocb *poll, list_del_init(&poll->wait.entry); - tsk = req->task; req->result = mask; init_task_work(&req->task_work, func); + percpu_ref_get(&req->ctx->refs); + + /* + * If we using the signalfd wait_queue_head for this wakeup, then + * it's not safe to use TWA_SIGNAL as we could be recursing on the + * tsk->sighand->siglock on doing the wakeup. Should not be needed + * either, as the normal wakeup will suffice. + */ + twa_signal_ok = (poll->head != &req->task->sighand->signalfd_wqh); + /* * If this fails, then the task is exiting. When a task exits, the * work gets canceled, so just cancel this request as well instead * of executing it. We can't safely execute it anyway, as we may not * have the needed state needed for it anyway. */ - ret = io_req_task_work_add(req, &req->task_work); + ret = io_req_task_work_add(req, &req->task_work, twa_signal_ok); if (unlikely(ret)) { + struct task_struct *tsk; + WRITE_ONCE(poll->canceled, true); tsk = io_wq_get_task(req->ctx->io_wq); task_work_add(tsk, &req->task_work, 0); @@ -4160,9 +4640,24 @@ static bool io_poll_rewait(struct io_kiocb *req, struct io_poll_iocb *poll) return false; } -static void io_poll_remove_double(struct io_kiocb *req, void *data) +static struct io_poll_iocb *io_poll_get_double(struct io_kiocb *req) +{ + /* pure poll stashes this in ->io, poll driven retry elsewhere */ + if (req->opcode == IORING_OP_POLL_ADD) + return (struct io_poll_iocb *) req->io; + return req->apoll->double_poll; +} + +static struct io_poll_iocb *io_poll_get_single(struct io_kiocb *req) +{ + if (req->opcode == IORING_OP_POLL_ADD) + return &req->poll; + return &req->apoll->poll; +} + +static void io_poll_remove_double(struct io_kiocb *req) { - struct io_poll_iocb *poll = data; + struct io_poll_iocb *poll = io_poll_get_double(req); lockdep_assert_held(&req->ctx->completion_lock); @@ -4182,7 +4677,7 @@ static void io_poll_complete(struct io_kiocb *req, __poll_t mask, int error) { struct io_ring_ctx *ctx = req->ctx; - io_poll_remove_double(req, req->io); + io_poll_remove_double(req); req->poll.done = true; io_cqring_fill_event(req, error ? 
error : mangle_poll(mask)); io_commit_cqring(ctx); @@ -4200,7 +4695,7 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) hash_del(&req->hash_node); io_poll_complete(req, req->result, 0); req->flags |= REQ_F_COMP_LOCKED; - io_put_req_find_next(req, nxt); + *nxt = io_put_req_find_next(req); spin_unlock_irq(&ctx->completion_lock); io_cqring_ev_posted(ctx); @@ -4209,23 +4704,20 @@ static void io_poll_task_handler(struct io_kiocb *req, struct io_kiocb **nxt) static void io_poll_task_func(struct callback_head *cb) { struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + struct io_ring_ctx *ctx = req->ctx; struct io_kiocb *nxt = NULL; io_poll_task_handler(req, &nxt); - if (nxt) { - struct io_ring_ctx *ctx = nxt->ctx; - - mutex_lock(&ctx->uring_lock); - __io_queue_sqe(nxt, NULL); - mutex_unlock(&ctx->uring_lock); - } + if (nxt) + __io_req_task_submit(nxt); + percpu_ref_put(&ctx->refs); } static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, int sync, void *key) { struct io_kiocb *req = wait->private; - struct io_poll_iocb *poll = req->apoll->double_poll; + struct io_poll_iocb *poll = io_poll_get_single(req); __poll_t mask = key_to_poll(key); /* for instances that support it check for an event match first: */ @@ -4239,6 +4731,8 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, done = list_empty(&poll->wait.entry); if (!done) list_del_init(&poll->wait.entry); + /* make sure double remove sees this as being gone */ + wait->private = NULL; spin_unlock(&poll->head->lock); if (!done) __io_async_wake(req, poll, mask, io_poll_task_func); @@ -4288,7 +4782,11 @@ static void __io_queue_proc(struct io_poll_iocb *poll, struct io_poll_table *pt, pt->error = 0; poll->head = head; - add_wait_queue(head, &poll->wait); + + if (poll->events & EPOLLEXCLUSIVE) + add_wait_queue_exclusive(head, &poll->wait); + else + add_wait_queue(head, &poll->wait); } static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, @@ -4300,77 +4798,35 @@ static void io_async_queue_proc(struct file *file, struct wait_queue_head *head, __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll); } -static void io_sq_thread_drop_mm(struct io_ring_ctx *ctx) -{ - struct mm_struct *mm = current->mm; - - if (mm) { - kthread_unuse_mm(mm); - mmput(mm); - } -} - -static int io_sq_thread_acquire_mm(struct io_ring_ctx *ctx, - struct io_kiocb *req) -{ - if (io_op_defs[req->opcode].needs_mm && !current->mm) { - if (unlikely(!mmget_not_zero(ctx->sqo_mm))) - return -EFAULT; - kthread_use_mm(ctx->sqo_mm); - } - - return 0; -} - static void io_async_task_func(struct callback_head *cb) { struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); struct async_poll *apoll = req->apoll; struct io_ring_ctx *ctx = req->ctx; - bool canceled = false; trace_io_uring_task_run(req->ctx, req->opcode, req->user_data); if (io_poll_rewait(req, &apoll->poll)) { spin_unlock_irq(&ctx->completion_lock); + percpu_ref_put(&ctx->refs); return; } /* If req is still hashed, it cannot have been canceled. Don't check. 
*/ - if (hash_hashed(&req->hash_node)) { + if (hash_hashed(&req->hash_node)) hash_del(&req->hash_node); - } else { - canceled = READ_ONCE(apoll->poll.canceled); - if (canceled) { - io_cqring_fill_event(req, -ECANCELED); - io_commit_cqring(ctx); - } - } - io_poll_remove_double(req, apoll->double_poll); + io_poll_remove_double(req); spin_unlock_irq(&ctx->completion_lock); - /* restore ->work in case we need to retry again */ - if (req->flags & REQ_F_WORK_INITIALIZED) - memcpy(&req->work, &apoll->work, sizeof(req->work)); + if (!READ_ONCE(apoll->poll.canceled)) + __io_req_task_submit(req); + else + __io_req_task_cancel(req, -ECANCELED); + + percpu_ref_put(&ctx->refs); kfree(apoll->double_poll); kfree(apoll); - - if (!canceled) { - __set_current_state(TASK_RUNNING); - if (io_sq_thread_acquire_mm(ctx, req)) { - io_cqring_add_event(req, -EFAULT); - goto end_req; - } - mutex_lock(&ctx->uring_lock); - __io_queue_sqe(req, NULL); - mutex_unlock(&ctx->uring_lock); - } else { - io_cqring_ev_posted(ctx); -end_req: - req_set_fail_links(req); - io_double_put_req(req); - } } static int io_async_wake(struct wait_queue_entry *wait, unsigned mode, int sync, @@ -4403,8 +4859,8 @@ static __poll_t __io_arm_poll_handler(struct io_kiocb *req, struct io_ring_ctx *ctx = req->ctx; bool cancel = false; - poll->file = req->file; io_init_poll_iocb(poll, mask, wake_func); + poll->file = req->file; poll->wait.private = req; ipt->pt._key = mask; @@ -4441,12 +4897,20 @@ static bool io_arm_poll_handler(struct io_kiocb *req) struct async_poll *apoll; struct io_poll_table ipt; __poll_t mask, ret; + int rw; if (!req->file || !file_can_poll(req->file)) return false; - if (req->flags & (REQ_F_MUST_PUNT | REQ_F_POLLED)) + if (req->flags & REQ_F_POLLED) return false; - if (!def->pollin && !def->pollout) + if (def->pollin) + rw = READ; + else if (def->pollout) + rw = WRITE; + else + return false; + /* if we can't nonblock try, then no point in arming a poll handler */ + if (!io_file_supports_async(req->file, rw)) return false; apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC); @@ -4455,9 +4919,6 @@ static bool io_arm_poll_handler(struct io_kiocb *req) apoll->double_poll = NULL; req->flags |= REQ_F_POLLED; - if (req->flags & REQ_F_WORK_INITIALIZED) - memcpy(&apoll->work, &req->work, sizeof(req->work)); - io_get_req_task(req); req->apoll = apoll; INIT_HLIST_NODE(&req->hash_node); @@ -4473,11 +4934,9 @@ static bool io_arm_poll_handler(struct io_kiocb *req) ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, io_async_wake); - if (ret) { - io_poll_remove_double(req, apoll->double_poll); + if (ret || ipt.error) { + io_poll_remove_double(req); spin_unlock_irq(&ctx->completion_lock); - if (req->flags & REQ_F_WORK_INITIALIZED) - memcpy(&req->work, &apoll->work, sizeof(req->work)); kfree(apoll->double_poll); kfree(apoll); return false; @@ -4508,26 +4967,17 @@ static bool io_poll_remove_one(struct io_kiocb *req) { bool do_complete; + io_poll_remove_double(req); + if (req->opcode == IORING_OP_POLL_ADD) { - io_poll_remove_double(req, req->io); do_complete = __io_poll_remove_one(req, &req->poll); } else { struct async_poll *apoll = req->apoll; - io_poll_remove_double(req, apoll->double_poll); - /* non-poll requests have submit ref still */ do_complete = __io_poll_remove_one(req, &apoll->poll); if (do_complete) { io_put_req(req); - /* - * restore ->work because we will call - * io_req_work_drop_env below when dropping the - * final reference. 
- */ - if (req->flags & REQ_F_WORK_INITIALIZED) - memcpy(&req->work, &apoll->work, - sizeof(req->work)); kfree(apoll->double_poll); kfree(apoll); } @@ -4537,6 +4987,7 @@ static bool io_poll_remove_one(struct io_kiocb *req) io_cqring_fill_event(req, -ECANCELED); io_commit_cqring(req->ctx); req->flags |= REQ_F_COMP_LOCKED; + req_set_fail_links(req); io_put_req(req); } @@ -4608,10 +5059,9 @@ static int io_poll_remove(struct io_kiocb *req) ret = io_poll_cancel(ctx, addr); spin_unlock_irq(&ctx->completion_lock); - io_cqring_add_event(req, ret); if (ret < 0) req_set_fail_links(req); - io_put_req(req); + io_req_complete(req, ret); return 0; } @@ -4635,7 +5085,7 @@ static void io_poll_queue_proc(struct file *file, struct wait_queue_head *head, static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_poll_iocb *poll = &req->poll; - u16 events; + u32 events; if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) return -EINVAL; @@ -4644,8 +5094,12 @@ static int io_poll_add_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe if (!poll->file) return -EBADF; - events = READ_ONCE(sqe->poll_events); - poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP; + events = READ_ONCE(sqe->poll32_events); +#ifdef __BIG_ENDIAN + events = swahw32(events); +#endif + poll->events = demangle_poll(events) | EPOLLERR | EPOLLHUP | + (events & EPOLLEXCLUSIVE); io_get_req_task(req); return 0; @@ -4659,7 +5113,6 @@ static int io_poll_add(struct io_kiocb *req) __poll_t mask; INIT_HLIST_NODE(&req->hash_node); - INIT_LIST_HEAD(&req->list); ipt.pt._qproc = io_poll_queue_proc; mask = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events, @@ -4686,15 +5139,16 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) struct io_ring_ctx *ctx = req->ctx; unsigned long flags; - atomic_inc(&ctx->cq_timeouts); - spin_lock_irqsave(&ctx->completion_lock, flags); + atomic_set(&req->ctx->cq_timeouts, + atomic_read(&req->ctx->cq_timeouts) + 1); + /* * We could be racing with timeout deletion. If the list is empty, * then timeout lookup already found it and will be handling it. 
*/ - if (!list_empty(&req->list)) - list_del_init(&req->list); + if (!list_empty(&req->timeout.list)) + list_del_init(&req->timeout.list); io_cqring_fill_event(req, -ETIME); io_commit_cqring(ctx); @@ -4706,14 +5160,30 @@ static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer) return HRTIMER_NORESTART; } +static int __io_timeout_cancel(struct io_kiocb *req) +{ + int ret; + + list_del_init(&req->timeout.list); + + ret = hrtimer_try_to_cancel(&req->io->timeout.timer); + if (ret == -1) + return -EALREADY; + + req_set_fail_links(req); + req->flags |= REQ_F_COMP_LOCKED; + io_cqring_fill_event(req, -ECANCELED); + io_put_req(req); + return 0; +} + static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) { struct io_kiocb *req; int ret = -ENOENT; - list_for_each_entry(req, &ctx->timeout_list, list) { + list_for_each_entry(req, &ctx->timeout_list, timeout.list) { if (user_data == req->user_data) { - list_del_init(&req->list); ret = 0; break; } @@ -4722,14 +5192,7 @@ static int io_timeout_cancel(struct io_ring_ctx *ctx, __u64 user_data) if (ret == -ENOENT) return ret; - ret = hrtimer_try_to_cancel(&req->io->timeout.timer); - if (ret == -1) - return -EALREADY; - - req_set_fail_links(req); - io_cqring_fill_event(req, -ECANCELED); - io_put_req(req); - return 0; + return __io_timeout_cancel(req); } static int io_timeout_remove_prep(struct io_kiocb *req, @@ -4795,7 +5258,6 @@ static int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe, data = &req->io->timeout; data->req = req; - req->flags |= REQ_F_TIMEOUT; if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr))) return -EFAULT; @@ -4823,8 +5285,7 @@ static int io_timeout(struct io_kiocb *req) * timeout event to be satisfied. If it isn't set, then this is * a pure timeout request, sequence isn't used. */ - if (!off) { - req->flags |= REQ_F_TIMEOUT_NOSEQ; + if (io_is_timeout_noseq(req)) { entry = ctx->timeout_list.prev; goto add; } @@ -4837,16 +5298,17 @@ static int io_timeout(struct io_kiocb *req) * the one we need first. 
*/ list_for_each_prev(entry, &ctx->timeout_list) { - struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list); + struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, + timeout.list); - if (nxt->flags & REQ_F_TIMEOUT_NOSEQ) + if (io_is_timeout_noseq(nxt)) continue; /* nxt.seq is behind @tail, otherwise would've been completed */ if (off >= nxt->timeout.target_seq - tail) break; } add: - list_add(&req->list, entry); + list_add(&req->timeout.list, entry); data->timer.function = io_timeout_fn; hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); spin_unlock_irq(&ctx->completion_lock); @@ -4950,7 +5412,8 @@ static int io_files_update_prep(struct io_kiocb *req, return 0; } -static int io_files_update(struct io_kiocb *req, bool force_nonblock) +static int io_files_update(struct io_kiocb *req, bool force_nonblock, + struct io_comp_state *cs) { struct io_ring_ctx *ctx = req->ctx; struct io_uring_files_update up; @@ -4968,8 +5431,7 @@ static int io_files_update(struct io_kiocb *req, bool force_nonblock) if (ret < 0) req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + __io_req_complete(req, ret, 0, cs); return 0; } @@ -4981,15 +5443,11 @@ static int io_req_defer_prep(struct io_kiocb *req, if (!sqe) return 0; - io_req_init_async(req); - - if (io_op_defs[req->opcode].file_table) { - ret = io_grab_files(req); - if (unlikely(ret)) - return ret; - } - - io_req_work_grab_env(req, &io_op_defs[req->opcode]); + if (io_alloc_async_ctx(req)) + return -EAGAIN; + ret = io_prep_work_files(req); + if (unlikely(ret)) + return ret; switch (req->opcode) { case IORING_OP_NOP: @@ -5091,86 +5549,129 @@ static int io_req_defer_prep(struct io_kiocb *req, return ret; } +static u32 io_get_sequence(struct io_kiocb *req) +{ + struct io_kiocb *pos; + struct io_ring_ctx *ctx = req->ctx; + u32 total_submitted, nr_reqs = 1; + + if (req->flags & REQ_F_LINK_HEAD) + list_for_each_entry(pos, &req->link_list, link_list) + nr_reqs++; + + total_submitted = ctx->cached_sq_head - ctx->cached_sq_dropped; + return total_submitted - nr_reqs; +} + static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe) { struct io_ring_ctx *ctx = req->ctx; + struct io_defer_entry *de; int ret; + u32 seq; /* Still need defer if there is pending req in defer list. 
*/ - if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list)) + if (likely(list_empty_careful(&ctx->defer_list) && + !(req->flags & REQ_F_IO_DRAIN))) + return 0; + + seq = io_get_sequence(req); + /* Still a chance to pass the sequence check */ + if (!req_need_defer(req, seq) && list_empty_careful(&ctx->defer_list)) return 0; if (!req->io) { - if (io_alloc_async_ctx(req)) - return -EAGAIN; ret = io_req_defer_prep(req, sqe); - if (ret < 0) + if (ret) return ret; } + io_prep_async_link(req); + de = kmalloc(sizeof(*de), GFP_KERNEL); + if (!de) + return -ENOMEM; spin_lock_irq(&ctx->completion_lock); - if (!req_need_defer(req) && list_empty(&ctx->defer_list)) { + if (!req_need_defer(req, seq) && list_empty(&ctx->defer_list)) { spin_unlock_irq(&ctx->completion_lock); - return 0; + kfree(de); + io_queue_async_work(req); + return -EIOCBQUEUED; } trace_io_uring_defer(ctx, req, req->user_data); - list_add_tail(&req->list, &ctx->defer_list); + de->req = req; + de->seq = seq; + list_add_tail(&de->list, &ctx->defer_list); spin_unlock_irq(&ctx->completion_lock); return -EIOCBQUEUED; } -static void io_cleanup_req(struct io_kiocb *req) +static void __io_clean_op(struct io_kiocb *req) { struct io_async_ctx *io = req->io; - switch (req->opcode) { - case IORING_OP_READV: - case IORING_OP_READ_FIXED: - case IORING_OP_READ: - if (req->flags & REQ_F_BUFFER_SELECTED) + if (req->flags & REQ_F_BUFFER_SELECTED) { + switch (req->opcode) { + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_READ: kfree((void *)(unsigned long)req->rw.addr); - /* fallthrough */ - case IORING_OP_WRITEV: - case IORING_OP_WRITE_FIXED: - case IORING_OP_WRITE: - if (io->rw.iov != io->rw.fast_iov) - kfree(io->rw.iov); - break; - case IORING_OP_RECVMSG: - if (req->flags & REQ_F_BUFFER_SELECTED) - kfree(req->sr_msg.kbuf); - /* fallthrough */ - case IORING_OP_SENDMSG: - if (io->msg.iov != io->msg.fast_iov) - kfree(io->msg.iov); - break; - case IORING_OP_RECV: - if (req->flags & REQ_F_BUFFER_SELECTED) + break; + case IORING_OP_RECVMSG: + case IORING_OP_RECV: kfree(req->sr_msg.kbuf); - break; - case IORING_OP_OPENAT: - case IORING_OP_OPENAT2: - break; - case IORING_OP_SPLICE: - case IORING_OP_TEE: - io_put_file(req, req->splice.file_in, - (req->splice.flags & SPLICE_F_FD_IN_FIXED)); - break; + break; + } + req->flags &= ~REQ_F_BUFFER_SELECTED; + } + + if (req->flags & REQ_F_NEED_CLEANUP) { + switch (req->opcode) { + case IORING_OP_READV: + case IORING_OP_READ_FIXED: + case IORING_OP_READ: + case IORING_OP_WRITEV: + case IORING_OP_WRITE_FIXED: + case IORING_OP_WRITE: + if (io->rw.free_iovec) + kfree(io->rw.free_iovec); + break; + case IORING_OP_RECVMSG: + case IORING_OP_SENDMSG: + if (io->msg.iov != io->msg.fast_iov) + kfree(io->msg.iov); + break; + case IORING_OP_SPLICE: + case IORING_OP_TEE: + io_put_file(req, req->splice.file_in, + (req->splice.flags & SPLICE_F_FD_IN_FIXED)); + break; + } + req->flags &= ~REQ_F_NEED_CLEANUP; } - req->flags &= ~REQ_F_NEED_CLEANUP; + if (req->flags & REQ_F_INFLIGHT) { + struct io_ring_ctx *ctx = req->ctx; + unsigned long flags; + + spin_lock_irqsave(&ctx->inflight_lock, flags); + list_del(&req->inflight_entry); + if (waitqueue_active(&ctx->inflight_wait)) + wake_up(&ctx->inflight_wait); + spin_unlock_irqrestore(&ctx->inflight_lock, flags); + req->flags &= ~REQ_F_INFLIGHT; + } } static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, - bool force_nonblock) + bool force_nonblock, struct io_comp_state *cs) { struct io_ring_ctx *ctx = req->ctx; int ret; switch 
(req->opcode) { case IORING_OP_NOP: - ret = io_nop(req); + ret = io_nop(req, cs); break; case IORING_OP_READV: case IORING_OP_READ_FIXED: @@ -5180,7 +5681,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret < 0) break; } - ret = io_read(req, force_nonblock); + ret = io_read(req, force_nonblock, cs); break; case IORING_OP_WRITEV: case IORING_OP_WRITE_FIXED: @@ -5190,7 +5691,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret < 0) break; } - ret = io_write(req, force_nonblock); + ret = io_write(req, force_nonblock, cs); break; case IORING_OP_FSYNC: if (sqe) { @@ -5232,9 +5733,9 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, break; } if (req->opcode == IORING_OP_SENDMSG) - ret = io_sendmsg(req, force_nonblock); + ret = io_sendmsg(req, force_nonblock, cs); else - ret = io_send(req, force_nonblock); + ret = io_send(req, force_nonblock, cs); break; case IORING_OP_RECVMSG: case IORING_OP_RECV: @@ -5244,9 +5745,9 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, break; } if (req->opcode == IORING_OP_RECVMSG) - ret = io_recvmsg(req, force_nonblock); + ret = io_recvmsg(req, force_nonblock, cs); else - ret = io_recv(req, force_nonblock); + ret = io_recv(req, force_nonblock, cs); break; case IORING_OP_TIMEOUT: if (sqe) { @@ -5270,7 +5771,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret) break; } - ret = io_accept(req, force_nonblock); + ret = io_accept(req, force_nonblock, cs); break; case IORING_OP_CONNECT: if (sqe) { @@ -5278,7 +5779,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret) break; } - ret = io_connect(req, force_nonblock); + ret = io_connect(req, force_nonblock, cs); break; case IORING_OP_ASYNC_CANCEL: if (sqe) { @@ -5310,7 +5811,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret) break; } - ret = io_close(req, force_nonblock); + ret = io_close(req, force_nonblock, cs); break; case IORING_OP_FILES_UPDATE: if (sqe) { @@ -5318,7 +5819,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret) break; } - ret = io_files_update(req, force_nonblock); + ret = io_files_update(req, force_nonblock, cs); break; case IORING_OP_STATX: if (sqe) { @@ -5358,7 +5859,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret) break; } - ret = io_epoll_ctl(req, force_nonblock); + ret = io_epoll_ctl(req, force_nonblock, cs); break; case IORING_OP_SPLICE: if (sqe) { @@ -5374,7 +5875,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret) break; } - ret = io_provide_buffers(req, force_nonblock); + ret = io_provide_buffers(req, force_nonblock, cs); break; case IORING_OP_REMOVE_BUFFERS: if (sqe) { @@ -5382,7 +5883,7 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, if (ret) break; } - ret = io_remove_buffers(req, force_nonblock); + ret = io_remove_buffers(req, force_nonblock, cs); break; case IORING_OP_TEE: if (sqe) { @@ -5417,25 +5918,15 @@ static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, return 0; } -static void io_arm_async_linked_timeout(struct io_kiocb *req) -{ - struct io_kiocb *link; - - /* link head's timeout is queued in io_queue_async_work() */ - if (!(req->flags & REQ_F_QUEUE_TIMEOUT)) - return; - - link = list_first_entry(&req->link_list, struct io_kiocb, link_list); - 
io_queue_linked_timeout(link); -} - -static void io_wq_submit_work(struct io_wq_work **workptr) +static struct io_wq_work *io_wq_submit_work(struct io_wq_work *work) { - struct io_wq_work *work = *workptr; struct io_kiocb *req = container_of(work, struct io_kiocb, work); + struct io_kiocb *timeout; int ret = 0; - io_arm_async_linked_timeout(req); + timeout = io_prep_linked_timeout(req); + if (timeout) + io_queue_linked_timeout(timeout); /* if NO_CANCEL is set, we must still run the work */ if ((work->flags & (IO_WQ_WORK_CANCEL|IO_WQ_WORK_NO_CANCEL)) == @@ -5445,7 +5936,7 @@ static void io_wq_submit_work(struct io_wq_work **workptr) if (!ret) { do { - ret = io_issue_sqe(req, NULL, false); + ret = io_issue_sqe(req, NULL, false, NULL); /* * We can get EAGAIN for polled IO even though we're * forcing a sync submission from here, since we can't @@ -5459,11 +5950,10 @@ static void io_wq_submit_work(struct io_wq_work **workptr) if (ret) { req_set_fail_links(req); - io_cqring_add_event(req, ret); - io_put_req(req); + io_req_complete(req, ret); } - io_steal_work(req, workptr); + return io_steal_work(req); } static inline struct file *io_file_from_index(struct io_ring_ctx *ctx, @@ -5520,6 +6010,8 @@ static int io_grab_files(struct io_kiocb *req) int ret = -EBADF; struct io_ring_ctx *ctx = req->ctx; + io_req_init_async(req); + if (req->work.files || (req->flags & REQ_F_NO_FILE_TABLE)) return 0; if (!ctx->ring_file) @@ -5545,6 +6037,13 @@ static int io_grab_files(struct io_kiocb *req) return ret; } +static inline int io_prep_work_files(struct io_kiocb *req) +{ + if (!io_op_defs[req->opcode].file_table) + return 0; + return io_grab_files(req); +} + static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) { struct io_timeout_data *data = container_of(timer, @@ -5577,21 +6076,17 @@ static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer) io_async_find_and_cancel(ctx, req, prev->user_data, -ETIME); io_put_req(prev); } else { - io_cqring_add_event(req, -ETIME); - io_put_req(req); + io_req_complete(req, -ETIME); } return HRTIMER_NORESTART; } -static void io_queue_linked_timeout(struct io_kiocb *req) +static void __io_queue_linked_timeout(struct io_kiocb *req) { - struct io_ring_ctx *ctx = req->ctx; - /* * If the list is now empty, then our linked request finished before * we got a chance to setup the timer */ - spin_lock_irq(&ctx->completion_lock); if (!list_empty(&req->link_list)) { struct io_timeout_data *data = &req->io->timeout; @@ -5599,6 +6094,14 @@ static void io_queue_linked_timeout(struct io_kiocb *req) hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode); } +} + +static void io_queue_linked_timeout(struct io_kiocb *req) +{ + struct io_ring_ctx *ctx = req->ctx; + + spin_lock_irq(&ctx->completion_lock); + __io_queue_linked_timeout(req); spin_unlock_irq(&ctx->completion_lock); /* drop submission reference */ @@ -5611,8 +6114,7 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) if (!(req->flags & REQ_F_LINK_HEAD)) return NULL; - /* for polled retry, if flag is set, we already went through here */ - if (req->flags & REQ_F_POLLED) + if (req->flags & REQ_F_LINK_TIMEOUT) return NULL; nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, @@ -5624,7 +6126,8 @@ static struct io_kiocb *io_prep_linked_timeout(struct io_kiocb *req) return nxt; } -static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_comp_state *cs) { 
struct io_kiocb *linked_timeout; struct io_kiocb *nxt; @@ -5644,54 +6147,45 @@ again: old_creds = override_creds(req->work.creds); } - ret = io_issue_sqe(req, sqe, true); + ret = io_issue_sqe(req, sqe, true, cs); /* * We async punt it if the file wasn't marked NOWAIT, or if the file * doesn't support non-blocking read/write attempts */ - if (ret == -EAGAIN && (!(req->flags & REQ_F_NOWAIT) || - (req->flags & REQ_F_MUST_PUNT))) { - if (io_arm_poll_handler(req)) { - if (linked_timeout) - io_queue_linked_timeout(linked_timeout); - goto exit; - } + if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { + if (!io_arm_poll_handler(req)) { punt: - io_req_init_async(req); - - if (io_op_defs[req->opcode].file_table) { - ret = io_grab_files(req); - if (ret) + ret = io_prep_work_files(req); + if (unlikely(ret)) goto err; + /* + * Queued up for async execution, worker will release + * submit reference when the iocb is actually submitted. + */ + io_queue_async_work(req); } - /* - * Queued up for async execution, worker will release - * submit reference when the iocb is actually submitted. - */ - io_queue_async_work(req); + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); goto exit; } + if (unlikely(ret)) { err: - nxt = NULL; - /* drop submission reference */ - io_put_req_find_next(req, &nxt); - - if (linked_timeout) { - if (!ret) - io_queue_linked_timeout(linked_timeout); - else - io_put_req(linked_timeout); - } - - /* and drop final reference, if we failed */ - if (ret) { - io_cqring_add_event(req, ret); + /* un-prep timeout, so it'll be killed as any other linked */ + req->flags &= ~REQ_F_LINK_TIMEOUT; req_set_fail_links(req); io_put_req(req); + io_req_complete(req, ret); + goto exit; } + + /* drop submission reference */ + nxt = io_put_req_find_next(req); + if (linked_timeout) + io_queue_linked_timeout(linked_timeout); + if (nxt) { req = nxt; @@ -5704,7 +6198,8 @@ exit: revert_creds(old_creds); } -static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) +static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, + struct io_comp_state *cs) { int ret; @@ -5712,17 +6207,14 @@ static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe) if (ret) { if (ret != -EIOCBQUEUED) { fail_req: - io_cqring_add_event(req, ret); req_set_fail_links(req); - io_double_put_req(req); + io_put_req(req); + io_req_complete(req, ret); } } else if (req->flags & REQ_F_FORCE_ASYNC) { if (!req->io) { - ret = -EAGAIN; - if (io_alloc_async_ctx(req)) - goto fail_req; ret = io_req_defer_prep(req, sqe); - if (unlikely(ret < 0)) + if (unlikely(ret)) goto fail_req; } @@ -5734,21 +6226,22 @@ fail_req: req->work.flags |= IO_WQ_WORK_CONCURRENT; io_queue_async_work(req); } else { - __io_queue_sqe(req, sqe); + __io_queue_sqe(req, sqe, cs); } } -static inline void io_queue_link_head(struct io_kiocb *req) +static inline void io_queue_link_head(struct io_kiocb *req, + struct io_comp_state *cs) { if (unlikely(req->flags & REQ_F_FAIL_LINK)) { - io_cqring_add_event(req, -ECANCELED); - io_double_put_req(req); + io_put_req(req); + io_req_complete(req, -ECANCELED); } else - io_queue_sqe(req, NULL); + io_queue_sqe(req, NULL, cs); } static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, - struct io_kiocb **link) + struct io_kiocb **link, struct io_comp_state *cs) { struct io_ring_ctx *ctx = req->ctx; int ret; @@ -5774,21 +6267,19 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, head->flags |= REQ_F_IO_DRAIN; ctx->drain_next 
= 1; } - if (io_alloc_async_ctx(req)) - return -EAGAIN; - ret = io_req_defer_prep(req, sqe); - if (ret) { + if (unlikely(ret)) { /* fail even hard links since we don't submit */ head->flags |= REQ_F_FAIL_LINK; return ret; } trace_io_uring_link(ctx, req, head); + io_get_req_task(req); list_add_tail(&req->link_list, &head->link_list); /* last request of a link, enqueue the link */ if (!(req->flags & (REQ_F_LINK | REQ_F_HARDLINK))) { - io_queue_link_head(head); + io_queue_link_head(head, cs); *link = NULL; } } else { @@ -5800,15 +6291,12 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, req->flags |= REQ_F_LINK_HEAD; INIT_LIST_HEAD(&req->link_list); - if (io_alloc_async_ctx(req)) - return -EAGAIN; - ret = io_req_defer_prep(req, sqe); - if (ret) + if (unlikely(ret)) req->flags |= REQ_F_FAIL_LINK; *link = req; } else { - io_queue_sqe(req, sqe); + io_queue_sqe(req, sqe, cs); } } @@ -5820,6 +6308,8 @@ static int io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe, */ static void io_submit_state_end(struct io_submit_state *state) { + if (!list_empty(&state->comp.list)) + io_submit_flush_completions(&state->comp); blk_finish_plug(&state->plug); io_state_file_put(state); if (state->free_reqs) @@ -5830,9 +6320,15 @@ static void io_submit_state_end(struct io_submit_state *state) * Start submission side cache. */ static void io_submit_state_start(struct io_submit_state *state, - unsigned int max_ios) + struct io_ring_ctx *ctx, unsigned int max_ios) { blk_start_plug(&state->plug); +#ifdef CONFIG_BLOCK + state->plug.nowait = true; +#endif + state->comp.nr = 0; + INIT_LIST_HEAD(&state->comp.list); + state->comp.ctx = ctx; state->free_reqs = 0; state->file = NULL; state->ios_left = max_ios; @@ -5897,12 +6393,6 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, unsigned int sqe_flags; int id; - /* - * All io need record the previous position, if LINK vs DARIN, - * it can be used to mark the position of the first IO in the - * link list. 
- */ - req->sequence = ctx->cached_sq_head - ctx->cached_sq_dropped; req->opcode = READ_ONCE(sqe->opcode); req->user_data = READ_ONCE(sqe->user_data); req->io = NULL; @@ -5950,7 +6440,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req, static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, struct file *ring_file, int ring_fd) { - struct io_submit_state state, *statep = NULL; + struct io_submit_state state; struct io_kiocb *link = NULL; int i, submitted = 0; @@ -5967,10 +6457,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, if (!percpu_ref_tryget_many(&ctx->refs, nr)) return -EAGAIN; - if (nr > IO_PLUG_THRESHOLD) { - io_submit_state_start(&state, nr); - statep = &state; - } + io_submit_state_start(&state, ctx, nr); ctx->ring_fd = ring_fd; ctx->ring_file = ring_file; @@ -5985,28 +6472,28 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr, io_consume_sqe(ctx); break; } - req = io_alloc_req(ctx, statep); + req = io_alloc_req(ctx, &state); if (unlikely(!req)) { if (!submitted) submitted = -EAGAIN; break; } - err = io_init_req(ctx, req, sqe, statep); + err = io_init_req(ctx, req, sqe, &state); io_consume_sqe(ctx); /* will complete beyond this point, count as submitted */ submitted++; if (unlikely(err)) { fail_req: - io_cqring_add_event(req, err); - io_double_put_req(req); + io_put_req(req); + io_req_complete(req, err); break; } trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data, true, io_async_submit(ctx)); - err = io_submit_sqe(req, sqe, &link); + err = io_submit_sqe(req, sqe, &link, &state.comp); if (err) goto fail_req; } @@ -6017,9 +6504,8 @@ fail_req: percpu_ref_put_many(&ctx->refs, nr - ref_used); } if (link) - io_queue_link_head(link); - if (statep) - io_submit_state_end(&state); + io_queue_link_head(link, &state.comp); + io_submit_state_end(&state); /* Commit SQ ring head once we've consumed and submitted all SQEs */ io_commit_sqring(ctx); @@ -6027,6 +6513,21 @@ fail_req: return submitted; } +static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx) +{ + /* Tell userspace we may need a wakeup call */ + spin_lock_irq(&ctx->completion_lock); + ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; + spin_unlock_irq(&ctx->completion_lock); +} + +static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx) +{ + spin_lock_irq(&ctx->completion_lock); + ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; + spin_unlock_irq(&ctx->completion_lock); +} + static int io_sq_thread(void *data) { struct io_ring_ctx *ctx = data; @@ -6043,12 +6544,12 @@ static int io_sq_thread(void *data) while (!kthread_should_park()) { unsigned int to_submit; - if (!list_empty(&ctx->poll_list)) { + if (!list_empty(&ctx->iopoll_list)) { unsigned nr_events = 0; mutex_lock(&ctx->uring_lock); - if (!list_empty(&ctx->poll_list)) - io_iopoll_getevents(ctx, &nr_events, 0); + if (!list_empty(&ctx->iopoll_list) && !need_resched()) + io_do_iopoll(ctx, &nr_events, 0); else timeout = jiffies + ctx->sq_thread_idle; mutex_unlock(&ctx->uring_lock); @@ -6067,7 +6568,7 @@ static int io_sq_thread(void *data) * adding ourselves to the waitqueue, as the unuse/drop * may sleep. */ - io_sq_thread_drop_mm(ctx); + io_sq_thread_drop_mm(); /* * We're polling. If we're within the defined idle @@ -6076,11 +6577,10 @@ static int io_sq_thread(void *data) * more IO, we should wait for the application to * reap events and wake us up. 
*/ - if (!list_empty(&ctx->poll_list) || need_resched() || + if (!list_empty(&ctx->iopoll_list) || need_resched() || (!time_after(jiffies, timeout) && ret != -EBUSY && !percpu_ref_is_dying(&ctx->refs))) { - if (current->task_works) - task_work_run(); + io_run_task_work(); cond_resched(); continue; } @@ -6090,21 +6590,18 @@ static int io_sq_thread(void *data) /* * While doing polled IO, before going to sleep, we need - * to check if there are new reqs added to poll_list, it - * is because reqs may have been punted to io worker and - * will be added to poll_list later, hence check the - * poll_list again. + * to check if there are new reqs added to iopoll_list, + * it is because reqs may have been punted to io worker + * and will be added to iopoll_list later, hence check + * the iopoll_list again. */ if ((ctx->flags & IORING_SETUP_IOPOLL) && - !list_empty_careful(&ctx->poll_list)) { + !list_empty_careful(&ctx->iopoll_list)) { finish_wait(&ctx->sqo_wait, &wait); continue; } - /* Tell userspace we may need a wakeup call */ - spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP; - spin_unlock_irq(&ctx->completion_lock); + io_ring_set_wakeup_flag(ctx); to_submit = io_sqring_entries(ctx); if (!to_submit || ret == -EBUSY) { @@ -6112,9 +6609,9 @@ static int io_sq_thread(void *data) finish_wait(&ctx->sqo_wait, &wait); break; } - if (current->task_works) { - task_work_run(); + if (io_run_task_work()) { finish_wait(&ctx->sqo_wait, &wait); + io_ring_clear_wakeup_flag(ctx); continue; } if (signal_pending(current)) @@ -6122,17 +6619,13 @@ static int io_sq_thread(void *data) schedule(); finish_wait(&ctx->sqo_wait, &wait); - spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; - spin_unlock_irq(&ctx->completion_lock); + io_ring_clear_wakeup_flag(ctx); ret = 0; continue; } finish_wait(&ctx->sqo_wait, &wait); - spin_lock_irq(&ctx->completion_lock); - ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP; - spin_unlock_irq(&ctx->completion_lock); + io_ring_clear_wakeup_flag(ctx); } mutex_lock(&ctx->uring_lock); @@ -6142,10 +6635,9 @@ static int io_sq_thread(void *data) timeout = jiffies + ctx->sq_thread_idle; } - if (current->task_works) - task_work_run(); + io_run_task_work(); - io_sq_thread_drop_mm(ctx); + io_sq_thread_drop_mm(); revert_creds(old_cred); kthread_parkme(); @@ -6208,9 +6700,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, do { if (io_cqring_events(ctx, false) >= min_events) return 0; - if (!current->task_works) + if (!io_run_task_work()) break; - task_work_run(); } while (1); if (sig) { @@ -6232,8 +6723,8 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events, prepare_to_wait_exclusive(&ctx->wait, &iowq.wq, TASK_INTERRUPTIBLE); /* make sure we run task_work before checking for signals */ - if (current->task_works) - task_work_run(); + if (io_run_task_work()) + continue; if (signal_pending(current)) { if (current->jobctl & JOBCTL_TASK_WORK) { spin_lock_irq(¤t->sighand->siglock); @@ -6844,7 +7335,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, table = &ctx->file_data->table[i >> IORING_FILE_TABLE_SHIFT]; index = i & IORING_FILE_TABLE_MASK; if (table->files[index]) { - file = io_file_from_index(ctx, index); + file = table->files[index]; err = io_queue_file_removal(data, file); if (err) break; @@ -6873,6 +7364,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx, table->files[index] = file; err = io_sqe_file_register(ctx, file, i); if (err) { + table->files[index] = NULL; 
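/*
 * [Editor's note: illustrative sketch, not part of this patch.]  The
 * io_ring_set_wakeup_flag()/io_ring_clear_wakeup_flag() helpers above publish
 * the SQPOLL thread's idle state to userspace via the mmap'ed sq_flags word.
 * A submitter only needs to call io_uring_enter() when IORING_SQ_NEED_WAKEUP
 * is set; the mapped sq_flags pointer and the raw syscall wrapper below are
 * assumed boilerplate, not something this patch adds.
 */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

static long maybe_wake_sq_thread(int ring_fd, unsigned int to_submit,
				 const unsigned int *sq_flags /* mmap'ed SQ ring flags */)
{
	/* SQ thread still busy-polling: SQEs are picked up without a syscall. */
	if (!(__atomic_load_n(sq_flags, __ATOMIC_ACQUIRE) & IORING_SQ_NEED_WAKEUP))
		return 0;

	/* Thread went idle; kick it with IORING_ENTER_SQ_WAKEUP. */
	return syscall(__NR_io_uring_enter, ring_fd, to_submit, 0,
		       IORING_ENTER_SQ_WAKEUP, NULL, 0);
}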
fput(file); break; } @@ -6972,9 +7464,6 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, { int ret; - mmgrab(current->mm); - ctx->sqo_mm = current->mm; - if (ctx->flags & IORING_SETUP_SQPOLL) { ret = -EPERM; if (!capable(CAP_SYS_ADMIN)) @@ -7019,17 +7508,17 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx, return 0; err: io_finish_async(ctx); - mmdrop(ctx->sqo_mm); - ctx->sqo_mm = NULL; return ret; } -static void io_unaccount_mem(struct user_struct *user, unsigned long nr_pages) +static inline void __io_unaccount_mem(struct user_struct *user, + unsigned long nr_pages) { atomic_long_sub(nr_pages, &user->locked_vm); } -static int io_account_mem(struct user_struct *user, unsigned long nr_pages) +static inline int __io_account_mem(struct user_struct *user, + unsigned long nr_pages) { unsigned long page_limit, cur_pages, new_pages; @@ -7047,6 +7536,41 @@ static int io_account_mem(struct user_struct *user, unsigned long nr_pages) return 0; } +static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, + enum io_mem_account acct) +{ + if (ctx->limit_mem) + __io_unaccount_mem(ctx->user, nr_pages); + + if (ctx->sqo_mm) { + if (acct == ACCT_LOCKED) + ctx->sqo_mm->locked_vm -= nr_pages; + else if (acct == ACCT_PINNED) + atomic64_sub(nr_pages, &ctx->sqo_mm->pinned_vm); + } +} + +static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages, + enum io_mem_account acct) +{ + int ret; + + if (ctx->limit_mem) { + ret = __io_account_mem(ctx->user, nr_pages); + if (ret) + return ret; + } + + if (ctx->sqo_mm) { + if (acct == ACCT_LOCKED) + ctx->sqo_mm->locked_vm += nr_pages; + else if (acct == ACCT_PINNED) + atomic64_add(nr_pages, &ctx->sqo_mm->pinned_vm); + } + + return 0; +} + static void io_mem_free(void *ptr) { struct page *page; @@ -7083,6 +7607,9 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, return SIZE_MAX; #endif + if (sq_offset) + *sq_offset = off; + sq_array_size = array_size(sizeof(u32), sq_entries); if (sq_array_size == SIZE_MAX) return SIZE_MAX; @@ -7090,9 +7617,6 @@ static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries, if (check_add_overflow(off, sq_array_size, &off)) return SIZE_MAX; - if (sq_offset) - *sq_offset = off; - return off; } @@ -7121,8 +7645,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx) for (j = 0; j < imu->nr_bvecs; j++) unpin_user_page(imu->bvec[j].bv_page); - if (ctx->account_mem) - io_unaccount_mem(ctx->user, imu->nr_bvecs); + io_unaccount_mem(ctx, imu->nr_bvecs, ACCT_PINNED); kvfree(imu->bvec); imu->nr_bvecs = 0; } @@ -7205,11 +7728,9 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, start = ubuf >> PAGE_SHIFT; nr_pages = end - start; - if (ctx->account_mem) { - ret = io_account_mem(ctx->user, nr_pages); - if (ret) - goto err; - } + ret = io_account_mem(ctx, nr_pages, ACCT_PINNED); + if (ret) + goto err; ret = 0; if (!pages || nr_pages > got_pages) { @@ -7222,8 +7743,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, GFP_KERNEL); if (!pages || !vmas) { ret = -ENOMEM; - if (ctx->account_mem) - io_unaccount_mem(ctx->user, nr_pages); + io_unaccount_mem(ctx, nr_pages, ACCT_PINNED); goto err; } got_pages = nr_pages; @@ -7233,8 +7753,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, GFP_KERNEL); ret = -ENOMEM; if (!imu->bvec) { - if (ctx->account_mem) - io_unaccount_mem(ctx->user, nr_pages); + io_unaccount_mem(ctx, nr_pages, ACCT_PINNED); goto err; } @@ -7265,8 +7784,7 @@ 
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg, */ if (pret > 0) unpin_user_pages(pages, pret); - if (ctx->account_mem) - io_unaccount_mem(ctx->user, nr_pages); + io_unaccount_mem(ctx, nr_pages, ACCT_PINNED); kvfree(imu->bvec); goto err; } @@ -7350,11 +7868,12 @@ static void io_destroy_buffers(struct io_ring_ctx *ctx) static void io_ring_ctx_free(struct io_ring_ctx *ctx) { io_finish_async(ctx); - if (ctx->sqo_mm) + io_sqe_buffer_unregister(ctx); + if (ctx->sqo_mm) { mmdrop(ctx->sqo_mm); + ctx->sqo_mm = NULL; + } - io_iopoll_reap_events(ctx); - io_sqe_buffer_unregister(ctx); io_sqe_files_unregister(ctx); io_eventfd_unregister(ctx); io_destroy_buffers(ctx); @@ -7418,11 +7937,8 @@ static int io_remove_personalities(int id, void *p, void *data) static void io_ring_exit_work(struct work_struct *work) { - struct io_ring_ctx *ctx; - - ctx = container_of(work, struct io_ring_ctx, exit_work); - if (ctx->rings) - io_cqring_overflow_flush(ctx, true); + struct io_ring_ctx *ctx = container_of(work, struct io_ring_ctx, + exit_work); /* * If we're doing polled IO and end up having requests being @@ -7430,11 +7946,11 @@ static void io_ring_exit_work(struct work_struct *work) * we're waiting for refs to drop. We need to reap these manually, * as nobody else will be looking for them. */ - while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)) { - io_iopoll_reap_events(ctx); + do { if (ctx->rings) io_cqring_overflow_flush(ctx, true); - } + io_iopoll_try_reap_events(ctx); + } while (!wait_for_completion_timeout(&ctx->ref_comp, HZ/20)); io_ring_ctx_free(ctx); } @@ -7450,10 +7966,10 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) if (ctx->io_wq) io_wq_cancel_all(ctx->io_wq); - io_iopoll_reap_events(ctx); /* if we failed setting up the ctx, we might not have any rings */ if (ctx->rings) io_cqring_overflow_flush(ctx, true); + io_iopoll_try_reap_events(ctx); idr_for_each(&ctx->personality_idr, io_remove_personalities, ctx); /* @@ -7461,12 +7977,17 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx) * is closed but resources aren't reaped yet. This can cause * spurious failure in setting up a new ring. */ - if (ctx->account_mem) - io_unaccount_mem(ctx->user, - ring_pages(ctx->sq_entries, ctx->cq_entries)); + io_unaccount_mem(ctx, ring_pages(ctx->sq_entries, ctx->cq_entries), + ACCT_LOCKED); INIT_WORK(&ctx->exit_work, io_ring_exit_work); - queue_work(system_wq, &ctx->exit_work); + /* + * Use system_unbound_wq to avoid spawning tons of event kworkers + * if we're exiting a ton of rings at the same time. It just adds + * noise and overhead, there's no discernable change in runtime + * over using system_wq. 
+ */ + queue_work(system_unbound_wq, &ctx->exit_work); } static int io_uring_release(struct inode *inode, struct file *file) @@ -7485,12 +8006,152 @@ static bool io_wq_files_match(struct io_wq_work *work, void *data) return work->files == files; } +/* + * Returns true if 'preq' is the link parent of 'req' + */ +static bool io_match_link(struct io_kiocb *preq, struct io_kiocb *req) +{ + struct io_kiocb *link; + + if (!(preq->flags & REQ_F_LINK_HEAD)) + return false; + + list_for_each_entry(link, &preq->link_list, link_list) { + if (link == req) + return true; + } + + return false; +} + +static inline bool io_match_files(struct io_kiocb *req, + struct files_struct *files) +{ + return (req->flags & REQ_F_WORK_INITIALIZED) && req->work.files == files; +} + +static bool io_match_link_files(struct io_kiocb *req, + struct files_struct *files) +{ + struct io_kiocb *link; + + if (io_match_files(req, files)) + return true; + if (req->flags & REQ_F_LINK_HEAD) { + list_for_each_entry(link, &req->link_list, link_list) { + if (io_match_files(link, files)) + return true; + } + } + return false; +} + +/* + * We're looking to cancel 'req' because it's holding on to our files, but + * 'req' could be a link to another request. See if it is, and cancel that + * parent request if so. + */ +static bool io_poll_remove_link(struct io_ring_ctx *ctx, struct io_kiocb *req) +{ + struct hlist_node *tmp; + struct io_kiocb *preq; + bool found = false; + int i; + + spin_lock_irq(&ctx->completion_lock); + for (i = 0; i < (1U << ctx->cancel_hash_bits); i++) { + struct hlist_head *list; + + list = &ctx->cancel_hash[i]; + hlist_for_each_entry_safe(preq, tmp, list, hash_node) { + found = io_match_link(preq, req); + if (found) { + io_poll_remove_one(preq); + break; + } + } + } + spin_unlock_irq(&ctx->completion_lock); + return found; +} + +static bool io_timeout_remove_link(struct io_ring_ctx *ctx, + struct io_kiocb *req) +{ + struct io_kiocb *preq; + bool found = false; + + spin_lock_irq(&ctx->completion_lock); + list_for_each_entry(preq, &ctx->timeout_list, timeout.list) { + found = io_match_link(preq, req); + if (found) { + __io_timeout_cancel(preq); + break; + } + } + spin_unlock_irq(&ctx->completion_lock); + return found; +} + +static bool io_cancel_link_cb(struct io_wq_work *work, void *data) +{ + return io_match_link(container_of(work, struct io_kiocb, work), data); +} + +static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req) +{ + enum io_wq_cancel cret; + + /* cancel this particular work, if it's running */ + cret = io_wq_cancel_work(ctx->io_wq, &req->work); + if (cret != IO_WQ_CANCEL_NOTFOUND) + return; + + /* find links that hold this pending, cancel those */ + cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true); + if (cret != IO_WQ_CANCEL_NOTFOUND) + return; + + /* if we have a poll link holding this pending, cancel that */ + if (io_poll_remove_link(ctx, req)) + return; + + /* final option, timeout link is holding this req pending */ + io_timeout_remove_link(ctx, req); +} + +static void io_cancel_defer_files(struct io_ring_ctx *ctx, + struct files_struct *files) +{ + struct io_defer_entry *de = NULL; + LIST_HEAD(list); + + spin_lock_irq(&ctx->completion_lock); + list_for_each_entry_reverse(de, &ctx->defer_list, list) { + if (io_match_link_files(de->req, files)) { + list_cut_position(&list, &ctx->defer_list, &de->list); + break; + } + } + spin_unlock_irq(&ctx->completion_lock); + + while (!list_empty(&list)) { + de = list_first_entry(&list, struct io_defer_entry, list); + 
list_del_init(&de->list); + req_set_fail_links(de->req); + io_put_req(de->req); + io_req_complete(de->req, -ECANCELED); + kfree(de); + } +} + static void io_uring_cancel_files(struct io_ring_ctx *ctx, struct files_struct *files) { if (list_empty_careful(&ctx->inflight_list)) return; + io_cancel_defer_files(ctx, files); /* cancel all at once, should be faster than doing it one by one*/ io_wq_cancel_cb(ctx->io_wq, io_wq_files_match, files, true); @@ -7516,35 +8177,9 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx, /* We need to keep going until we don't find a matching req */ if (!cancel_req) break; - - if (cancel_req->flags & REQ_F_OVERFLOW) { - spin_lock_irq(&ctx->completion_lock); - list_del(&cancel_req->list); - cancel_req->flags &= ~REQ_F_OVERFLOW; - if (list_empty(&ctx->cq_overflow_list)) { - clear_bit(0, &ctx->sq_check_overflow); - clear_bit(0, &ctx->cq_check_overflow); - ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW; - } - spin_unlock_irq(&ctx->completion_lock); - - WRITE_ONCE(ctx->rings->cq_overflow, - atomic_inc_return(&ctx->cached_cq_overflow)); - - /* - * Put inflight ref and overflow ref. If that's - * all we had, then we're done with this request. - */ - if (refcount_sub_and_test(2, &cancel_req->refs)) { - io_free_req(cancel_req); - finish_wait(&ctx->inflight_wait, &wait); - continue; - } - } else { - io_wq_cancel_work(ctx->io_wq, &cancel_req->work); - io_put_req(cancel_req); - } - + /* cancel this request, or head link requests */ + io_attempt_cancel(ctx, cancel_req); + io_put_req(cancel_req); schedule(); finish_wait(&ctx->inflight_wait, &wait); } @@ -7652,8 +8287,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, int submitted = 0; struct fd f; - if (current->task_works) - task_work_run(); + io_run_task_work(); if (flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP)) return -EINVAL; @@ -7692,8 +8326,6 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, goto out; } if (flags & IORING_ENTER_GETEVENTS) { - unsigned nr_events = 0; - min_complete = min(min_complete, ctx->cq_entries); /* @@ -7704,7 +8336,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, */ if (ctx->flags & IORING_SETUP_IOPOLL && !(ctx->flags & IORING_SETUP_SQPOLL)) { - ret = io_iopoll_check(ctx, &nr_events, min_complete); + ret = io_iopoll_check(ctx, min_complete); } else { ret = io_cqring_wait(ctx, min_complete, sig, sigsz); } @@ -7825,6 +8457,10 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, struct io_rings *rings; size_t size, sq_array_offset; + /* make sure these are sane, as we already accounted them */ + ctx->sq_entries = p->sq_entries; + ctx->cq_entries = p->cq_entries; + size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset); if (size == SIZE_MAX) return -EOVERFLOW; @@ -7841,8 +8477,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx, rings->cq_ring_entries = p->cq_entries; ctx->sq_mask = rings->sq_ring_mask; ctx->cq_mask = rings->cq_ring_mask; - ctx->sq_entries = rings->sq_ring_entries; - ctx->cq_entries = rings->cq_ring_entries; size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); if (size == SIZE_MAX) { @@ -7909,7 +8543,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, { struct user_struct *user = NULL; struct io_ring_ctx *ctx; - bool account_mem; + bool limit_mem; int ret; if (!entries) @@ -7948,10 +8582,10 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, } user = get_uid(current_user()); - account_mem = !capable(CAP_IPC_LOCK); 
+ limit_mem = !capable(CAP_IPC_LOCK); - if (account_mem) { - ret = io_account_mem(user, + if (limit_mem) { + ret = __io_account_mem(user, ring_pages(p->sq_entries, p->cq_entries)); if (ret) { free_uid(user); @@ -7961,17 +8595,29 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, ctx = io_ring_ctx_alloc(p); if (!ctx) { - if (account_mem) - io_unaccount_mem(user, ring_pages(p->sq_entries, + if (limit_mem) + __io_unaccount_mem(user, ring_pages(p->sq_entries, p->cq_entries)); free_uid(user); return -ENOMEM; } ctx->compat = in_compat_syscall(); - ctx->account_mem = account_mem; ctx->user = user; ctx->creds = get_current_cred(); + mmgrab(current->mm); + ctx->sqo_mm = current->mm; + + /* + * Account memory _before_ installing the file descriptor. Once + * the descriptor is installed, it can get closed at any time. Also + * do this before hitting the general error path, as ring freeing + * will un-account as well. + */ + io_account_mem(ctx, ring_pages(p->sq_entries, p->cq_entries), + ACCT_LOCKED); + ctx->limit_mem = limit_mem; + ret = io_allocate_scq_urings(ctx, p); if (ret) goto err; @@ -8000,12 +8646,14 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | IORING_FEAT_SUBMIT_STABLE | IORING_FEAT_RW_CUR_POS | - IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL; + IORING_FEAT_CUR_PERSONALITY | IORING_FEAT_FAST_POLL | + IORING_FEAT_POLL_32BITS; if (copy_to_user(params, p, sizeof(*p))) { ret = -EFAULT; goto err; } + /* * Install ring fd as the very last thing, so we don't risk someone * having closed it before we finish setup @@ -8289,7 +8937,8 @@ static int __init io_uring_init(void) BUILD_BUG_SQE_ELEM(28, /* compat */ int, rw_flags); BUILD_BUG_SQE_ELEM(28, /* compat */ __u32, rw_flags); BUILD_BUG_SQE_ELEM(28, __u32, fsync_flags); - BUILD_BUG_SQE_ELEM(28, __u16, poll_events); + BUILD_BUG_SQE_ELEM(28, /* compat */ __u16, poll_events); + BUILD_BUG_SQE_ELEM(28, __u32, poll32_events); BUILD_BUG_SQE_ELEM(28, __u32, sync_range_flags); BUILD_BUG_SQE_ELEM(28, __u32, msg_flags); BUILD_BUG_SQE_ELEM(28, __u32, timeout_flags); diff --git a/fs/ioctl.c b/fs/ioctl.c index d69786d1dd91..4e6cc0a7d69c 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c @@ -736,7 +736,7 @@ static int do_vfs_ioctl(struct file *filp, unsigned int fd, return -ENOIOCTLCMD; } -int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg) +SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) { struct fd f = fdget(fd); int error; @@ -757,11 +757,6 @@ out: return error; } -SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) -{ - return ksys_ioctl(fd, cmd, arg); -} - #ifdef CONFIG_COMPAT /** * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation diff --git a/fs/iomap/apply.c b/fs/iomap/apply.c index 76925b40b5fd..26ab6563181f 100644 --- a/fs/iomap/apply.c +++ b/fs/iomap/apply.c @@ -46,10 +46,14 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, ret = ops->iomap_begin(inode, pos, length, flags, &iomap, &srcmap); if (ret) return ret; - if (WARN_ON(iomap.offset > pos)) - return -EIO; - if (WARN_ON(iomap.length == 0)) - return -EIO; + if (WARN_ON(iomap.offset > pos)) { + written = -EIO; + goto out; + } + if (WARN_ON(iomap.length == 0)) { + written = -EIO; + goto out; + } trace_iomap_apply_dstmap(inode, &iomap); if (srcmap.type != IOMAP_HOLE) @@ -80,6 +84,7 @@ iomap_apply(struct inode *inode, loff_t pos, loff_t length, unsigned flags, written 
= actor(inode, pos, length, data, &iomap, srcmap.type != IOMAP_HOLE ? &srcmap : &iomap); +out: /* * Now the data has been copied, commit the range we've copied. This * should not fail unless the filesystem has had a fatal error. diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index ec7b78e6feca..c1aafb2ab990 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -10,6 +10,7 @@ #include <linux/backing-dev.h> #include <linux/uio.h> #include <linux/task_io_accounting_ops.h> +#include "trace.h" #include "../internal.h" @@ -401,6 +402,9 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length, * can be mapped into multiple disjoint IOs and only a subset of the IOs issued * may be pure data writes. In that case, we still need to do a full data sync * completion. + * + * Returns -ENOTBLK In case of a page invalidation invalidation failure for + * writes. The callers needs to fall back to buffered I/O in this case. */ ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, @@ -475,23 +479,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, if (ret) goto out_free_dio; - /* - * Try to invalidate cache pages for the range we're direct - * writing. If this invalidation fails, tough, the write will - * still work, but racing two incompatible write paths is a - * pretty crazy thing to do, so we don't support it 100%. - */ - ret = invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, end >> PAGE_SHIFT); - if (ret) - dio_warn_stale_pagecache(iocb->ki_filp); - ret = 0; - - if (iov_iter_rw(iter) == WRITE && !wait_for_completion && - !inode->i_sb->s_dio_done_wq) { - ret = sb_init_dio_done_wq(inode->i_sb); - if (ret < 0) + if (iov_iter_rw(iter) == WRITE) { + /* + * Try to invalidate cache pages for the range we are writing. + * If this invalidation fails, let the caller fall back to + * buffered I/O. + */ + if (invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, + end >> PAGE_SHIFT)) { + trace_iomap_dio_invalidate_fail(inode, pos, count); + ret = -ENOTBLK; goto out_free_dio; + } + + if (!wait_for_completion && !inode->i_sb->s_dio_done_wq) { + ret = sb_init_dio_done_wq(inode->i_sb); + if (ret < 0) + goto out_free_dio; + } } inode_dio_begin(inode); diff --git a/fs/iomap/seek.c b/fs/iomap/seek.c index 89f61d93c0bc..107ee80c3568 100644 --- a/fs/iomap/seek.c +++ b/fs/iomap/seek.c @@ -127,7 +127,7 @@ iomap_seek_hole_actor(struct inode *inode, loff_t offset, loff_t length, SEEK_HOLE); if (offset < 0) return length; - /* fall through */ + fallthrough; case IOMAP_HOLE: *(loff_t *)data = offset; return 0; @@ -175,7 +175,7 @@ iomap_seek_data_actor(struct inode *inode, loff_t offset, loff_t length, SEEK_DATA); if (offset < 0) return length; - /*FALLTHRU*/ + fallthrough; default: *(loff_t *)data = offset; return 0; diff --git a/fs/iomap/trace.h b/fs/iomap/trace.h index 5693a39d52fb..fdc7ae388476 100644 --- a/fs/iomap/trace.h +++ b/fs/iomap/trace.h @@ -74,6 +74,7 @@ DEFINE_EVENT(iomap_range_class, name, \ DEFINE_RANGE_EVENT(iomap_writepage); DEFINE_RANGE_EVENT(iomap_releasepage); DEFINE_RANGE_EVENT(iomap_invalidatepage); +DEFINE_RANGE_EVENT(iomap_dio_invalidate_fail); #define IOMAP_TYPE_STRINGS \ { IOMAP_HOLE, "HOLE" }, \ diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index d634561f871a..78f5c96c76f3 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -612,9 +612,6 @@ static bool rootdir_empty(struct super_block *sb, unsigned long block) /* * Initialize the superblock and read the root inode. 
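/*
 * [Editor's note: caller-side sketch, not part of this patch.]  With the
 * fs/iomap/direct-io.c hunk above, iomap_dio_rw() reports a failed page-cache
 * invalidation on a direct write as -ENOTBLK instead of warning and pressing
 * on, and the caller is expected to retry through the buffered path.  A
 * minimal shape of such a caller (the myfs_* names and the buffered fallback
 * helper are hypothetical):
 */
static ssize_t myfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = iomap_dio_rw(iocb, from, &myfs_iomap_ops, NULL,
				   is_sync_kiocb(iocb));
		if (ret != -ENOTBLK)
			return ret;
		/* Invalidation failed: fall back to buffered I/O below. */
	}
	return myfs_buffered_write_iter(iocb, from);
}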
- * - * Note: a check_disk_change() has been done immediately prior - * to this call, so we don't need to check again. */ static int isofs_fill_super(struct super_block *s, void *data, int silent) { diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c index cac468f04820..402769881c32 100644 --- a/fs/isofs/namei.c +++ b/fs/isofs/namei.c @@ -152,8 +152,8 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry, struct dentry *isofs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { int found; - unsigned long uninitialized_var(block); - unsigned long uninitialized_var(offset); + unsigned long block; + unsigned long offset; struct inode *inode; struct page *page; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index e4944436e733..17fdc482f554 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -1285,7 +1285,7 @@ journal_t *jbd2_journal_init_inode(struct inode *inode) * superblock as being NULL to prevent the journal destroy from writing * back a bogus superblock. */ -static void journal_fail_superblock (journal_t *journal) +static void journal_fail_superblock(journal_t *journal) { struct buffer_head *bh = journal->j_sb_buffer; brelse(bh); @@ -1367,8 +1367,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags) int ret; /* Buffer got discarded which means block device got invalidated */ - if (!buffer_mapped(bh)) + if (!buffer_mapped(bh)) { + unlock_buffer(bh); return -EIO; + } trace_jbd2_write_superblock(journal, write_flags); if (!(journal->j_flags & JBD2_BARRIER)) @@ -1815,7 +1817,7 @@ int jbd2_journal_destroy(journal_t *journal) /** - *int jbd2_journal_check_used_features () - Check if features specified are used. + *int jbd2_journal_check_used_features() - Check if features specified are used. * @journal: Journal to check. * @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount @@ -1825,7 +1827,7 @@ int jbd2_journal_destroy(journal_t *journal) * features. Return true (non-zero) if it does. **/ -int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat, +int jbd2_journal_check_used_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { journal_superblock_t *sb; @@ -1860,7 +1862,7 @@ int jbd2_journal_check_used_features (journal_t *journal, unsigned long compat, * all of a given set of features on this journal. Return true * (non-zero) if it can. */ -int jbd2_journal_check_available_features (journal_t *journal, unsigned long compat, +int jbd2_journal_check_available_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { if (!compat && !ro && !incompat) @@ -1882,7 +1884,7 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com } /** - * int jbd2_journal_set_features () - Mark a given journal feature in the superblock + * int jbd2_journal_set_features() - Mark a given journal feature in the superblock * @journal: Journal to act on. 
* @compat: bitmask of compatible features * @ro: bitmask of features that force read-only mount @@ -1893,7 +1895,7 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com * */ -int jbd2_journal_set_features (journal_t *journal, unsigned long compat, +int jbd2_journal_set_features(journal_t *journal, unsigned long compat, unsigned long ro, unsigned long incompat) { #define INCOMPAT_FEATURE_ON(f) \ diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index 2ed278f0dced..faa97d748474 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -690,14 +690,11 @@ static int do_one_pass(journal_t *journal, * number. */ if (pass == PASS_SCAN && jbd2_has_feature_checksum(journal)) { - int chksum_err, chksum_seen; struct commit_header *cbh = (struct commit_header *)bh->b_data; unsigned found_chksum = be32_to_cpu(cbh->h_chksum[0]); - chksum_err = chksum_seen = 0; - if (info->end_transaction) { journal->j_failed_commit = info->end_transaction; @@ -705,42 +702,23 @@ static int do_one_pass(journal_t *journal, break; } - if (crc32_sum == found_chksum && - cbh->h_chksum_type == JBD2_CRC32_CHKSUM && - cbh->h_chksum_size == - JBD2_CRC32_CHKSUM_SIZE) - chksum_seen = 1; - else if (!(cbh->h_chksum_type == 0 && - cbh->h_chksum_size == 0 && - found_chksum == 0 && - !chksum_seen)) - /* - * If fs is mounted using an old kernel and then - * kernel with journal_chksum is used then we - * get a situation where the journal flag has - * checksum flag set but checksums are not - * present i.e chksum = 0, in the individual - * commit blocks. - * Hence to avoid checksum failures, in this - * situation, this extra check is added. - */ - chksum_err = 1; - - if (chksum_err) { - info->end_transaction = next_commit_ID; - - if (!jbd2_has_feature_async_commit(journal)) { - journal->j_failed_commit = - next_commit_ID; - brelse(bh); - break; - } - } + /* Neither checksum match nor unused? */ + if (!((crc32_sum == found_chksum && + cbh->h_chksum_type == + JBD2_CRC32_CHKSUM && + cbh->h_chksum_size == + JBD2_CRC32_CHKSUM_SIZE) || + (cbh->h_chksum_type == 0 && + cbh->h_chksum_size == 0 && + found_chksum == 0))) + goto chksum_error; + crc32_sum = ~0; } if (pass == PASS_SCAN && !jbd2_commit_block_csum_verify(journal, bh->b_data)) { + chksum_error: info->end_transaction = next_commit_ID; if (!jbd2_has_feature_async_commit(journal)) { diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index e91aad3637a2..43985738aa86 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -2026,6 +2026,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) */ static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { + J_ASSERT_JH(jh, jh->b_transaction != NULL); + J_ASSERT_JH(jh, jh->b_next_transaction == NULL); + __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; } @@ -2078,10 +2081,6 @@ out: * int jbd2_journal_try_to_free_buffers() - try to free page buffers. * @journal: journal for operation * @page: to try and free - * @gfp_mask: we use the mask to detect how hard should we try to release - * buffers. If __GFP_DIRECT_RECLAIM and __GFP_FS is set, we wait for commit - * code to release the buffers. 
- * * * For all the buffers on this page, * if they are fully written out ordered data, move them onto BUF_CLEAN @@ -2112,11 +2111,11 @@ out: * * Return 0 on failure, 1 on success */ -int jbd2_journal_try_to_free_buffers(journal_t *journal, - struct page *page, gfp_t gfp_mask) +int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page) { struct buffer_head *head; struct buffer_head *bh; + bool has_write_io_error = false; int ret = 0; J_ASSERT(PageLocked(page)); @@ -2141,11 +2140,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal, jbd2_journal_put_journal_head(jh); if (buffer_jbd(bh)) goto busy; + + /* + * If we free a metadata buffer which has been failed to + * write out, the jbd2 checkpoint procedure will not detect + * this failure and may lead to filesystem inconsistency + * after cleanup journal tail. + */ + if (buffer_write_io_error(bh)) { + pr_err("JBD2: Error while async write back metadata bh %llu.", + (unsigned long long)bh->b_blocknr); + has_write_io_error = true; + } } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(page); busy: + if (has_write_io_error) + jbd2_journal_abort(journal, -EIO); + return ret; } @@ -2572,6 +2586,13 @@ bool __jbd2_journal_refile_buffer(struct journal_head *jh) was_dirty = test_clear_buffer_jbddirty(bh); __jbd2_journal_temp_unlink_buffer(jh); + + /* + * b_transaction must be set, otherwise the new b_transaction won't + * be holding jh reference + */ + J_ASSERT_JH(jh, jh->b_transaction != NULL); + /* * We set b_transaction here because b_next_transaction will inherit * our jh reference and thus __jbd2_journal_file_buffer() must not diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index f20cff1194bb..776493713153 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) int ret; uint32_t now = JFFS2_NOW(); + mutex_lock(&f->sem); for (fd = f->dents ; fd; fd = fd->next) { - if (fd->ino) + if (fd->ino) { + mutex_unlock(&f->sem); return -ENOTEMPTY; + } } + mutex_unlock(&f->sem); ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name, dentry->d_name.len, f, now); diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c index 83b8f06b4a64..7e9abdb89712 100644 --- a/fs/jffs2/erase.c +++ b/fs/jffs2/erase.c @@ -401,7 +401,7 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb { size_t retlen; int ret; - uint32_t uninitialized_var(bad_offset); + uint32_t bad_offset; switch (jffs2_block_check_erase(c, jeb, &bad_offset)) { case -EAGAIN: goto refile; diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c index ab8cdd9e9325..78858f6e9583 100644 --- a/fs/jffs2/fs.c +++ b/fs/jffs2/fs.c @@ -341,7 +341,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino) rdev = old_decode_dev(je16_to_cpu(jdev.old_id)); else rdev = new_decode_dev(je32_to_cpu(jdev.new_id)); - /* fall through */ + fallthrough; case S_IFSOCK: case S_IFIFO: diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c index bccfc40b3a74..2f6f0b140c05 100644 --- a/fs/jffs2/readinode.c +++ b/fs/jffs2/readinode.c @@ -1273,7 +1273,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c, dbg_readinode("symlink's target '%s' cached\n", f->target); } - /* fall through... 
*/ + fallthrough; case S_IFBLK: case S_IFCHR: diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 5f7e284e0df3..db72a9d2d0af 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -261,7 +261,8 @@ int jffs2_scan_medium(struct jffs2_sb_info *c) } #endif if (c->nr_erasing_blocks) { - if ( !c->used_size && ((c->nr_free_blocks+empty_blocks+bad_blocks)!= c->nr_blocks || bad_blocks == c->nr_blocks) ) { + if (!c->used_size && !c->unchecked_size && + ((c->nr_free_blocks+empty_blocks+bad_blocks) != c->nr_blocks || bad_blocks == c->nr_blocks)) { pr_notice("Cowardly refusing to erase blocks on filesystem with no valid JFFS2 nodes\n"); pr_notice("empty_blocks %d, bad_blocks %d, c->nr_blocks %d\n", empty_blocks, bad_blocks, c->nr_blocks); diff --git a/fs/jfs/jfs_mount.c b/fs/jfs/jfs_mount.c index eb8b9e233d73..2935d4c776ec 100644 --- a/fs/jfs/jfs_mount.c +++ b/fs/jfs/jfs_mount.c @@ -36,6 +36,7 @@ #include <linux/fs.h> #include <linux/buffer_head.h> +#include <linux/blkdev.h> #include "jfs_incore.h" #include "jfs_filsys.h" diff --git a/fs/jfs/resize.c b/fs/jfs/resize.c index 66acea9d878b..bde787c354fc 100644 --- a/fs/jfs/resize.c +++ b/fs/jfs/resize.c @@ -6,6 +6,7 @@ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/quotaops.h> +#include <linux/blkdev.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c index 06b342d8462b..f277d023ebcd 100644 --- a/fs/kernfs/file.c +++ b/fs/kernfs/file.c @@ -883,6 +883,7 @@ repeat: list_for_each_entry(info, &kernfs_root(kn)->supers, node) { struct kernfs_node *parent; + struct inode *p_inode = NULL; struct inode *inode; struct qstr name; @@ -899,20 +900,20 @@ repeat: name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name)); parent = kernfs_get_parent(kn); if (parent) { - struct inode *p_inode; - p_inode = ilookup(info->sb, kernfs_ino(parent)); if (p_inode) { - fsnotify(p_inode, FS_MODIFY | FS_EVENT_ON_CHILD, - inode, FSNOTIFY_EVENT_INODE, &name, 0); + fsnotify(FS_MODIFY | FS_EVENT_ON_CHILD, + inode, FSNOTIFY_EVENT_INODE, + p_inode, &name, inode, 0); iput(p_inode); } kernfs_put(parent); } - fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE, - &name, 0); + if (!p_inode) + fsnotify_inode(inode, FS_MODIFY); + iput(inode); } diff --git a/fs/libfs.c b/fs/libfs.c index 4d08edf19c78..e0d42e977d9a 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -137,11 +137,11 @@ loff_t dcache_dir_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case 1: offset += file->f_pos; - /* fall through */ + fallthrough; case 0: if (offset >= 0) break; - /* fall through */ + fallthrough; default: return -EINVAL; } diff --git a/fs/locks.c b/fs/locks.c index 7df0f9fa66f4..1f84a03601fe 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1282,6 +1282,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, if (!new_fl) goto out; locks_copy_lock(new_fl, request); + locks_move_blocks(new_fl, request); request = new_fl; new_fl = NULL; locks_insert_lock_ctx(request, &fl->fl_list); @@ -1498,7 +1499,7 @@ static void lease_clear_pending(struct file_lock *fl, int arg) switch (arg) { case F_UNLCK: fl->fl_flags &= ~FL_UNLOCK_PENDING; - /* fall through */ + fallthrough; case F_RDLCK: fl->fl_flags &= ~FL_DOWNGRADE_PENDING; } @@ -1807,6 +1808,9 @@ check_conflicting_open(struct file *filp, const long arg, int flags) if (flags & FL_LAYOUT) return 0; + if (flags & FL_DELEG) + /* We leave these checks to the caller. */ + return 0; if (arg == F_RDLCK) return inode_is_open_for_write(inode) ? 
-EAGAIN : 0; @@ -2521,7 +2525,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, cmd = F_SETLKW; file_lock->fl_flags |= FL_OFDLCK; file_lock->fl_owner = filp; - /* Fallthrough */ + fallthrough; case F_SETLKW: file_lock->fl_flags |= FL_SLEEP; } @@ -2652,7 +2656,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, cmd = F_SETLKW64; file_lock->fl_flags |= FL_OFDLCK; file_lock->fl_owner = filp; - /* Fallthrough */ + fallthrough; case F_SETLKW64: file_lock->fl_flags |= FL_SLEEP; } diff --git a/fs/minix/inode.c b/fs/minix/inode.c index 7cb5fd38eb14..7b09a9158e40 100644 --- a/fs/minix/inode.c +++ b/fs/minix/inode.c @@ -150,6 +150,25 @@ static int minix_remount (struct super_block * sb, int * flags, char * data) return 0; } +static bool minix_check_superblock(struct super_block *sb) +{ + struct minix_sb_info *sbi = minix_sb(sb); + + if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) + return false; + + /* + * s_max_size must not exceed the block mapping limitation. This check + * is only needed for V1 filesystems, since V2/V3 support an extra level + * of indirect blocks which places the limit well above U32_MAX. + */ + if (sbi->s_version == MINIX_V1 && + sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE) + return false; + + return true; +} + static int minix_fill_super(struct super_block *s, void *data, int silent) { struct buffer_head *bh; @@ -185,7 +204,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = ms->s_zmap_blocks; sbi->s_firstdatazone = ms->s_firstdatazone; sbi->s_log_zone_size = ms->s_log_zone_size; - sbi->s_max_size = ms->s_max_size; + s->s_maxbytes = ms->s_max_size; s->s_magic = ms->s_magic; if (s->s_magic == MINIX_SUPER_MAGIC) { sbi->s_version = MINIX_V1; @@ -216,7 +235,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) sbi->s_zmap_blocks = m3s->s_zmap_blocks; sbi->s_firstdatazone = m3s->s_firstdatazone; sbi->s_log_zone_size = m3s->s_log_zone_size; - sbi->s_max_size = m3s->s_max_size; + s->s_maxbytes = m3s->s_max_size; sbi->s_ninodes = m3s->s_ninodes; sbi->s_nzones = m3s->s_zones; sbi->s_dirsize = 64; @@ -228,11 +247,12 @@ static int minix_fill_super(struct super_block *s, void *data, int silent) } else goto out_no_fs; + if (!minix_check_superblock(s)) + goto out_illegal_sb; + /* * Allocate the buffer map to keep the superblock small. 
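/*
 * [Editor's note: worked example for the V1 limit checked above, not part of
 * this patch.]  A MINIX V1 inode addresses 7 direct blocks, one single
 * indirect block (512 entries) and one double indirect block (512 * 512
 * entries), each block being BLOCK_SIZE (1024) bytes:
 *
 *	(7 + 512 + 512 * 512) * 1024 = 262,663 * 1024 = 268,966,912 bytes
 *
 * i.e. roughly 256 MiB.  Any s_max_size beyond that cannot be reached by the
 * V1 block-mapping code, so minix_check_superblock() rejects such superblocks
 * instead of letting block_to_path() run past the mapping later on.
 */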
*/ - if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0) - goto out_illegal_sb; i = (sbi->s_imap_blocks + sbi->s_zmap_blocks) * sizeof(bh); map = kzalloc(i, GFP_KERNEL); if (!map) @@ -468,6 +488,13 @@ static struct inode *V1_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); @@ -501,6 +528,13 @@ static struct inode *V2_minix_iget(struct inode *inode) iget_failed(inode); return ERR_PTR(-EIO); } + if (raw_inode->i_nlinks == 0) { + printk("MINIX-fs: deleted inode referenced: %lu\n", + inode->i_ino); + brelse(bh); + iget_failed(inode); + return ERR_PTR(-ESTALE); + } inode->i_mode = raw_inode->i_mode; i_uid_write(inode, raw_inode->i_uid); i_gid_write(inode, raw_inode->i_gid); diff --git a/fs/minix/itree_common.c b/fs/minix/itree_common.c index 043c3fdbc8e7..446148792f41 100644 --- a/fs/minix/itree_common.c +++ b/fs/minix/itree_common.c @@ -75,6 +75,7 @@ static int alloc_branch(struct inode *inode, int n = 0; int i; int parent = minix_new_block(inode); + int err = -ENOSPC; branch[0].key = cpu_to_block(parent); if (parent) for (n = 1; n < num; n++) { @@ -85,6 +86,11 @@ static int alloc_branch(struct inode *inode, break; branch[n].key = cpu_to_block(nr); bh = sb_getblk(inode->i_sb, parent); + if (!bh) { + minix_free_block(inode, nr); + err = -ENOMEM; + break; + } lock_buffer(bh); memset(bh->b_data, 0, bh->b_size); branch[n].bh = bh; @@ -103,7 +109,7 @@ static int alloc_branch(struct inode *inode, bforget(branch[i].bh); for (i = 0; i < n; i++) minix_free_block(inode, block_to_cpu(branch[i].key)); - return -ENOSPC; + return err; } static inline int splice_branch(struct inode *inode, diff --git a/fs/minix/itree_v1.c b/fs/minix/itree_v1.c index 046cc96ee7ad..1fed906042aa 100644 --- a/fs/minix/itree_v1.c +++ b/fs/minix/itree_v1.c @@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, inode->i_sb->s_bdev); - } else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, inode->i_sb->s_bdev); - } else if (block < 7) { + return 0; + } + if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes) + return 0; + + if (block < 7) { offsets[n++] = block; } else if ((block -= 7) < 512) { offsets[n++] = 7; diff --git a/fs/minix/itree_v2.c b/fs/minix/itree_v2.c index f7fc7ecccccc..9d00f31a2d9d 100644 --- a/fs/minix/itree_v2.c +++ b/fs/minix/itree_v2.c @@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH]) if (block < 0) { printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n", block, sb->s_bdev); - } else if ((u64)block * (u64)sb->s_blocksize >= - minix_sb(sb)->s_max_size) { - if (printk_ratelimit()) - printk("MINIX-fs: block_to_path: " - "block %ld too big on dev %pg\n", - block, sb->s_bdev); - } else if (block < DIRCOUNT) { + return 0; + } + if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes) + return 0; + + if (block < DIRCOUNT) { offsets[n++] = block; } else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) { offsets[n++] = DIRCOUNT; diff --git a/fs/minix/minix.h b/fs/minix/minix.h index df081e8afcc3..168d45d3de73 100644 
--- a/fs/minix/minix.h +++ b/fs/minix/minix.h @@ -32,7 +32,6 @@ struct minix_sb_info { unsigned long s_zmap_blocks; unsigned long s_firstdatazone; unsigned long s_log_zone_size; - unsigned long s_max_size; int s_dirsize; int s_namelen; struct buffer_head ** s_imap; diff --git a/fs/namei.c b/fs/namei.c index 72d4219c93ac..e99e2a9da0f7 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1024,7 +1024,7 @@ static bool safe_hardlink_source(struct inode *inode) * * Returns 0 if successful, -ve on error. */ -static int may_linkat(struct path *link) +int may_linkat(struct path *link) { struct inode *inode = link->dentry->d_inode; @@ -2851,16 +2851,24 @@ static int may_open(const struct path *path, int acc_mode, int flag) case S_IFDIR: if (acc_mode & MAY_WRITE) return -EISDIR; + if (acc_mode & MAY_EXEC) + return -EACCES; break; case S_IFBLK: case S_IFCHR: if (!may_open_dev(path)) return -EACCES; - /*FALLTHRU*/ + fallthrough; case S_IFIFO: case S_IFSOCK: + if (acc_mode & MAY_EXEC) + return -EACCES; flag &= ~O_TRUNC; break; + case S_IFREG: + if ((acc_mode & MAY_EXEC) && path_noexec(path)) + return -EACCES; + break; } error = inode_permission(inode, MAY_OPEN | acc_mode); @@ -3564,7 +3572,7 @@ static int may_mknod(umode_t mode) } } -long do_mknodat(int dfd, const char __user *filename, umode_t mode, +static long do_mknodat(int dfd, const char __user *filename, umode_t mode, unsigned int dev) { struct dentry *dentry; @@ -3645,7 +3653,7 @@ int vfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) } EXPORT_SYMBOL(vfs_mkdir); -long do_mkdirat(int dfd, const char __user *pathname, umode_t mode) +static long do_mkdirat(int dfd, const char __user *pathname, umode_t mode) { struct dentry *dentry; struct path path; @@ -3720,17 +3728,16 @@ out: } EXPORT_SYMBOL(vfs_rmdir); -long do_rmdir(int dfd, const char __user *pathname) +long do_rmdir(int dfd, struct filename *name) { int error = 0; - struct filename *name; struct dentry *dentry; struct path path; struct qstr last; int type; unsigned int lookup_flags = 0; retry: - name = filename_parentat(dfd, getname(pathname), lookup_flags, + name = filename_parentat(dfd, name, lookup_flags, &path, &last, &type); if (IS_ERR(name)) return PTR_ERR(name); @@ -3771,17 +3778,17 @@ exit2: mnt_drop_write(path.mnt); exit1: path_put(&path); - putname(name); if (retry_estale(error, lookup_flags)) { lookup_flags |= LOOKUP_REVAL; goto retry; } + putname(name); return error; } SYSCALL_DEFINE1(rmdir, const char __user *, pathname) { - return do_rmdir(AT_FDCWD, pathname); + return do_rmdir(AT_FDCWD, getname(pathname)); } /** @@ -3926,8 +3933,7 @@ SYSCALL_DEFINE3(unlinkat, int, dfd, const char __user *, pathname, int, flag) return -EINVAL; if (flag & AT_REMOVEDIR) - return do_rmdir(dfd, pathname); - + return do_rmdir(dfd, getname(pathname)); return do_unlinkat(dfd, getname(pathname)); } @@ -3957,7 +3963,7 @@ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname) } EXPORT_SYMBOL(vfs_symlink); -long do_symlinkat(const char __user *oldname, int newdfd, +static long do_symlinkat(const char __user *oldname, int newdfd, const char __user *newname) { int error; @@ -4088,7 +4094,7 @@ EXPORT_SYMBOL(vfs_link); * with linux 2.0, and to avoid hard-linking to directories * and other special files. 
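/*
 * [Editor's note: user-space probe, not part of this patch; the FIFO path is
 * made up.]  With the MAY_EXEC checks added to may_open() above, the open
 * performed for exec refuses directories, FIFOs, sockets, device nodes and
 * regular files on noexec mounts up front, so an exec attempt on such a file
 * reports EACCES at open time:
 */
#include <unistd.h>
#include <errno.h>
#include <stdio.h>

int main(void)
{
	char *const argv[] = { "fifo", NULL };
	char *const envp[] = { NULL };

	/* /tmp/fifo is assumed to have been created with mkfifo(1) beforehand. */
	execve("/tmp/fifo", argv, envp);

	/* may_open() now rejects S_IFIFO when MAY_EXEC is requested. */
	printf("execve failed with %d (EACCES is %d)\n", errno, EACCES);
	return 0;
}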
--ADM */ -int do_linkat(int olddfd, const char __user *oldname, int newdfd, +static int do_linkat(int olddfd, const char __user *oldname, int newdfd, const char __user *newname, int flags) { struct dentry *new_dentry; diff --git a/fs/namespace.c b/fs/namespace.c index 4a0f600a3328..bae0e95b3713 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1706,52 +1706,52 @@ static inline bool may_mandlock(void) } #endif -/* - * Now umount can handle mount points as well as block devices. - * This is important for filesystems which use unnamed block devices. - * - * We now support a flag for forced unmount like the other 'big iron' - * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD - */ - -int ksys_umount(char __user *name, int flags) +static int can_umount(const struct path *path, int flags) { - struct path path; - struct mount *mnt; - int retval; - int lookup_flags = LOOKUP_MOUNTPOINT; + struct mount *mnt = real_mount(path->mnt); if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW)) return -EINVAL; - if (!may_mount()) return -EPERM; - - if (!(flags & UMOUNT_NOFOLLOW)) - lookup_flags |= LOOKUP_FOLLOW; - - retval = user_path_at(AT_FDCWD, name, lookup_flags, &path); - if (retval) - goto out; - mnt = real_mount(path.mnt); - retval = -EINVAL; - if (path.dentry != path.mnt->mnt_root) - goto dput_and_out; + if (path->dentry != path->mnt->mnt_root) + return -EINVAL; if (!check_mnt(mnt)) - goto dput_and_out; + return -EINVAL; if (mnt->mnt.mnt_flags & MNT_LOCKED) /* Check optimistically */ - goto dput_and_out; - retval = -EPERM; + return -EINVAL; if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN)) - goto dput_and_out; + return -EPERM; + return 0; +} + +int path_umount(struct path *path, int flags) +{ + struct mount *mnt = real_mount(path->mnt); + int ret; + + ret = can_umount(path, flags); + if (!ret) + ret = do_umount(mnt, flags); - retval = do_umount(mnt, flags); -dput_and_out: /* we mustn't call path_put() as that would clear mnt_expiry_mark */ - dput(path.dentry); + dput(path->dentry); mntput_no_expire(mnt); -out: - return retval; + return ret; +} + +static int ksys_umount(char __user *name, int flags) +{ + int lookup_flags = LOOKUP_MOUNTPOINT; + struct path path; + int ret; + + if (!(flags & UMOUNT_NOFOLLOW)) + lookup_flags |= LOOKUP_FOLLOW; + ret = user_path_at(AT_FDCWD, name, lookup_flags, &path); + if (ret) + return ret; + return path_umount(&path, flags); } SYSCALL_DEFINE2(umount, char __user *, name, int, flags) @@ -3116,12 +3116,11 @@ char *copy_mount_string(const void __user *data) * Therefore, if this magic number is present, it carries no information * and must be discarded. */ -long do_mount(const char *dev_name, const char __user *dir_name, +int path_mount(const char *dev_name, struct path *path, const char *type_page, unsigned long flags, void *data_page) { - struct path path; unsigned int mnt_flags = 0, sb_flags; - int retval = 0; + int ret; /* Discard magic */ if ((flags & MS_MGC_MSK) == MS_MGC_VAL) @@ -3134,19 +3133,13 @@ long do_mount(const char *dev_name, const char __user *dir_name, if (flags & MS_NOUSER) return -EINVAL; - /* ... 
and get the mountpoint */ - retval = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path); - if (retval) - return retval; - - retval = security_sb_mount(dev_name, &path, - type_page, flags, data_page); - if (!retval && !may_mount()) - retval = -EPERM; - if (!retval && (flags & SB_MANDLOCK) && !may_mandlock()) - retval = -EPERM; - if (retval) - goto dput_out; + ret = security_sb_mount(dev_name, path, type_page, flags, data_page); + if (ret) + return ret; + if (!may_mount()) + return -EPERM; + if ((flags & SB_MANDLOCK) && !may_mandlock()) + return -EPERM; /* Default to relatime unless overriden */ if (!(flags & MS_NOATIME)) @@ -3173,7 +3166,7 @@ long do_mount(const char *dev_name, const char __user *dir_name, ((flags & (MS_NOATIME | MS_NODIRATIME | MS_RELATIME | MS_STRICTATIME)) == 0)) { mnt_flags &= ~MNT_ATIME_MASK; - mnt_flags |= path.mnt->mnt_flags & MNT_ATIME_MASK; + mnt_flags |= path->mnt->mnt_flags & MNT_ATIME_MASK; } sb_flags = flags & (SB_RDONLY | @@ -3186,22 +3179,32 @@ long do_mount(const char *dev_name, const char __user *dir_name, SB_I_VERSION); if ((flags & (MS_REMOUNT | MS_BIND)) == (MS_REMOUNT | MS_BIND)) - retval = do_reconfigure_mnt(&path, mnt_flags); - else if (flags & MS_REMOUNT) - retval = do_remount(&path, flags, sb_flags, mnt_flags, - data_page); - else if (flags & MS_BIND) - retval = do_loopback(&path, dev_name, flags & MS_REC); - else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) - retval = do_change_type(&path, flags); - else if (flags & MS_MOVE) - retval = do_move_mount_old(&path, dev_name); - else - retval = do_new_mount(&path, type_page, sb_flags, mnt_flags, - dev_name, data_page); -dput_out: + return do_reconfigure_mnt(path, mnt_flags); + if (flags & MS_REMOUNT) + return do_remount(path, flags, sb_flags, mnt_flags, data_page); + if (flags & MS_BIND) + return do_loopback(path, dev_name, flags & MS_REC); + if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) + return do_change_type(path, flags); + if (flags & MS_MOVE) + return do_move_mount_old(path, dev_name); + + return do_new_mount(path, type_page, sb_flags, mnt_flags, dev_name, + data_page); +} + +long do_mount(const char *dev_name, const char __user *dir_name, + const char *type_page, unsigned long flags, void *data_page) +{ + struct path path; + int ret; + + ret = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path); + if (ret) + return ret; + ret = path_mount(dev_name, &path, type_page, flags, data_page); path_put(&path); - return retval; + return ret; } static struct ucounts *inc_mnt_namespaces(struct user_namespace *ns) diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile index 2433c3e03cfa..22d11fdc6deb 100644 --- a/fs/nfs/Makefile +++ b/fs/nfs/Makefile @@ -30,7 +30,7 @@ nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o nfsv4-$(CONFIG_NFS_USE_LEGACY_DNS) += cache_lib.o nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o pnfs_nfs.o -nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o +nfsv4-$(CONFIG_NFS_V4_2) += nfs42proc.o nfs42xattr.o obj-$(CONFIG_PNFS_FILE_LAYOUT) += filelayout/ obj-$(CONFIG_PNFS_BLOCK) += blocklayout/ diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c index d1a0e2c8b1b4..08108b6d2fa1 100644 --- a/fs/nfs/blocklayout/blocklayout.c +++ b/fs/nfs/blocklayout/blocklayout.c @@ -753,7 +753,7 @@ out: case -ENODEV: /* Our extent block devices are unavailable */ set_bit(NFS_LSEG_UNAVAILABLE, &lseg->pls_flags); - /* Fall through */ + fallthrough; case 0: return lseg; default: diff --git 
a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c index 9fb067a6f7e0..ef9db135c649 100644 --- a/fs/nfs/blocklayout/rpc_pipefs.c +++ b/fs/nfs/blocklayout/rpc_pipefs.c @@ -79,7 +79,7 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b, goto out_free_data; bl_msg = msg->data; - bl_msg->type = BL_DEVICE_MOUNT, + bl_msg->type = BL_DEVICE_MOUNT; bl_msg->totallen = b->simple.len; nfs4_encode_simple(msg->data + sizeof(*bl_msg), b); diff --git a/fs/nfs/client.c b/fs/nfs/client.c index f1ff3076e4a4..4b8cc93913f7 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -50,6 +50,7 @@ #include "nfs.h" #include "netns.h" #include "sysfs.h" +#include "nfs42.h" #define NFSDBG_FACILITY NFSDBG_CLIENT @@ -749,7 +750,7 @@ error: static void nfs_server_set_fsinfo(struct nfs_server *server, struct nfs_fsinfo *fsinfo) { - unsigned long max_rpc_payload; + unsigned long max_rpc_payload, raw_max_rpc_payload; /* Work out a lot of parameters */ if (server->rsize == 0) @@ -762,7 +763,9 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax) server->wsize = nfs_block_size(fsinfo->wtmax, NULL); - max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL); + raw_max_rpc_payload = rpc_max_payload(server->client); + max_rpc_payload = nfs_block_size(raw_max_rpc_payload, NULL); + if (server->rsize > max_rpc_payload) server->rsize = max_rpc_payload; if (server->rsize > NFS_MAX_FILE_IO_SIZE) @@ -795,6 +798,21 @@ static void nfs_server_set_fsinfo(struct nfs_server *server, server->clone_blksize = fsinfo->clone_blksize; /* We're airborne Set socket buffersize */ rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100); + +#ifdef CONFIG_NFS_V4_2 + /* + * Defaults until limited by the session parameters. 
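A quick worked example of the defaults that the assignments below establish. The transport payload is an assumption here (raw_max_rpc_payload is whatever rpc_max_payload() reported above; 1 MiB is only an illustrative value), combined with the stock limits XATTR_SIZE_MAX = 65536 and XATTR_LIST_MAX = 65536:

    gxasize = sxasize = min(1048576, 65536)                  = 65536
    lxasize = min(1048576, nfs42_listxattr_xdrsize(65536))
            = min(1048576, (65536 / 7) * 8 + 4)              = 74900

so on such a mount the get/set buffers and the listxattr reply buffer are all capped well below a single RPC payload.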
+ */ + server->gxasize = min_t(unsigned int, raw_max_rpc_payload, + XATTR_SIZE_MAX); + server->sxasize = min_t(unsigned int, raw_max_rpc_payload, + XATTR_SIZE_MAX); + server->lxasize = min_t(unsigned int, raw_max_rpc_payload, + nfs42_listxattr_xdrsize(XATTR_LIST_MAX)); + + if (fsinfo->xattr_support) + server->caps |= NFS_CAP_XATTR; +#endif } /* diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 5a331da5f55a..e732580fe47b 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -1181,7 +1181,7 @@ int nfs_lookup_verify_inode(struct inode *inode, unsigned int flags) /* A NFSv4 OPEN will revalidate later */ if (server->caps & NFS_CAP_ATOMIC_OPEN) goto out; - /* Fallthrough */ + fallthrough; case S_IFDIR: if (server->flags & NFS_MOUNT_NOCTO) break; @@ -2460,7 +2460,7 @@ static struct nfs_access_entry *nfs_access_search_rbtree(struct inode *inode, co return NULL; } -static int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) +static int nfs_access_get_cached_locked(struct inode *inode, const struct cred *cred, struct nfs_access_entry *res, bool may_block) { struct nfs_inode *nfsi = NFS_I(inode); struct nfs_access_entry *cache; @@ -2533,6 +2533,20 @@ out: return err; } +int nfs_access_get_cached(struct inode *inode, const struct cred *cred, struct +nfs_access_entry *res, bool may_block) +{ + int status; + + status = nfs_access_get_cached_rcu(inode, cred, res); + if (status != 0) + status = nfs_access_get_cached_locked(inode, cred, res, + may_block); + + return status; +} +EXPORT_SYMBOL_GPL(nfs_access_get_cached); + static void nfs_access_add_rbtree(struct inode *inode, struct nfs_access_entry *set) { struct nfs_inode *nfsi = NFS_I(inode); @@ -2647,9 +2661,7 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) trace_nfs_access_enter(inode); - status = nfs_access_get_cached_rcu(inode, cred, &cache); - if (status != 0) - status = nfs_access_get_cached(inode, cred, &cache, may_block); + status = nfs_access_get_cached(inode, cred, &cache, may_block); if (status == 0) goto out_cached; @@ -2661,6 +2673,10 @@ static int nfs_do_access(struct inode *inode, const struct cred *cred, int mask) * Determine which access bits we want to ask for... 
*/ cache.mask = NFS_ACCESS_READ | NFS_ACCESS_MODIFY | NFS_ACCESS_EXTEND; + if (nfs_server_capable(inode, NFS_CAP_XATTR)) { + cache.mask |= NFS_ACCESS_XAREAD | NFS_ACCESS_XAWRITE | + NFS_ACCESS_XALIST; + } if (S_ISDIR(inode->i_mode)) cache.mask |= NFS_ACCESS_DELETE | NFS_ACCESS_LOOKUP; else diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 1b79dd5cf661..2d30a4da49fa 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -896,7 +896,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, */ ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter) { - ssize_t result = -EINVAL, requested; + ssize_t result, requested; size_t count; struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f96367a2463e..63940a7a70be 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -140,6 +140,7 @@ static int nfs_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); + errseq_t since; dprintk("NFS: flush(%pD2)\n", file); @@ -148,7 +149,9 @@ nfs_file_flush(struct file *file, fl_owner_t id) return 0; /* Flush writes to the server and return any errors */ - return nfs_wb_all(inode); + since = filemap_sample_wb_err(file->f_mapping); + nfs_wb_all(inode); + return filemap_check_wb_err(file->f_mapping, since); } ssize_t @@ -587,12 +590,14 @@ static const struct vm_operations_struct nfs_file_vm_ops = { .page_mkwrite = nfs_vm_page_mkwrite, }; -static int nfs_need_check_write(struct file *filp, struct inode *inode) +static int nfs_need_check_write(struct file *filp, struct inode *inode, + int error) { struct nfs_open_context *ctx; ctx = nfs_file_open_context(filp); - if (nfs_ctx_key_to_expire(ctx, inode)) + if (nfs_error_is_fatal_on_server(error) || + nfs_ctx_key_to_expire(ctx, inode)) return 1; return 0; } @@ -603,6 +608,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) struct inode *inode = file_inode(file); unsigned long written = 0; ssize_t result; + errseq_t since; + int error; result = nfs_key_timeout_notify(file, inode); if (result) @@ -627,6 +634,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_pos > i_size_read(inode)) nfs_revalidate_mapping(inode, file->f_mapping); + since = filemap_sample_wb_err(file->f_mapping); nfs_start_io_write(inode); result = generic_write_checks(iocb, from); if (result > 0) { @@ -645,7 +653,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) goto out; /* Return error values */ - if (nfs_need_check_write(file, inode)) { + error = filemap_check_wb_err(file->f_mapping, since); + if (nfs_need_check_write(file, inode, error)) { int err = nfs_wb_all(inode); if (err < 0) result = err; diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index a13e69009f19..7f5aa0403e16 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -187,7 +187,7 @@ static int filelayout_async_handle_error(struct rpc_task *task, pnfs_error_mark_layout_for_return(inode, lseg); pnfs_set_lo_fail(lseg); rpc_wake_up(&tbl->slot_tbl_waitq); - /* fall through */ + fallthrough; default: reset: dprintk("%s Retry through MDS. 
Error %d\n", __func__, diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index de03e440b7ee..ff8965d1a4d4 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -790,6 +790,19 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg, return ff_layout_choose_any_ds_for_read(lseg, start_idx, best_idx); } +static struct nfs4_pnfs_ds * +ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio, int *best_idx) +{ + struct pnfs_layout_segment *lseg = pgio->pg_lseg; + struct nfs4_pnfs_ds *ds; + + ds = ff_layout_choose_best_ds_for_read(lseg, pgio->pg_mirror_idx, + best_idx); + if (ds || !pgio->pg_mirror_idx) + return ds; + return ff_layout_choose_best_ds_for_read(lseg, 0, best_idx); +} + static void ff_layout_pg_get_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req, @@ -840,12 +853,11 @@ retry: goto out_nolseg; } - ds = ff_layout_choose_best_ds_for_read(pgio->pg_lseg, 0, &ds_idx); + ds = ff_layout_get_ds_for_read(pgio, &ds_idx); if (!ds) { if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) goto out_mds; - pnfs_put_lseg(pgio->pg_lseg); - pgio->pg_lseg = NULL; + pnfs_generic_pg_cleanup(pgio); /* Sleep for 1 second before retrying */ ssleep(1); goto retry; @@ -871,8 +883,6 @@ out_mds: 0, NFS4_MAX_UINT64, IOMODE_READ, NFS_I(pgio->pg_inode)->layout, pgio->pg_lseg); - pnfs_put_lseg(pgio->pg_lseg); - pgio->pg_lseg = NULL; pgio->pg_maxretrans = 0; nfs_pageio_reset_read_mds(pgio); } @@ -916,8 +926,7 @@ retry: if (!ds) { if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg)) goto out_mds; - pnfs_put_lseg(pgio->pg_lseg); - pgio->pg_lseg = NULL; + pnfs_generic_pg_cleanup(pgio); /* Sleep for 1 second before retrying */ ssleep(1); goto retry; @@ -939,8 +948,6 @@ out_mds: 0, NFS4_MAX_UINT64, IOMODE_RW, NFS_I(pgio->pg_inode)->layout, pgio->pg_lseg); - pnfs_put_lseg(pgio->pg_lseg); - pgio->pg_lseg = NULL; pgio->pg_maxretrans = 0; nfs_pageio_reset_write_mds(pgio); pgio->pg_error = -EAGAIN; @@ -953,8 +960,8 @@ ff_layout_pg_get_mirror_count_write(struct nfs_pageio_descriptor *pgio, if (!pgio->pg_lseg) { pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, nfs_req_openctx(req), - 0, - NFS4_MAX_UINT64, + req_offset(req), + req->wb_bytes, IOMODE_RW, false, GFP_NOFS); @@ -1028,11 +1035,24 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs) } } +static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr) +{ + u32 idx = hdr->pgio_mirror_idx + 1; + int new_idx = 0; + + if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx + 1, &new_idx)) + ff_layout_send_layouterror(hdr->lseg); + else + pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg); + pnfs_read_resend_pnfs(hdr, new_idx); +} + static void ff_layout_reset_read(struct nfs_pgio_header *hdr) { struct rpc_task *task = &hdr->task; pnfs_layoutcommit_inode(hdr->inode, false); + pnfs_error_mark_layout_for_return(hdr->inode, hdr->lseg); if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { dprintk("%s Reset task %5u for i/o through MDS " @@ -1113,7 +1133,7 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, nfs4_delete_deviceid(devid->ld, devid->nfs_client, &devid->deviceid); rpc_wake_up(&tbl->slot_tbl_waitq); - /* fall through */ + fallthrough; default: if (ff_layout_avoid_mds_available_ds(lseg)) return -NFS4ERR_RESET_TO_PNFS; @@ -1234,7 +1254,13 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, break; case NFS4ERR_NXIO: ff_layout_mark_ds_unreachable(lseg, idx); - /* Fallthrough */ + /* 
+ * Don't return the layout if this is a read and we still + * have layouts to try + */ + if (opnum == OP_READ) + break; + fallthrough; default: pnfs_error_mark_layout_for_return(lseg->pls_layout->plh_inode, lseg); @@ -1247,7 +1273,6 @@ static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, static int ff_layout_read_done_cb(struct rpc_task *task, struct nfs_pgio_header *hdr) { - int new_idx = hdr->pgio_mirror_idx; int err; if (task->tk_status < 0) { @@ -1267,10 +1292,6 @@ static int ff_layout_read_done_cb(struct rpc_task *task, clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags); switch (err) { case -NFS4ERR_RESET_TO_PNFS: - if (ff_layout_choose_best_ds_for_read(hdr->lseg, - hdr->pgio_mirror_idx + 1, - &new_idx)) - goto out_layouterror; set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags); return task->tk_status; case -NFS4ERR_RESET_TO_MDS: @@ -1281,10 +1302,6 @@ static int ff_layout_read_done_cb(struct rpc_task *task, } return 0; -out_layouterror: - ff_layout_read_record_layoutstats_done(task, hdr); - ff_layout_send_layouterror(hdr->lseg); - hdr->pgio_mirror_idx = new_idx; out_eagain: rpc_restart_call_prepare(task); return -EAGAIN; @@ -1411,10 +1428,9 @@ static void ff_layout_read_release(void *data) struct nfs_pgio_header *hdr = data; ff_layout_read_record_layoutstats_done(&hdr->task, hdr); - if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) { - ff_layout_send_layouterror(hdr->lseg); - pnfs_read_resend_pnfs(hdr); - } else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) + if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags)) + ff_layout_resend_pnfs_read(hdr); + else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags)) ff_layout_reset_read(hdr); pnfs_generic_rw_release(data); } diff --git a/fs/nfs/fs_context.c b/fs/nfs/fs_context.c index ccc88be88d6a..524812984e2d 100644 --- a/fs/nfs/fs_context.c +++ b/fs/nfs/fs_context.c @@ -651,21 +651,21 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) { case Opt_xprt_udp6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_udp: ctx->flags &= ~NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_xprt_tcp6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_tcp: ctx->flags |= NFS_MOUNT_TCP; ctx->nfs_server.protocol = XPRT_TRANSPORT_TCP; break; case Opt_xprt_rdma6: protofamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_rdma: /* vector side protocols to TCP */ ctx->flags |= NFS_MOUNT_TCP; @@ -684,13 +684,13 @@ static int nfs_fs_context_parse_param(struct fs_context *fc, switch (lookup_constant(nfs_xprt_protocol_tokens, param->string, -1)) { case Opt_xprt_udp6: mountfamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_udp: ctx->mount_server.protocol = XPRT_TRANSPORT_UDP; break; case Opt_xprt_tcp6: mountfamily = AF_INET6; - /* fall through */ + fallthrough; case Opt_xprt_tcp: ctx->mount_server.protocol = XPRT_TRANSPORT_TCP; break; @@ -899,9 +899,11 @@ static int nfs23_parse_monolithic(struct fs_context *fc, ctx->version = NFS_DEFAULT_VERSION; switch (data->version) { case 1: - data->namlen = 0; /* fall through */ + data->namlen = 0; + fallthrough; case 2: - data->bsize = 0; /* fall through */ + data->bsize = 0; + fallthrough; case 3: if (data->flags & NFS_MOUNT_VER3) goto out_no_v3; @@ -909,14 +911,14 @@ static int nfs23_parse_monolithic(struct fs_context *fc, memcpy(data->root.data, data->old_root.data, NFS2_FHSIZE); /* Turn off security negotiation */ extra_flags |= 
NFS_MOUNT_SECFLAVOUR; - /* fall through */ + fallthrough; case 4: if (data->flags & NFS_MOUNT_SECFLAVOUR) goto out_no_sec; - /* fall through */ + fallthrough; case 5: memset(data->context, 0, sizeof(data->context)); - /* fall through */ + fallthrough; case 6: if (data->flags & NFS_MOUNT_VER3) { if (data->root.size > NFS3_FHSIZE || data->root.size == 0) @@ -982,7 +984,7 @@ static int nfs23_parse_monolithic(struct fs_context *fc, /* * The legacy version 6 binary mount data from userspace has a * field used only to transport selinux information into the - * the kernel. To continue to support that functionality we + * kernel. To continue to support that functionality we * have a touch of selinux knowledge here in the NFS code. The * userspace code converted context=blah to just blah so we are * converting back to the full string selinux understands. diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 0bf1f835de01..aa6493905bbe 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -193,6 +193,7 @@ bool nfs_check_cache_invalid(struct inode *inode, unsigned long flags) return nfs_check_cache_invalid_not_delegated(inode, flags); } +EXPORT_SYMBOL_GPL(nfs_check_cache_invalid); static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) { @@ -204,7 +205,8 @@ static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags) flags &= ~NFS_INO_INVALID_OTHER; flags &= ~(NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE - | NFS_INO_REVAL_PAGECACHE); + | NFS_INO_REVAL_PAGECACHE + | NFS_INO_INVALID_XATTR); } if (inode->i_mapping->nrpages == 0) @@ -233,11 +235,13 @@ static void nfs_zap_caches_locked(struct inode *inode) | NFS_INO_INVALID_DATA | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR | NFS_INO_REVAL_PAGECACHE); } else nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR | NFS_INO_REVAL_PAGECACHE); nfs_zap_label_cache_locked(nfsi); } @@ -542,6 +546,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st inode->i_gid = fattr->gid; else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) nfs_set_cache_invalid(inode, NFS_INO_INVALID_OTHER); + if (nfs_server_capable(inode, NFS_CAP_XATTR)) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR); if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) inode->i_blocks = fattr->du.nfs2.blocks; if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { @@ -794,8 +800,10 @@ int nfs_getattr(const struct path *path, struct kstat *stat, trace_nfs_getattr_enter(inode); - if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) + if ((query_flags & AT_STATX_DONT_SYNC) && !force_sync) { + nfs_readdirplus_parent_cache_hit(path->dentry); goto out_no_update; + } /* Flush out writes to the server in order to update c/mtime. 
*/ if ((request_mask & (STATX_CTIME|STATX_MTIME)) && @@ -1375,6 +1383,8 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr) inode_set_iversion_raw(inode, fattr->change_attr); if (S_ISDIR(inode->i_mode)) nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA); + else if (nfs_server_capable(inode, NFS_CAP_XATTR)) + nfs_set_cache_invalid(inode, NFS_INO_INVALID_XATTR); } /* If we have atomic WCC data, we may update some attributes */ ts = inode->i_ctime; @@ -1892,7 +1902,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) if (!(have_writers || have_delegation)) { invalid |= NFS_INO_INVALID_DATA | NFS_INO_INVALID_ACCESS - | NFS_INO_INVALID_ACL; + | NFS_INO_INVALID_ACL + | NFS_INO_INVALID_XATTR; /* Force revalidate of all attributes */ save_cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME @@ -2095,6 +2106,9 @@ struct inode *nfs_alloc_inode(struct super_block *sb) #if IS_ENABLED(CONFIG_NFS_V4) nfsi->nfs4_acl = NULL; #endif /* CONFIG_NFS_V4 */ +#ifdef CONFIG_NFS_V4_2 + nfsi->xattr_cache = NULL; +#endif return &nfsi->vfs_inode; } EXPORT_SYMBOL_GPL(nfs_alloc_inode); diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c index 26c94b32d6f4..c6c863382f37 100644 --- a/fs/nfs/nfs3acl.c +++ b/fs/nfs/nfs3acl.c @@ -108,7 +108,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type) case -EPROTONOSUPPORT: dprintk("NFS_V3_ACL extension not supported; disabling\n"); server->caps &= ~NFS_CAP_ACLS; - /* fall through */ + fallthrough; case -ENOTSUPP: status = -EOPNOTSUPP; default: @@ -228,7 +228,7 @@ static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl, dprintk("NFS_V3_ACL SETACL RPC not supported" "(will not retry)\n"); server->caps &= ~NFS_CAP_ACLS; - /* fall through */ + fallthrough; case -ENOTSUPP: status = -EOPNOTSUPP; } diff --git a/fs/nfs/nfs42.h b/fs/nfs/nfs42.h index c891af949886..0fe5aacbcfdf 100644 --- a/fs/nfs/nfs42.h +++ b/fs/nfs/nfs42.h @@ -6,6 +6,8 @@ #ifndef __LINUX_FS_NFS_NFS4_2_H #define __LINUX_FS_NFS_NFS4_2_H +#include <linux/xattr.h> + /* * FIXME: four LAYOUTSTATS calls per compound at most! Do we need to support * more? Need to consider not to pre-alloc too much for a compound. @@ -36,5 +38,27 @@ static inline bool nfs42_files_from_same_server(struct file *in, return nfs4_check_serverowner_major_id(c_in->cl_serverowner, c_out->cl_serverowner); } + +ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name, + void *buf, size_t buflen); +int nfs42_proc_setxattr(struct inode *inode, const char *name, + const void *buf, size_t buflen, int flags); +ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf, + size_t buflen, u64 *cookiep, bool *eofp); +int nfs42_proc_removexattr(struct inode *inode, const char *name); + +/* + * Maximum XDR buffer size needed for a listxattr buffer of buflen size. + * + * The upper boundary is a buffer with all 1-byte sized attribute names. + * They would be 7 bytes long in the eventual buffer ("user.x\0"), and + * 8 bytes long XDR-encoded. + * + * Include the trailing eof word as well. 
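As a concrete check of the sizing rule spelled out in this comment, the standalone sketch below re-implements the helper outside the kernel. The local XATTR_USER_PREFIX_LEN define and the 70-byte buffer are only example values; the arithmetic mirrors nfs42_listxattr_xdrsize(), which is defined right after this comment.

    #include <stdio.h>

    #define XATTR_USER_PREFIX_LEN 5   /* strlen("user.") */

    /* worst case: 1-byte names, so 7 bytes ("user.x\0") each in the
     * caller's buffer, 8 bytes each XDR-encoded (4-byte length plus one
     * name byte padded to 4), plus 4 bytes for the trailing eof word */
    static unsigned int listxattr_xdrsize(unsigned int buflen)
    {
        return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4;
    }

    int main(void)
    {
        /* a 70-byte list buffer holds ten such names: 10 * 8 + 4 = 84 */
        printf("%u\n", listxattr_xdrsize(70));
        return 0;
    }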
+ */ +static inline u32 nfs42_listxattr_xdrsize(u32 buflen) +{ + return ((buflen / (XATTR_USER_PREFIX_LEN + 2)) * 8) + 4; +} #endif /* CONFIG_NFS_V4_2 */ #endif /* __LINUX_FS_NFS_NFS4_2_H */ diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index e2ae54b35dfe..142225f0af59 100644 --- a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -17,6 +17,7 @@ #include "nfs4session.h" #include "internal.h" #include "delegation.h" +#include "nfs4trace.h" #define NFSDBG_FACILITY NFSDBG_PROC static int nfs42_do_offload_cancel_async(struct file *dst, nfs4_stateid *std); @@ -714,7 +715,7 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata) switch (task->tk_status) { case 0: - break; + return; case -NFS4ERR_BADHANDLE: case -ESTALE: pnfs_destroy_layout(NFS_I(inode)); @@ -760,6 +761,8 @@ nfs42_layoutstat_done(struct rpc_task *task, void *calldata) case -EOPNOTSUPP: NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTSTATS; } + + trace_nfs4_layoutstats(inode, &data->args.stateid, task->tk_status); } static void @@ -882,7 +885,7 @@ nfs42_layouterror_done(struct rpc_task *task, void *calldata) switch (task->tk_status) { case 0: - break; + return; case -NFS4ERR_BADHANDLE: case -ESTALE: pnfs_destroy_layout(NFS_I(inode)); @@ -926,6 +929,9 @@ nfs42_layouterror_done(struct rpc_task *task, void *calldata) case -EOPNOTSUPP: NFS_SERVER(inode)->caps &= ~NFS_CAP_LAYOUTERROR; } + + trace_nfs4_layouterror(inode, &data->args.errors[0].stateid, + task->tk_status); } static void @@ -1088,3 +1094,251 @@ out_put_src_lock: nfs_put_lock_context(src_lock); return err; } + +#define NFS4XATTR_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) + +static int _nfs42_proc_removexattr(struct inode *inode, const char *name) +{ + struct nfs_server *server = NFS_SERVER(inode); + struct nfs42_removexattrargs args = { + .fh = NFS_FH(inode), + .xattr_name = name, + }; + struct nfs42_removexattrres res; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVEXATTR], + .rpc_argp = &args, + .rpc_resp = &res, + }; + int ret; + unsigned long timestamp = jiffies; + + ret = nfs4_call_sync(server->client, server, &msg, &args.seq_args, + &res.seq_res, 1); + if (!ret) + nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0); + + return ret; +} + +static int _nfs42_proc_setxattr(struct inode *inode, const char *name, + const void *buf, size_t buflen, int flags) +{ + struct nfs_server *server = NFS_SERVER(inode); + struct page *pages[NFS4XATTR_MAXPAGES]; + struct nfs42_setxattrargs arg = { + .fh = NFS_FH(inode), + .xattr_pages = pages, + .xattr_len = buflen, + .xattr_name = name, + .xattr_flags = flags, + }; + struct nfs42_setxattrres res; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_SETXATTR], + .rpc_argp = &arg, + .rpc_resp = &res, + }; + int ret, np; + unsigned long timestamp = jiffies; + + if (buflen > server->sxasize) + return -ERANGE; + + if (buflen > 0) { + np = nfs4_buf_to_pages_noslab(buf, buflen, arg.xattr_pages); + if (np < 0) + return np; + } else + np = 0; + + ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, + &res.seq_res, 1); + + for (; np > 0; np--) + put_page(pages[np - 1]); + + if (!ret) + nfs4_update_changeattr(inode, &res.cinfo, timestamp, 0); + + return ret; +} + +static ssize_t _nfs42_proc_getxattr(struct inode *inode, const char *name, + void *buf, size_t buflen) +{ + struct nfs_server *server = NFS_SERVER(inode); + struct page *pages[NFS4XATTR_MAXPAGES] = {}; + struct nfs42_getxattrargs arg = { + .fh = NFS_FH(inode), + .xattr_pages = pages, + .xattr_len = buflen, 
+ .xattr_name = name, + }; + struct nfs42_getxattrres res; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_GETXATTR], + .rpc_argp = &arg, + .rpc_resp = &res, + }; + int ret, np; + + ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, + &res.seq_res, 0); + if (ret < 0) + return ret; + + /* + * Normally, the caching is done one layer up, but for successful + * RPCS, always cache the result here, even if the caller was + * just querying the length, or if the reply was too big for + * the caller. This avoids a second RPC in the case of the + * common query-alloc-retrieve cycle for xattrs. + * + * Note that xattr_len is always capped to XATTR_SIZE_MAX. + */ + + nfs4_xattr_cache_add(inode, name, NULL, pages, res.xattr_len); + + if (buflen) { + if (res.xattr_len > buflen) + return -ERANGE; + _copy_from_pages(buf, pages, 0, res.xattr_len); + } + + np = DIV_ROUND_UP(res.xattr_len, PAGE_SIZE); + while (--np >= 0) + __free_page(pages[np]); + + return res.xattr_len; +} + +static ssize_t _nfs42_proc_listxattrs(struct inode *inode, void *buf, + size_t buflen, u64 *cookiep, bool *eofp) +{ + struct nfs_server *server = NFS_SERVER(inode); + struct page **pages; + struct nfs42_listxattrsargs arg = { + .fh = NFS_FH(inode), + .cookie = *cookiep, + }; + struct nfs42_listxattrsres res = { + .eof = false, + .xattr_buf = buf, + .xattr_len = buflen, + }; + struct rpc_message msg = { + .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LISTXATTRS], + .rpc_argp = &arg, + .rpc_resp = &res, + }; + u32 xdrlen; + int ret, np; + + + res.scratch = alloc_page(GFP_KERNEL); + if (!res.scratch) + return -ENOMEM; + + xdrlen = nfs42_listxattr_xdrsize(buflen); + if (xdrlen > server->lxasize) + xdrlen = server->lxasize; + np = xdrlen / PAGE_SIZE + 1; + + pages = kcalloc(np, sizeof(struct page *), GFP_KERNEL); + if (pages == NULL) { + __free_page(res.scratch); + return -ENOMEM; + } + + arg.xattr_pages = pages; + arg.count = xdrlen; + + ret = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, + &res.seq_res, 0); + + if (ret >= 0) { + ret = res.copied; + *cookiep = res.cookie; + *eofp = res.eof; + } + + while (--np >= 0) { + if (pages[np]) + __free_page(pages[np]); + } + + __free_page(res.scratch); + kfree(pages); + + return ret; + +} + +ssize_t nfs42_proc_getxattr(struct inode *inode, const char *name, + void *buf, size_t buflen) +{ + struct nfs4_exception exception = { }; + ssize_t err; + + do { + err = _nfs42_proc_getxattr(inode, name, buf, buflen); + if (err >= 0) + break; + err = nfs4_handle_exception(NFS_SERVER(inode), err, + &exception); + } while (exception.retry); + + return err; +} + +int nfs42_proc_setxattr(struct inode *inode, const char *name, + const void *buf, size_t buflen, int flags) +{ + struct nfs4_exception exception = { }; + int err; + + do { + err = _nfs42_proc_setxattr(inode, name, buf, buflen, flags); + if (!err) + break; + err = nfs4_handle_exception(NFS_SERVER(inode), err, + &exception); + } while (exception.retry); + + return err; +} + +ssize_t nfs42_proc_listxattrs(struct inode *inode, void *buf, + size_t buflen, u64 *cookiep, bool *eofp) +{ + struct nfs4_exception exception = { }; + ssize_t err; + + do { + err = _nfs42_proc_listxattrs(inode, buf, buflen, + cookiep, eofp); + if (err >= 0) + break; + err = nfs4_handle_exception(NFS_SERVER(inode), err, + &exception); + } while (exception.retry); + + return err; +} + +int nfs42_proc_removexattr(struct inode *inode, const char *name) +{ + struct nfs4_exception exception = { }; + int err; + + do { + err = 
_nfs42_proc_removexattr(inode, name); + if (!err) + break; + err = nfs4_handle_exception(NFS_SERVER(inode), err, + &exception); + } while (exception.retry); + + return err; +} diff --git a/fs/nfs/nfs42xattr.c b/fs/nfs/nfs42xattr.c new file mode 100644 index 000000000000..86777996cfec --- /dev/null +++ b/fs/nfs/nfs42xattr.c @@ -0,0 +1,1056 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved. + * + * User extended attribute client side cache functions. + * + * Author: Frank van der Linden <fllinden@amazon.com> + */ +#include <linux/errno.h> +#include <linux/nfs_fs.h> +#include <linux/hashtable.h> +#include <linux/refcount.h> +#include <uapi/linux/xattr.h> + +#include "nfs4_fs.h" +#include "internal.h" + +/* + * User extended attributes client side caching is implemented by having + * a cache structure attached to NFS inodes. This structure is allocated + * when needed, and freed when the cache is zapped. + * + * The cache structure contains as hash table of entries, and a pointer + * to a special-cased entry for the listxattr cache. + * + * Accessing and allocating / freeing the caches is done via reference + * counting. The cache entries use a similar refcounting scheme. + * + * This makes freeing a cache, both from the shrinker and from the + * zap cache path, easy. It also means that, in current use cases, + * the large majority of inodes will not waste any memory, as they + * will never have any user extended attributes assigned to them. + * + * Attribute entries are hashed in to a simple hash table. They are + * also part of an LRU. + * + * There are three shrinkers. + * + * Two shrinkers deal with the cache entries themselves: one for + * large entries (> PAGE_SIZE), and one for smaller entries. The + * shrinker for the larger entries works more aggressively than + * those for the smaller entries. + * + * The other shrinker frees the cache structures themselves. + */ + +/* + * 64 buckets is a good default. There is likely no reasonable + * workload that uses more than even 64 user extended attributes. + * You can certainly add a lot more - but you get what you ask for + * in those circumstances. + */ +#define NFS4_XATTR_HASH_SIZE 64 + +#define NFSDBG_FACILITY NFSDBG_XATTRCACHE + +struct nfs4_xattr_cache; +struct nfs4_xattr_entry; + +struct nfs4_xattr_bucket { + spinlock_t lock; + struct hlist_head hlist; + struct nfs4_xattr_cache *cache; + bool draining; +}; + +struct nfs4_xattr_cache { + struct kref ref; + spinlock_t hash_lock; /* protects hashtable and lru */ + struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE]; + struct list_head lru; + struct list_head dispose; + atomic_long_t nent; + spinlock_t listxattr_lock; + struct inode *inode; + struct nfs4_xattr_entry *listxattr; +}; + +struct nfs4_xattr_entry { + struct kref ref; + struct hlist_node hnode; + struct list_head lru; + struct list_head dispose; + char *xattr_name; + void *xattr_value; + size_t xattr_size; + struct nfs4_xattr_bucket *bucket; + uint32_t flags; +}; + +#define NFS4_XATTR_ENTRY_EXTVAL 0x0001 + +/* + * LRU list of NFS inodes that have xattr caches. + */ +static struct list_lru nfs4_xattr_cache_lru; +static struct list_lru nfs4_xattr_entry_lru; +static struct list_lru nfs4_xattr_large_entry_lru; + +static struct kmem_cache *nfs4_xattr_cache_cachep; + +/* + * Hashing helper functions. 
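The hash table sketched in the comment above is deliberately simple: NFS4_XATTR_HASH_SIZE is a power of two, so the helpers that follow can pick a bucket by masking the hash instead of dividing. A user-space illustration of that design choice; djb2 stands in for the kernel's jhash() purely so the sketch compiles on its own, and the attribute name is arbitrary.

    #include <stdio.h>

    #define HASH_SIZE 64    /* mirrors NFS4_XATTR_HASH_SIZE */

    static unsigned long djb2(const char *s)
    {
        unsigned long h = 5381;

        while (*s)
            h = h * 33 + (unsigned char)*s++;
        return h;
    }

    static unsigned int bucket_of(const char *name)
    {
        /* power-of-two table size: masking with (size - 1) == "% size" */
        return djb2(name) & (HASH_SIZE - 1);
    }

    int main(void)
    {
        printf("user.foo -> bucket %u\n", bucket_of("user.foo"));
        return 0;
    }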
+ */ +static void +nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache) +{ + unsigned int i; + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + INIT_HLIST_HEAD(&cache->buckets[i].hlist); + spin_lock_init(&cache->buckets[i].lock); + cache->buckets[i].cache = cache; + cache->buckets[i].draining = false; + } +} + +/* + * Locking order: + * 1. inode i_lock or bucket lock + * 2. list_lru lock (taken by list_lru_* functions) + */ + +/* + * Wrapper functions to add a cache entry to the right LRU. + */ +static bool +nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry) +{ + struct list_lru *lru; + + lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + return list_lru_add(lru, &entry->lru); +} + +static bool +nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry) +{ + struct list_lru *lru; + + lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + return list_lru_del(lru, &entry->lru); +} + +/* + * This function allocates cache entries. They are the normal + * extended attribute name/value pairs, but may also be a listxattr + * cache. Those allocations use the same entry so that they can be + * treated as one by the memory shrinker. + * + * xattr cache entries are allocated together with names. If the + * value fits in to one page with the entry structure and the name, + * it will also be part of the same allocation (kmalloc). This is + * expected to be the vast majority of cases. Larger allocations + * have a value pointer that is allocated separately by kvmalloc. + * + * Parameters: + * + * @name: Name of the extended attribute. NULL for listxattr cache + * entry. + * @value: Value of attribute, or listxattr cache. NULL if the + * value is to be copied from pages instead. + * @pages: Pages to copy the value from, if not NULL. Passed in to + * make it easier to copy the value after an RPC, even if + * the value will not be passed up to application (e.g. + * for a 'query' getxattr with NULL buffer). + * @len: Length of the value. Can be 0 for zero-length attribues. + * @value and @pages will be NULL if @len is 0. 
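To make the inline-versus-separate decision described above concrete: a value is co-allocated with the entry and its name only when everything fits in one page; anything bigger gets its own kvmalloc() and the NFS4_XATTR_ENTRY_EXTVAL flag. The sketch below reproduces just that size test with assumed numbers (the 104-byte entry size and the 4096-byte page are stand-ins, not values taken from the patch).

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define EXAMPLE_PAGE_SIZE   4096  /* assumed */
    #define EXAMPLE_ENTRY_SIZE  104   /* stand-in for sizeof(struct nfs4_xattr_entry) */

    static bool value_fits_inline(const char *name, size_t value_len)
    {
        size_t alloclen = EXAMPLE_ENTRY_SIZE + strlen(name) + 1;

        /* value shares the entry's kmalloc only if it all fits one page */
        return alloclen + value_len <= EXAMPLE_PAGE_SIZE;
    }

    int main(void)
    {
        /* small value: co-allocated with the entry */
        printf("1000-byte value inline: %d\n",
               value_fits_inline("user.small", 1000));
        /* large value: separate kvmalloc, entry carries the EXTVAL flag */
        printf("8000-byte value inline: %d\n",
               value_fits_inline("user.large", 8000));
        return 0;
    }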
+ */ +static struct nfs4_xattr_entry * +nfs4_xattr_alloc_entry(const char *name, const void *value, + struct page **pages, size_t len) +{ + struct nfs4_xattr_entry *entry; + void *valp; + char *namep; + size_t alloclen, slen; + char *buf; + uint32_t flags; + + BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) + + XATTR_NAME_MAX + 1 > PAGE_SIZE); + + alloclen = sizeof(struct nfs4_xattr_entry); + if (name != NULL) { + slen = strlen(name) + 1; + alloclen += slen; + } else + slen = 0; + + if (alloclen + len <= PAGE_SIZE) { + alloclen += len; + flags = 0; + } else { + flags = NFS4_XATTR_ENTRY_EXTVAL; + } + + buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (buf == NULL) + return NULL; + entry = (struct nfs4_xattr_entry *)buf; + + if (name != NULL) { + namep = buf + sizeof(struct nfs4_xattr_entry); + memcpy(namep, name, slen); + } else { + namep = NULL; + } + + + if (flags & NFS4_XATTR_ENTRY_EXTVAL) { + valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (valp == NULL) { + kfree(buf); + return NULL; + } + } else if (len != 0) { + valp = buf + sizeof(struct nfs4_xattr_entry) + slen; + } else + valp = NULL; + + if (valp != NULL) { + if (value != NULL) + memcpy(valp, value, len); + else + _copy_from_pages(valp, pages, 0, len); + } + + entry->flags = flags; + entry->xattr_value = valp; + kref_init(&entry->ref); + entry->xattr_name = namep; + entry->xattr_size = len; + entry->bucket = NULL; + INIT_LIST_HEAD(&entry->lru); + INIT_LIST_HEAD(&entry->dispose); + INIT_HLIST_NODE(&entry->hnode); + + return entry; +} + +static void +nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry) +{ + if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) + kvfree(entry->xattr_value); + kfree(entry); +} + +static void +nfs4_xattr_free_entry_cb(struct kref *kref) +{ + struct nfs4_xattr_entry *entry; + + entry = container_of(kref, struct nfs4_xattr_entry, ref); + + if (WARN_ON(!list_empty(&entry->lru))) + return; + + nfs4_xattr_free_entry(entry); +} + +static void +nfs4_xattr_free_cache_cb(struct kref *kref) +{ + struct nfs4_xattr_cache *cache; + int i; + + cache = container_of(kref, struct nfs4_xattr_cache, ref); + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist))) + return; + cache->buckets[i].draining = false; + } + + cache->listxattr = NULL; + + kmem_cache_free(nfs4_xattr_cache_cachep, cache); + +} + +static struct nfs4_xattr_cache * +nfs4_xattr_alloc_cache(void) +{ + struct nfs4_xattr_cache *cache; + + cache = kmem_cache_alloc(nfs4_xattr_cache_cachep, + GFP_KERNEL_ACCOUNT | GFP_NOFS); + if (cache == NULL) + return NULL; + + kref_init(&cache->ref); + atomic_long_set(&cache->nent, 0); + + return cache; +} + +/* + * Set the listxattr cache, which is a special-cased cache entry. + * The special value ERR_PTR(-ESTALE) is used to indicate that + * the cache is being drained - this prevents a new listxattr + * cache from being added to what is now a stale cache. 
+ */ +static int +nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache, + struct nfs4_xattr_entry *new) +{ + struct nfs4_xattr_entry *old; + int ret = 1; + + spin_lock(&cache->listxattr_lock); + + old = cache->listxattr; + + if (old == ERR_PTR(-ESTALE)) { + ret = 0; + goto out; + } + + cache->listxattr = new; + if (new != NULL && new != ERR_PTR(-ESTALE)) + nfs4_xattr_entry_lru_add(new); + + if (old != NULL) { + nfs4_xattr_entry_lru_del(old); + kref_put(&old->ref, nfs4_xattr_free_entry_cb); + } +out: + spin_unlock(&cache->listxattr_lock); + + return ret; +} + +/* + * Unlink a cache from its parent inode, clearing out an invalid + * cache. Must be called with i_lock held. + */ +static struct nfs4_xattr_cache * +nfs4_xattr_cache_unlink(struct inode *inode) +{ + struct nfs_inode *nfsi; + struct nfs4_xattr_cache *oldcache; + + nfsi = NFS_I(inode); + + oldcache = nfsi->xattr_cache; + if (oldcache != NULL) { + list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru); + oldcache->inode = NULL; + } + nfsi->xattr_cache = NULL; + nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR; + + return oldcache; + +} + +/* + * Discard a cache. Called by get_cache() if there was an old, + * invalid cache. Can also be called from a shrinker callback. + * + * The cache is dead, it has already been unlinked from its inode, + * and no longer appears on the cache LRU list. + * + * Mark all buckets as draining, so that no new entries are added. This + * could still happen in the unlikely, but possible case that another + * thread had grabbed a reference before it was unlinked from the inode, + * and is still holding it for an add operation. + * + * Remove all entries from the LRU lists, so that there is no longer + * any way to 'find' this cache. Then, remove the entries from the hash + * table. + * + * At that point, the cache will remain empty and can be freed when the final + * reference drops, which is very likely the kref_put at the end of + * this function, or the one called immediately afterwards in the + * shrinker callback. + */ +static void +nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache) +{ + unsigned int i; + struct nfs4_xattr_entry *entry; + struct nfs4_xattr_bucket *bucket; + struct hlist_node *n; + + nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE)); + + for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) { + bucket = &cache->buckets[i]; + + spin_lock(&bucket->lock); + bucket->draining = true; + hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) { + nfs4_xattr_entry_lru_del(entry); + hlist_del_init(&entry->hnode); + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } + spin_unlock(&bucket->lock); + } + + atomic_long_set(&cache->nent, 0); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +/* + * Get a referenced copy of the cache structure. Avoid doing allocs + * while holding i_lock. Which means that we do some optimistic allocation, + * and might have to free the result in rare cases. + * + * This function only checks the NFS_INO_INVALID_XATTR cache validity bit + * and acts accordingly, replacing the cache when needed. For the read case + * (!add), this means that the caller must make sure that the cache + * is valid before caling this function. getxattr and listxattr call + * revalidate_inode to do this. The attribute cache timeout (for the + * non-delegated case) is expected to be dealt with in the revalidate + * call. 
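The allocation strategy this comment describes (allocate optimistically with i_lock dropped, retake the lock, and discard the allocation if the cache was invalidated again or somebody else installed one first) is a generic pattern. Here is a small user-space rendition of it, with a pthread mutex in place of i_lock and every name invented for the sketch.

    #include <pthread.h>
    #include <stdlib.h>

    struct cache {
        long nent;              /* stand-in for the real structure */
    };

    static pthread_mutex_t ilock = PTHREAD_MUTEX_INITIALIZER;
    static struct cache *attached;  /* plays the role of nfsi->xattr_cache */

    static struct cache *get_cache(void)
    {
        struct cache *c, *newc;

        pthread_mutex_lock(&ilock);
        c = attached;
        pthread_mutex_unlock(&ilock);
        if (c)
            return c;

        newc = calloc(1, sizeof(*newc));   /* allocate with the lock dropped */
        if (!newc)
            return NULL;

        pthread_mutex_lock(&ilock);
        if (attached) {
            c = attached;                  /* raced: keep the winner's cache */
        } else {
            attached = newc;               /* won the race: install ours */
            c = newc;
            newc = NULL;
        }
        pthread_mutex_unlock(&ilock);

        free(newc);                        /* free(NULL) is a no-op */
        return c;
    }

    int main(void)
    {
        return get_cache() ? 0 : 1;
    }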
+ */ + +static struct nfs4_xattr_cache * +nfs4_xattr_get_cache(struct inode *inode, int add) +{ + struct nfs_inode *nfsi; + struct nfs4_xattr_cache *cache, *oldcache, *newcache; + + nfsi = NFS_I(inode); + + cache = oldcache = NULL; + + spin_lock(&inode->i_lock); + + if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) + oldcache = nfs4_xattr_cache_unlink(inode); + else + cache = nfsi->xattr_cache; + + if (cache != NULL) + kref_get(&cache->ref); + + spin_unlock(&inode->i_lock); + + if (add && cache == NULL) { + newcache = NULL; + + cache = nfs4_xattr_alloc_cache(); + if (cache == NULL) + goto out; + + spin_lock(&inode->i_lock); + if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) { + /* + * The cache was invalidated again. Give up, + * since what we want to enter is now likely + * outdated anyway. + */ + spin_unlock(&inode->i_lock); + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + cache = NULL; + goto out; + } + + /* + * Check if someone beat us to it. + */ + if (nfsi->xattr_cache != NULL) { + newcache = nfsi->xattr_cache; + kref_get(&newcache->ref); + } else { + kref_get(&cache->ref); + nfsi->xattr_cache = cache; + cache->inode = inode; + list_lru_add(&nfs4_xattr_cache_lru, &cache->lru); + } + + spin_unlock(&inode->i_lock); + + /* + * If there was a race, throw away the cache we just + * allocated, and use the new one allocated by someone + * else. + */ + if (newcache != NULL) { + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + cache = newcache; + } + } + +out: + /* + * Discard the now orphaned old cache. + */ + if (oldcache != NULL) + nfs4_xattr_discard_cache(oldcache); + + return cache; +} + +static inline struct nfs4_xattr_bucket * +nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name) +{ + return &cache->buckets[jhash(name, strlen(name), 0) & + (ARRAY_SIZE(cache->buckets) - 1)]; +} + +static struct nfs4_xattr_entry * +nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name) +{ + struct nfs4_xattr_entry *entry; + + entry = NULL; + + hlist_for_each_entry(entry, &bucket->hlist, hnode) { + if (!strcmp(entry->xattr_name, name)) + break; + } + + return entry; +} + +static int +nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache, + struct nfs4_xattr_entry *entry) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *oldentry = NULL; + int ret = 1; + + bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name); + entry->bucket = bucket; + + spin_lock(&bucket->lock); + + if (bucket->draining) { + ret = 0; + goto out; + } + + oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name); + if (oldentry != NULL) { + hlist_del_init(&oldentry->hnode); + nfs4_xattr_entry_lru_del(oldentry); + } else { + atomic_long_inc(&cache->nent); + } + + hlist_add_head(&entry->hnode, &bucket->hlist); + nfs4_xattr_entry_lru_add(entry); + +out: + spin_unlock(&bucket->lock); + + if (oldentry != NULL) + kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb); + + return ret; +} + +static void +nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *entry; + + bucket = nfs4_xattr_hash_bucket(cache, name); + + spin_lock(&bucket->lock); + + entry = nfs4_xattr_get_entry(bucket, name); + if (entry != NULL) { + hlist_del_init(&entry->hnode); + nfs4_xattr_entry_lru_del(entry); + atomic_long_dec(&cache->nent); + } + + spin_unlock(&bucket->lock); + + if (entry != NULL) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); +} + +static struct nfs4_xattr_entry * +nfs4_xattr_hash_find(struct nfs4_xattr_cache 
*cache, const char *name) +{ + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_entry *entry; + + bucket = nfs4_xattr_hash_bucket(cache, name); + + spin_lock(&bucket->lock); + + entry = nfs4_xattr_get_entry(bucket, name); + if (entry != NULL) + kref_get(&entry->ref); + + spin_unlock(&bucket->lock); + + return entry; +} + +/* + * Entry point to retrieve an entry from the cache. + */ +ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf, + ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + ssize_t ret; + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return -ENOENT; + + ret = 0; + entry = nfs4_xattr_hash_find(cache, name); + + if (entry != NULL) { + dprintk("%s: cache hit '%s', len %lu\n", __func__, + entry->xattr_name, (unsigned long)entry->xattr_size); + if (buflen == 0) { + /* Length probe only */ + ret = entry->xattr_size; + } else if (buflen < entry->xattr_size) + ret = -ERANGE; + else { + memcpy(buf, entry->xattr_value, entry->xattr_size); + ret = entry->xattr_size; + } + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } else { + dprintk("%s: cache miss '%s'\n", __func__, name); + ret = -ENOENT; + } + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + + return ret; +} + +/* + * Retrieve a cached list of xattrs from the cache. + */ +ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + ssize_t ret; + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return -ENOENT; + + spin_lock(&cache->listxattr_lock); + + entry = cache->listxattr; + + if (entry != NULL && entry != ERR_PTR(-ESTALE)) { + if (buflen == 0) { + /* Length probe only */ + ret = entry->xattr_size; + } else if (entry->xattr_size > buflen) + ret = -ERANGE; + else { + memcpy(buf, entry->xattr_value, entry->xattr_size); + ret = entry->xattr_size; + } + } else { + ret = -ENOENT; + } + + spin_unlock(&cache->listxattr_lock); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + + return ret; +} + +/* + * Add an xattr to the cache. + * + * This also invalidates the xattr list cache. + */ +void nfs4_xattr_cache_add(struct inode *inode, const char *name, + const char *buf, struct page **pages, ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + + dprintk("%s: add '%s' len %lu\n", __func__, + name, (unsigned long)buflen); + + cache = nfs4_xattr_get_cache(inode, 1); + if (cache == NULL) + return; + + entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen); + if (entry == NULL) + goto out; + + (void)nfs4_xattr_set_listcache(cache, NULL); + + if (!nfs4_xattr_hash_add(cache, entry)) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + +out: + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + + +/* + * Remove an xattr from the cache. + * + * This also invalidates the xattr list cache. + */ +void nfs4_xattr_cache_remove(struct inode *inode, const char *name) +{ + struct nfs4_xattr_cache *cache; + + dprintk("%s: remove '%s'\n", __func__, name); + + cache = nfs4_xattr_get_cache(inode, 0); + if (cache == NULL) + return; + + (void)nfs4_xattr_set_listcache(cache, NULL); + nfs4_xattr_hash_remove(cache, name); + + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +/* + * Cache listxattr output, replacing any possible old one. 
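For orientation, this is how the entry points above are meant to sit in front of the RPC side added earlier in this patch. The wrapper below is purely illustrative (the real getxattr handler is not part of this hunk), but it only uses behaviour visible here: nfs4_xattr_cache_get() returns -ENOENT on a miss, and nfs42_proc_getxattr() adds its own successful replies to the cache.

    /* hypothetical caller, not part of the patch */
    static ssize_t example_get_user_xattr(struct inode *inode, const char *name,
                                          void *buf, size_t buflen)
    {
        ssize_t ret;

        ret = nfs4_xattr_cache_get(inode, name, buf, buflen);
        if (ret != -ENOENT)
            return ret;     /* cache hit, -ERANGE, or another error */

        /* miss: ask the server; the RPC path caches the reply itself */
        return nfs42_proc_getxattr(inode, name, buf, buflen);
    }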
+ */ +void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf, + ssize_t buflen) +{ + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry; + + cache = nfs4_xattr_get_cache(inode, 1); + if (cache == NULL) + return; + + entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen); + if (entry == NULL) + goto out; + + /* + * This is just there to be able to get to bucket->cache, + * which is obviously the same for all buckets, so just + * use bucket 0. + */ + entry->bucket = &cache->buckets[0]; + + if (!nfs4_xattr_set_listcache(cache, entry)) + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + +out: + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); +} + +/* + * Zap the entire cache. Called when an inode is evicted. + */ +void nfs4_xattr_cache_zap(struct inode *inode) +{ + struct nfs4_xattr_cache *oldcache; + + spin_lock(&inode->i_lock); + oldcache = nfs4_xattr_cache_unlink(inode); + spin_unlock(&inode->i_lock); + + if (oldcache) + nfs4_xattr_discard_cache(oldcache); +} + +/* + * The entry LRU is shrunk more aggressively than the cache LRU, + * by settings @seeks to 1. + * + * Cache structures are freed only when they've become empty, after + * pruning all but one entry. + */ + +static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink, + struct shrink_control *sc); +static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink, + struct shrink_control *sc); + +static struct shrinker nfs4_xattr_cache_shrinker = { + .count_objects = nfs4_xattr_cache_count, + .scan_objects = nfs4_xattr_cache_scan, + .seeks = DEFAULT_SEEKS, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static struct shrinker nfs4_xattr_entry_shrinker = { + .count_objects = nfs4_xattr_entry_count, + .scan_objects = nfs4_xattr_entry_scan, + .seeks = DEFAULT_SEEKS, + .batch = 512, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static struct shrinker nfs4_xattr_large_entry_shrinker = { + .count_objects = nfs4_xattr_entry_count, + .scan_objects = nfs4_xattr_entry_scan, + .seeks = 1, + .batch = 512, + .flags = SHRINKER_MEMCG_AWARE, +}; + +static enum lru_status +cache_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) +{ + struct list_head *dispose = arg; + struct inode *inode; + struct nfs4_xattr_cache *cache = container_of(item, + struct nfs4_xattr_cache, lru); + + if (atomic_long_read(&cache->nent) > 1) + return LRU_SKIP; + + /* + * If a cache structure is on the LRU list, we know that + * its inode is valid. Try to lock it to break the link. + * Since we're inverting the lock order here, only try. 
+ */ + inode = cache->inode; + + if (!spin_trylock(&inode->i_lock)) + return LRU_SKIP; + + kref_get(&cache->ref); + + cache->inode = NULL; + NFS_I(inode)->xattr_cache = NULL; + NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR; + list_lru_isolate(lru, &cache->lru); + + spin_unlock(&inode->i_lock); + + list_add_tail(&cache->dispose, dispose); + return LRU_REMOVED; +} + +static unsigned long +nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + LIST_HEAD(dispose); + unsigned long freed; + struct nfs4_xattr_cache *cache; + + freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc, + cache_lru_isolate, &dispose); + while (!list_empty(&dispose)) { + cache = list_first_entry(&dispose, struct nfs4_xattr_cache, + dispose); + list_del_init(&cache->dispose); + nfs4_xattr_discard_cache(cache); + kref_put(&cache->ref, nfs4_xattr_free_cache_cb); + } + + return freed; +} + + +static unsigned long +nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long count; + + count = list_lru_count(&nfs4_xattr_cache_lru); + return vfs_pressure_ratio(count); +} + +static enum lru_status +entry_lru_isolate(struct list_head *item, + struct list_lru_one *lru, spinlock_t *lru_lock, void *arg) +{ + struct list_head *dispose = arg; + struct nfs4_xattr_bucket *bucket; + struct nfs4_xattr_cache *cache; + struct nfs4_xattr_entry *entry = container_of(item, + struct nfs4_xattr_entry, lru); + + bucket = entry->bucket; + cache = bucket->cache; + + /* + * Unhook the entry from its parent (either a cache bucket + * or a cache structure if it's a listxattr buf), so that + * it's no longer found. Then add it to the isolate list, + * to be freed later. + * + * In both cases, we're reverting lock order, so use + * trylock and skip the entry if we can't get the lock. + */ + if (entry->xattr_name != NULL) { + /* Regular cache entry */ + if (!spin_trylock(&bucket->lock)) + return LRU_SKIP; + + kref_get(&entry->ref); + + hlist_del_init(&entry->hnode); + atomic_long_dec(&cache->nent); + list_lru_isolate(lru, &entry->lru); + + spin_unlock(&bucket->lock); + } else { + /* Listxattr cache entry */ + if (!spin_trylock(&cache->listxattr_lock)) + return LRU_SKIP; + + kref_get(&entry->ref); + + cache->listxattr = NULL; + list_lru_isolate(lru, &entry->lru); + + spin_unlock(&cache->listxattr_lock); + } + + list_add_tail(&entry->dispose, dispose); + return LRU_REMOVED; +} + +static unsigned long +nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc) +{ + LIST_HEAD(dispose); + unsigned long freed; + struct nfs4_xattr_entry *entry; + struct list_lru *lru; + + lru = (shrink == &nfs4_xattr_large_entry_shrinker) ? + &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose); + + while (!list_empty(&dispose)) { + entry = list_first_entry(&dispose, struct nfs4_xattr_entry, + dispose); + list_del_init(&entry->dispose); + + /* + * Drop two references: the one that we just grabbed + * in entry_lru_isolate, and the one that was set + * when the entry was first allocated. + */ + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + kref_put(&entry->ref, nfs4_xattr_free_entry_cb); + } + + return freed; +} + +static unsigned long +nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc) +{ + unsigned long count; + struct list_lru *lru; + + lru = (shrink == &nfs4_xattr_large_entry_shrinker) ? 
+ &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru; + + count = list_lru_count(lru); + return vfs_pressure_ratio(count); +} + + +static void nfs4_xattr_cache_init_once(void *p) +{ + struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p; + + spin_lock_init(&cache->listxattr_lock); + atomic_long_set(&cache->nent, 0); + nfs4_xattr_hash_init(cache); + cache->listxattr = NULL; + INIT_LIST_HEAD(&cache->lru); + INIT_LIST_HEAD(&cache->dispose); +} + +int __init nfs4_xattr_cache_init(void) +{ + int ret = 0; + + nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache", + sizeof(struct nfs4_xattr_cache), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT), + nfs4_xattr_cache_init_once); + if (nfs4_xattr_cache_cachep == NULL) + return -ENOMEM; + + ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru, + &nfs4_xattr_large_entry_shrinker); + if (ret) + goto out4; + + ret = list_lru_init_memcg(&nfs4_xattr_entry_lru, + &nfs4_xattr_entry_shrinker); + if (ret) + goto out3; + + ret = list_lru_init_memcg(&nfs4_xattr_cache_lru, + &nfs4_xattr_cache_shrinker); + if (ret) + goto out2; + + ret = register_shrinker(&nfs4_xattr_cache_shrinker); + if (ret) + goto out1; + + ret = register_shrinker(&nfs4_xattr_entry_shrinker); + if (ret) + goto out; + + ret = register_shrinker(&nfs4_xattr_large_entry_shrinker); + if (!ret) + return 0; + + unregister_shrinker(&nfs4_xattr_entry_shrinker); +out: + unregister_shrinker(&nfs4_xattr_cache_shrinker); +out1: + list_lru_destroy(&nfs4_xattr_cache_lru); +out2: + list_lru_destroy(&nfs4_xattr_entry_lru); +out3: + list_lru_destroy(&nfs4_xattr_large_entry_lru); +out4: + kmem_cache_destroy(nfs4_xattr_cache_cachep); + + return ret; +} + +void nfs4_xattr_cache_exit(void) +{ + unregister_shrinker(&nfs4_xattr_entry_shrinker); + unregister_shrinker(&nfs4_xattr_cache_shrinker); + list_lru_destroy(&nfs4_xattr_entry_lru); + list_lru_destroy(&nfs4_xattr_cache_lru); + kmem_cache_destroy(nfs4_xattr_cache_cachep); +} diff --git a/fs/nfs/nfs42xdr.c b/fs/nfs/nfs42xdr.c index c03f3246d6c5..cc50085e151c 100644 --- a/fs/nfs/nfs42xdr.c +++ b/fs/nfs/nfs42xdr.c @@ -169,6 +169,78 @@ decode_clone_maxsz + \ decode_getattr_maxsz) +/* Not limited by NFS itself, limited by the generic xattr code */ +#define nfs4_xattr_name_maxsz XDR_QUADLEN(XATTR_NAME_MAX) + +#define encode_getxattr_maxsz (op_encode_hdr_maxsz + 1 + \ + nfs4_xattr_name_maxsz) +#define decode_getxattr_maxsz (op_decode_hdr_maxsz + 1 + 1) +#define encode_setxattr_maxsz (op_encode_hdr_maxsz + \ + 1 + nfs4_xattr_name_maxsz + 1) +#define decode_setxattr_maxsz (op_decode_hdr_maxsz + decode_change_info_maxsz) +#define encode_listxattrs_maxsz (op_encode_hdr_maxsz + 2 + 1) +#define decode_listxattrs_maxsz (op_decode_hdr_maxsz + 2 + 1 + 1) +#define encode_removexattr_maxsz (op_encode_hdr_maxsz + 1 + \ + nfs4_xattr_name_maxsz) +#define decode_removexattr_maxsz (op_decode_hdr_maxsz + \ + decode_change_info_maxsz) + +#define NFS4_enc_getxattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_getxattr_maxsz) +#define NFS4_dec_getxattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_getxattr_maxsz) +#define NFS4_enc_setxattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_setxattr_maxsz) +#define NFS4_dec_setxattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_setxattr_maxsz) +#define NFS4_enc_listxattrs_sz (compound_encode_hdr_maxsz + \ + 
encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_listxattrs_maxsz) +#define NFS4_dec_listxattrs_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_listxattrs_maxsz) +#define NFS4_enc_removexattr_sz (compound_encode_hdr_maxsz + \ + encode_sequence_maxsz + \ + encode_putfh_maxsz + \ + encode_removexattr_maxsz) +#define NFS4_dec_removexattr_sz (compound_decode_hdr_maxsz + \ + decode_sequence_maxsz + \ + decode_putfh_maxsz + \ + decode_removexattr_maxsz) + +/* + * These values specify the maximum amount of data that is not + * associated with the extended attribute name or extended + * attribute list in the SETXATTR, GETXATTR and LISTXATTR + * respectively. + */ +const u32 nfs42_maxsetxattr_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_encode_hdr_maxsz + + encode_sequence_maxsz + + encode_putfh_maxsz + 1 + + nfs4_xattr_name_maxsz) + * XDR_UNIT); + +const u32 nfs42_maxgetxattr_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_decode_hdr_maxsz + + decode_sequence_maxsz + + decode_putfh_maxsz + 1) * XDR_UNIT); + +const u32 nfs42_maxlistxattrs_overhead = ((RPC_MAX_HEADER_WITH_AUTH + + compound_decode_hdr_maxsz + + decode_sequence_maxsz + + decode_putfh_maxsz + 3) * XDR_UNIT); + static void encode_fallocate(struct xdr_stream *xdr, const struct nfs42_falloc_args *args) { @@ -333,6 +405,210 @@ static void encode_layouterror(struct xdr_stream *xdr, encode_device_error(xdr, &args->errors[0]); } +static void encode_setxattr(struct xdr_stream *xdr, + const struct nfs42_setxattrargs *arg, + struct compound_hdr *hdr) +{ + __be32 *p; + + BUILD_BUG_ON(XATTR_CREATE != SETXATTR4_CREATE); + BUILD_BUG_ON(XATTR_REPLACE != SETXATTR4_REPLACE); + + encode_op_hdr(xdr, OP_SETXATTR, decode_setxattr_maxsz, hdr); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(arg->xattr_flags); + encode_string(xdr, strlen(arg->xattr_name), arg->xattr_name); + p = reserve_space(xdr, 4); + *p = cpu_to_be32(arg->xattr_len); + if (arg->xattr_len) + xdr_write_pages(xdr, arg->xattr_pages, 0, arg->xattr_len); +} + +static int decode_setxattr(struct xdr_stream *xdr, + struct nfs4_change_info *cinfo) +{ + int status; + + status = decode_op_hdr(xdr, OP_SETXATTR); + if (status) + goto out; + status = decode_change_info(xdr, cinfo); +out: + return status; +} + + +static void encode_getxattr(struct xdr_stream *xdr, const char *name, + struct compound_hdr *hdr) +{ + encode_op_hdr(xdr, OP_GETXATTR, decode_getxattr_maxsz, hdr); + encode_string(xdr, strlen(name), name); +} + +static int decode_getxattr(struct xdr_stream *xdr, + struct nfs42_getxattrres *res, + struct rpc_rqst *req) +{ + int status; + __be32 *p; + u32 len, rdlen; + + status = decode_op_hdr(xdr, OP_GETXATTR); + if (status) + return status; + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + len = be32_to_cpup(p); + if (len > req->rq_rcv_buf.page_len) + return -ERANGE; + + res->xattr_len = len; + + if (len > 0) { + rdlen = xdr_read_pages(xdr, len); + if (rdlen < len) + return -EIO; + } + + return 0; +} + +static void encode_removexattr(struct xdr_stream *xdr, const char *name, + struct compound_hdr *hdr) +{ + encode_op_hdr(xdr, OP_REMOVEXATTR, decode_removexattr_maxsz, hdr); + encode_string(xdr, strlen(name), name); +} + + +static int decode_removexattr(struct xdr_stream *xdr, + struct nfs4_change_info *cinfo) +{ + int status; + + status = decode_op_hdr(xdr, OP_REMOVEXATTR); + if (status) + goto out; + + status = decode_change_info(xdr, cinfo); +out: + return status; +} + +static void 
encode_listxattrs(struct xdr_stream *xdr, + const struct nfs42_listxattrsargs *arg, + struct compound_hdr *hdr) +{ + __be32 *p; + + encode_op_hdr(xdr, OP_LISTXATTRS, decode_listxattrs_maxsz + 1, hdr); + + p = reserve_space(xdr, 12); + if (unlikely(!p)) + return; + + p = xdr_encode_hyper(p, arg->cookie); + /* + * RFC 8276 says to specify the full max length of the LISTXATTRS + * XDR reply. Count is set to the XDR length of the names array + * plus the EOF marker. So, add the cookie and the names count. + */ + *p = cpu_to_be32(arg->count + 8 + 4); +} + +static int decode_listxattrs(struct xdr_stream *xdr, + struct nfs42_listxattrsres *res) +{ + int status; + __be32 *p; + u32 count, len, ulen; + size_t left, copied; + char *buf; + + status = decode_op_hdr(xdr, OP_LISTXATTRS); + if (status) { + /* + * Special case: for LISTXATTRS, NFS4ERR_TOOSMALL + * should be translated to ERANGE. + */ + if (status == -ETOOSMALL) + status = -ERANGE; + goto out; + } + + p = xdr_inline_decode(xdr, 8); + if (unlikely(!p)) + return -EIO; + + xdr_decode_hyper(p, &res->cookie); + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + left = res->xattr_len; + buf = res->xattr_buf; + + count = be32_to_cpup(p); + copied = 0; + + /* + * We have asked for enough room to encode the maximum number + * of possible attribute names, so everything should fit. + * + * But, don't rely on that assumption. Just decode entries + * until they don't fit anymore, just in case the server did + * something odd. + */ + while (count--) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + len = be32_to_cpup(p); + if (len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) { + status = -ERANGE; + goto out; + } + + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return -EIO; + + ulen = len + XATTR_USER_PREFIX_LEN + 1; + if (buf) { + if (ulen > left) { + status = -ERANGE; + goto out; + } + + memcpy(buf, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); + memcpy(buf + XATTR_USER_PREFIX_LEN, p, len); + + buf[ulen - 1] = 0; + buf += ulen; + left -= ulen; + } + copied += ulen; + } + + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + + res->eof = be32_to_cpup(p); + res->copied = copied; + +out: + if (status == -ERANGE && res->xattr_len == XATTR_LIST_MAX) + status = -E2BIG; + + return status; +} + /* * Encode ALLOCATE request */ @@ -988,4 +1264,166 @@ out: return status; } +#ifdef CONFIG_NFS_V4_2 +static void nfs4_xdr_enc_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + const void *data) +{ + const struct nfs42_setxattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_setxattr(xdr, args, &hdr); + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_setxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + void *data) +{ + struct nfs42_setxattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, req); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + + status = decode_setxattr(xdr, &res->cinfo); +out: + return status; +} + +static void nfs4_xdr_enc_getxattr(struct rpc_rqst *req, struct xdr_stream *xdr, + const void *data) +{ + const struct nfs42_getxattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = 
nfs4_xdr_minorversion(&args->seq_args), + }; + size_t plen; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_getxattr(xdr, args->xattr_name, &hdr); + + plen = args->xattr_len ? args->xattr_len : XATTR_SIZE_MAX; + + rpc_prepare_reply_pages(req, args->xattr_pages, 0, plen, + hdr.replen); + req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; + + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_getxattr(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_getxattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, rqstp); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + status = decode_getxattr(xdr, res, rqstp); +out: + return status; +} + +static void nfs4_xdr_enc_listxattrs(struct rpc_rqst *req, + struct xdr_stream *xdr, const void *data) +{ + const struct nfs42_listxattrsargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_listxattrs(xdr, args, &hdr); + + rpc_prepare_reply_pages(req, args->xattr_pages, 0, args->count, + hdr.replen); + req->rq_rcv_buf.flags |= XDRBUF_SPARSE_PAGES; + + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_listxattrs(struct rpc_rqst *rqstp, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_listxattrsres *res = data; + struct compound_hdr hdr; + int status; + + xdr_set_scratch_buffer(xdr, page_address(res->scratch), PAGE_SIZE); + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, rqstp); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + status = decode_listxattrs(xdr, res); +out: + return status; +} + +static void nfs4_xdr_enc_removexattr(struct rpc_rqst *req, + struct xdr_stream *xdr, const void *data) +{ + const struct nfs42_removexattrargs *args = data; + struct compound_hdr hdr = { + .minorversion = nfs4_xdr_minorversion(&args->seq_args), + }; + + encode_compound_hdr(xdr, req, &hdr); + encode_sequence(xdr, &args->seq_args, &hdr); + encode_putfh(xdr, args->fh, &hdr); + encode_removexattr(xdr, args->xattr_name, &hdr); + encode_nops(&hdr); +} + +static int nfs4_xdr_dec_removexattr(struct rpc_rqst *req, + struct xdr_stream *xdr, void *data) +{ + struct nfs42_removexattrres *res = data; + struct compound_hdr hdr; + int status; + + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; + status = decode_sequence(xdr, &res->seq_res, req); + if (status) + goto out; + status = decode_putfh(xdr); + if (status) + goto out; + + status = decode_removexattr(xdr, &res->cinfo); +out: + return status; +} +#endif #endif /* __LINUX_FS_NFS_NFS4_2XDR_H */ diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 2b7f6dcd2eb8..0c9505dc852c 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -117,7 +117,7 @@ struct nfs4_state_owner { unsigned long so_flags; struct list_head so_states; struct nfs_seqid_counter so_seqid; - seqcount_t so_reclaim_seqcount; + seqcount_spinlock_t so_reclaim_seqcount; struct mutex so_delegreturn_mutex; }; @@ -324,6 +324,13 @@ extern int update_open_stateid(struct nfs4_state *state, extern int nfs4_proc_get_lease_time(struct nfs_client *clp, struct nfs_fsinfo *fsinfo); +extern void 
nfs4_update_changeattr(struct inode *dir, + struct nfs4_change_info *cinfo, + unsigned long timestamp, + unsigned long cache_validity); +extern int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, + struct page **pages); + #if defined(CONFIG_NFS_V4_1) extern int nfs41_sequence_done(struct rpc_task *, struct nfs4_sequence_res *); extern int nfs4_proc_create_session(struct nfs_client *, const struct cred *); @@ -557,6 +564,12 @@ static inline void nfs4_unregister_sysctl(void) /* nfs4xdr.c */ extern const struct rpc_procinfo nfs4_procedures[]; +#ifdef CONFIG_NFS_V4_2 +extern const u32 nfs42_maxsetxattr_overhead; +extern const u32 nfs42_maxgetxattr_overhead; +extern const u32 nfs42_maxlistxattrs_overhead; +#endif + struct nfs4_mount_data; /* callback_xdr.c */ @@ -613,12 +626,34 @@ static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state * nfs4_stateid_match_other(&state->open_stateid, stateid); } +/* nfs42xattr.c */ +#ifdef CONFIG_NFS_V4_2 +extern int __init nfs4_xattr_cache_init(void); +extern void nfs4_xattr_cache_exit(void); +extern void nfs4_xattr_cache_add(struct inode *inode, const char *name, + const char *buf, struct page **pages, + ssize_t buflen); +extern void nfs4_xattr_cache_remove(struct inode *inode, const char *name); +extern ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, + char *buf, ssize_t buflen); +extern void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf, + ssize_t buflen); +extern ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, + ssize_t buflen); +extern void nfs4_xattr_cache_zap(struct inode *inode); #else +static inline void nfs4_xattr_cache_zap(struct inode *inode) +{ +} +#endif /* CONFIG_NFS_V4_2 */ + +#else /* CONFIG_NFS_V4 */ #define nfs4_close_state(a, b) do { } while (0) #define nfs4_close_sync(a, b) do { } while (0) #define nfs4_state_protect(a, b, c, d) do { } while (0) #define nfs4_state_protect_write(a, b, c, d) do { } while (0) + #endif /* CONFIG_NFS_V4 */ #endif /* __LINUX_FS_NFS_NFS4_FS.H */ diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 0bd77cc1f639..daacc78a3d48 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -880,7 +880,7 @@ static int nfs4_set_client(struct nfs_server *server, if (minorversion == 0) __set_bit(NFS_CS_REUSEPORT, &cl_init.init_flags); - else if (proto == XPRT_TRANSPORT_TCP) + if (proto == XPRT_TRANSPORT_TCP) cl_init.nconnect = nconnect; if (server->flags & NFS_MOUNT_NORESVPORT) @@ -992,6 +992,36 @@ static void nfs4_session_limit_rwsize(struct nfs_server *server) #endif /* CONFIG_NFS_V4_1 */ } +/* + * Limit xattr sizes using the channel attributes. 
+ */ +static void nfs4_session_limit_xasize(struct nfs_server *server) +{ +#ifdef CONFIG_NFS_V4_2 + struct nfs4_session *sess; + u32 server_gxa_sz; + u32 server_sxa_sz; + u32 server_lxa_sz; + + if (!nfs4_has_session(server->nfs_client)) + return; + + sess = server->nfs_client->cl_session; + + server_gxa_sz = sess->fc_attrs.max_resp_sz - nfs42_maxgetxattr_overhead; + server_sxa_sz = sess->fc_attrs.max_rqst_sz - nfs42_maxsetxattr_overhead; + server_lxa_sz = sess->fc_attrs.max_resp_sz - + nfs42_maxlistxattrs_overhead; + + if (server->gxasize > server_gxa_sz) + server->gxasize = server_gxa_sz; + if (server->sxasize > server_sxa_sz) + server->sxasize = server_sxa_sz; + if (server->lxasize > server_lxa_sz) + server->lxasize = server_lxa_sz; +#endif +} + static int nfs4_server_common_setup(struct nfs_server *server, struct nfs_fh *mntfh, bool auth_probe) { @@ -1039,6 +1069,7 @@ static int nfs4_server_common_setup(struct nfs_server *server, goto out; nfs4_session_limit_rwsize(server); + nfs4_session_limit_xasize(server); if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN) server->namelen = NFS4_MAXNAMLEN; diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 8e5d6223ddd3..fdfc77486ace 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -110,6 +110,7 @@ static int nfs4_file_flush(struct file *file, fl_owner_t id) { struct inode *inode = file_inode(file); + errseq_t since; dprintk("NFS: flush(%pD2)\n", file); @@ -125,7 +126,9 @@ nfs4_file_flush(struct file *file, fl_owner_t id) return filemap_fdatawrite(file->f_mapping); /* Flush writes to the server and return any errors */ - return nfs_wb_all(inode); + since = filemap_sample_wb_err(file->f_mapping); + nfs_wb_all(inode); + return filemap_check_wb_err(file->f_mapping, since); } #ifdef CONFIG_NFS_V4_2 @@ -208,7 +211,7 @@ static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence) ret = nfs42_proc_llseek(filep, offset, whence); if (ret != -ENOTSUPP) return ret; - /* Fall through */ + fallthrough; default: return nfs_file_llseek(filep, offset, whence); } diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index 1e7296395d71..62e6eea5c516 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c @@ -520,7 +520,7 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, switch (token) { case Opt_find_uid: im->im_type = IDMAP_TYPE_USER; - /* Fall through */ + fallthrough; case Opt_find_gid: im->im_conv = IDMAP_CONV_NAMETOID; ret = match_strlcpy(im->im_name, &substr, IDMAP_NAMESZ); @@ -528,7 +528,7 @@ static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, case Opt_find_user: im->im_type = IDMAP_TYPE_USER; - /* Fall through */ + fallthrough; case Opt_find_group: im->im_conv = IDMAP_CONV_IDTONAME; ret = match_int(&substr, &im->im_id); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 2e2dac29a9e9..6e95c85fe395 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -66,6 +66,7 @@ #include "nfs4idmap.h" #include "nfs4session.h" #include "fscache.h" +#include "nfs42.h" #include "nfs4trace.h" @@ -256,6 +257,7 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE | FATTR4_WORD1_FS_LAYOUT_TYPES, FATTR4_WORD2_LAYOUT_BLKSIZE | FATTR4_WORD2_CLONE_BLKSIZE + | FATTR4_WORD2_XATTR_SUPPORT }; const u32 nfs4_fs_locations_bitmap[3] = { @@ -414,7 +416,7 @@ static int nfs4_delay_interruptible(long *timeout) { might_sleep(); - freezable_schedule_timeout_interruptible(nfs4_update_delay(timeout)); + freezable_schedule_timeout_interruptible_unsafe(nfs4_update_delay(timeout)); if 
(!signal_pending(current)) return 0; return __fatal_signal_pending(current) ? -EINTR :-ERESTARTSYS; @@ -481,7 +483,7 @@ static int nfs4_do_handle_exception(struct nfs_server *server, stateid); goto wait_on_recovery; } - /* Fall through */ + fallthrough; case -NFS4ERR_OPENMODE: if (inode) { int err; @@ -532,10 +534,10 @@ static int nfs4_do_handle_exception(struct nfs_server *server, ret = -EBUSY; break; } - /* Fall through */ + fallthrough; case -NFS4ERR_DELAY: nfs_inc_server_stats(server, NFSIOS_DELAY); - /* Fall through */ + fallthrough; case -NFS4ERR_GRACE: case -NFS4ERR_LAYOUTTRYLATER: case -NFS4ERR_RECALLCONFLICT: @@ -1173,37 +1175,49 @@ nfs4_dec_nlink_locked(struct inode *inode) } static void -update_changeattr_locked(struct inode *dir, struct nfs4_change_info *cinfo, +nfs4_update_changeattr_locked(struct inode *inode, + struct nfs4_change_info *cinfo, unsigned long timestamp, unsigned long cache_validity) { - struct nfs_inode *nfsi = NFS_I(dir); + struct nfs_inode *nfsi = NFS_I(inode); nfsi->cache_validity |= NFS_INO_INVALID_CTIME | NFS_INO_INVALID_MTIME - | NFS_INO_INVALID_DATA | cache_validity; - if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(dir)) { + + if (cinfo->atomic && cinfo->before == inode_peek_iversion_raw(inode)) { nfsi->cache_validity &= ~NFS_INO_REVAL_PAGECACHE; nfsi->attrtimeo_timestamp = jiffies; } else { - nfs_force_lookup_revalidate(dir); - if (cinfo->before != inode_peek_iversion_raw(dir)) + if (S_ISDIR(inode->i_mode)) { + nfsi->cache_validity |= NFS_INO_INVALID_DATA; + nfs_force_lookup_revalidate(inode); + } else { + if (!NFS_PROTO(inode)->have_delegation(inode, + FMODE_READ)) + nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE; + } + + if (cinfo->before != inode_peek_iversion_raw(inode)) nfsi->cache_validity |= NFS_INO_INVALID_ACCESS | - NFS_INO_INVALID_ACL; + NFS_INO_INVALID_ACL | + NFS_INO_INVALID_XATTR; } - inode_set_iversion_raw(dir, cinfo->after); + inode_set_iversion_raw(inode, cinfo->after); nfsi->read_cache_jiffies = timestamp; nfsi->attr_gencount = nfs_inc_attr_generation_counter(); nfsi->cache_validity &= ~NFS_INO_INVALID_CHANGE; - nfs_fscache_invalidate(dir); + + if (nfsi->cache_validity & NFS_INO_INVALID_DATA) + nfs_fscache_invalidate(inode); } -static void -update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, +void +nfs4_update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo, unsigned long timestamp, unsigned long cache_validity) { spin_lock(&dir->i_lock); - update_changeattr_locked(dir, cinfo, timestamp, cache_validity); + nfs4_update_changeattr_locked(dir, cinfo, timestamp, cache_validity); spin_unlock(&dir->i_lock); } @@ -1356,6 +1370,12 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry, NFS4_ACCESS_MODIFY | NFS4_ACCESS_EXTEND | NFS4_ACCESS_EXECUTE; +#ifdef CONFIG_NFS_V4_2 + if (server->caps & NFS_CAP_XATTR) + p->o_arg.access |= NFS4_ACCESS_XAREAD | + NFS4_ACCESS_XAWRITE | + NFS4_ACCESS_XALIST; +#endif } } p->o_arg.clientid = server->nfs_client->cl_clientid; @@ -1485,7 +1505,7 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode, case NFS4_OPEN_CLAIM_PREVIOUS: if (!test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) break; - /* Fall through */ + fallthrough; default: return 0; } @@ -2419,7 +2439,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata) case NFS4_OPEN_CLAIM_DELEG_CUR_FH: case NFS4_OPEN_CLAIM_DELEG_PREV_FH: data->o_arg.open_bitmap = &nfs4_open_noattr_bitmap[0]; - /* Fall through */ + fallthrough; case 
NFS4_OPEN_CLAIM_FH: task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_OPEN_NOATTR]; } @@ -2653,8 +2673,9 @@ static int _nfs4_proc_open(struct nfs4_opendata *data, data->file_created = true; if (data->file_created || inode_peek_iversion_raw(dir) != o_res->cinfo.after) - update_changeattr(dir, &o_res->cinfo, - o_res->f_attr->time_start, 0); + nfs4_update_changeattr(dir, &o_res->cinfo, + o_res->f_attr->time_start, + NFS_INO_INVALID_DATA); } if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) server->caps &= ~NFS_CAP_POSIX_LOCK; @@ -3272,8 +3293,10 @@ static int _nfs4_do_setattr(struct inode *inode, /* Servers should only apply open mode checks for file size changes */ truncate = (arg->iap->ia_valid & ATTR_SIZE) ? true : false; - if (!truncate) + if (!truncate) { + nfs4_inode_make_writeable(inode); goto zero_stateid; + } if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { /* Use that stateid */ @@ -3524,11 +3547,11 @@ static void nfs4_close_done(struct rpc_task *task, void *data) nfs4_free_revoked_stateid(server, &calldata->arg.stateid, task->tk_msg.rpc_cred); - /* Fallthrough */ + fallthrough; case -NFS4ERR_BAD_STATEID: if (calldata->arg.fmode == 0) break; - /* Fallthrough */ + fallthrough; default: task->tk_status = nfs4_async_handle_exception(task, server, task->tk_status, &exception); @@ -3756,7 +3779,7 @@ static void nfs4_close_context(struct nfs_open_context *ctx, int is_sync) #define FATTR4_WORD1_NFS40_MASK (2*FATTR4_WORD1_MOUNTED_ON_FILEID - 1UL) #define FATTR4_WORD2_NFS41_MASK (2*FATTR4_WORD2_SUPPATTR_EXCLCREAT - 1UL) -#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_MODE_UMASK - 1UL) +#define FATTR4_WORD2_NFS42_MASK (2*FATTR4_WORD2_XATTR_SUPPORT - 1UL) static int _nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) { @@ -4540,7 +4563,8 @@ _nfs4_proc_remove(struct inode *dir, const struct qstr *name, u32 ftype) status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); if (status == 0) { spin_lock(&dir->i_lock); - update_changeattr_locked(dir, &res.cinfo, timestamp, 0); + nfs4_update_changeattr_locked(dir, &res.cinfo, timestamp, + NFS_INO_INVALID_DATA); /* Removing a directory decrements nlink in the parent */ if (ftype == NF4DIR && dir->i_nlink > 2) nfs4_dec_nlink_locked(dir); @@ -4624,8 +4648,9 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) &data->timeout) == -EAGAIN) return 0; if (task->tk_status == 0) - update_changeattr(dir, &res->cinfo, - res->dir_attr->time_start, 0); + nfs4_update_changeattr(dir, &res->cinfo, + res->dir_attr->time_start, + NFS_INO_INVALID_DATA); return 1; } @@ -4669,16 +4694,18 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir, if (task->tk_status == 0) { if (new_dir != old_dir) { /* Note: If we moved a directory, nlink will change */ - update_changeattr(old_dir, &res->old_cinfo, + nfs4_update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start, - NFS_INO_INVALID_OTHER); - update_changeattr(new_dir, &res->new_cinfo, + NFS_INO_INVALID_OTHER | + NFS_INO_INVALID_DATA); + nfs4_update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start, - NFS_INO_INVALID_OTHER); + NFS_INO_INVALID_OTHER | + NFS_INO_INVALID_DATA); } else - update_changeattr(old_dir, &res->old_cinfo, + nfs4_update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start, - 0); + NFS_INO_INVALID_DATA); } return 1; } @@ -4719,7 +4746,8 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const 
struct status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); if (!status) { - update_changeattr(dir, &res.cinfo, res.fattr->time_start, 0); + nfs4_update_changeattr(dir, &res.cinfo, res.fattr->time_start, + NFS_INO_INVALID_DATA); status = nfs_post_op_update_inode(inode, res.fattr); if (!status) nfs_setsecurity(inode, res.fattr, res.label); @@ -4797,8 +4825,9 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_ &data->arg.seq_args, &data->res.seq_res, 1); if (status == 0) { spin_lock(&dir->i_lock); - update_changeattr_locked(dir, &data->res.dir_cinfo, - data->res.fattr->time_start, 0); + nfs4_update_changeattr_locked(dir, &data->res.dir_cinfo, + data->res.fattr->time_start, + NFS_INO_INVALID_DATA); /* Creating a directory bumps nlink in the parent */ if (data->arg.ftype == NF4DIR) nfs4_inc_nlink_locked(dir); @@ -5531,7 +5560,7 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server) */ #define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) -static int buf_to_pages_noslab(const void *buf, size_t buflen, +int nfs4_buf_to_pages_noslab(const void *buf, size_t buflen, struct page **pages) { struct page *newpage, **spages; @@ -5773,7 +5802,7 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl return -EOPNOTSUPP; if (npages > ARRAY_SIZE(pages)) return -ERANGE; - i = buf_to_pages_noslab(buf, buflen, arg.acl_pages); + i = nfs4_buf_to_pages_noslab(buf, buflen, arg.acl_pages); if (i < 0) return i; nfs4_inode_make_writeable(inode); @@ -5845,8 +5874,6 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf, return ret; if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL)) return -ENOENT; - if (buflen < label.len) - return -ERANGE; return 0; } @@ -6269,7 +6296,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) nfs4_free_revoked_stateid(data->res.server, data->args.stateid, task->tk_msg.rpc_cred); - /* Fallthrough */ + fallthrough; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: case -ETIMEDOUT: @@ -6289,7 +6316,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) data->res.fattr = NULL; goto out_restart; } - /* Fallthrough */ + fallthrough; default: task->tk_status = nfs4_async_handle_exception(task, data->res.server, task->tk_status, @@ -6597,13 +6624,13 @@ static void nfs4_locku_done(struct rpc_task *task, void *data) if (nfs4_update_lock_stateid(calldata->lsp, &calldata->res.stateid)) break; - /* Fall through */ + fallthrough; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_EXPIRED: nfs4_free_revoked_stateid(calldata->server, &calldata->arg.stateid, task->tk_msg.rpc_cred); - /* Fall through */ + fallthrough; case -NFS4ERR_BAD_STATEID: case -NFS4ERR_STALE_STATEID: if (nfs4_sync_lock_stateid(&calldata->arg.stateid, @@ -7273,7 +7300,12 @@ int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, err = nfs4_set_lock_state(state, fl); if (err != 0) return err; - err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + do { + err = _nfs4_do_setlk(state, F_SETLK, fl, NFS_LOCK_NEW); + if (err != -NFS4ERR_DELAY) + break; + ssleep(1); + } while (err == -NFS4ERR_DELAY); return nfs4_handle_delegation_recall_error(server, state, stateid, fl, err); } @@ -7430,6 +7462,133 @@ nfs4_listxattr_nfs4_label(struct inode *inode, char *list, size_t list_len) #endif +#ifdef CONFIG_NFS_V4_2 +static int nfs4_xattr_set_nfs4_user(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + 
const char *key, const void *buf, + size_t buflen, int flags) +{ + struct nfs_access_entry cache; + int ret; + + if (!nfs_server_capable(inode, NFS_CAP_XATTR)) + return -EOPNOTSUPP; + + /* + * There is no mapping from the MAY_* flags to the NFS_ACCESS_XA* + * flags right now. Handling of xattr operations use the normal + * file read/write permissions. + * + * Just in case the server has other ideas (which RFC 8276 allows), + * do a cached access check for the XA* flags to possibly avoid + * doing an RPC and getting EACCES back. + */ + if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { + if (!(cache.mask & NFS_ACCESS_XAWRITE)) + return -EACCES; + } + + if (buf == NULL) { + ret = nfs42_proc_removexattr(inode, key); + if (!ret) + nfs4_xattr_cache_remove(inode, key); + } else { + ret = nfs42_proc_setxattr(inode, key, buf, buflen, flags); + if (!ret) + nfs4_xattr_cache_add(inode, key, buf, NULL, buflen); + } + + return ret; +} + +static int nfs4_xattr_get_nfs4_user(const struct xattr_handler *handler, + struct dentry *unused, struct inode *inode, + const char *key, void *buf, size_t buflen) +{ + struct nfs_access_entry cache; + ssize_t ret; + + if (!nfs_server_capable(inode, NFS_CAP_XATTR)) + return -EOPNOTSUPP; + + if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { + if (!(cache.mask & NFS_ACCESS_XAREAD)) + return -EACCES; + } + + ret = nfs_revalidate_inode(NFS_SERVER(inode), inode); + if (ret) + return ret; + + ret = nfs4_xattr_cache_get(inode, key, buf, buflen); + if (ret >= 0 || (ret < 0 && ret != -ENOENT)) + return ret; + + ret = nfs42_proc_getxattr(inode, key, buf, buflen); + + return ret; +} + +static ssize_t +nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) +{ + u64 cookie; + bool eof; + ssize_t ret, size; + char *buf; + size_t buflen; + struct nfs_access_entry cache; + + if (!nfs_server_capable(inode, NFS_CAP_XATTR)) + return 0; + + if (!nfs_access_get_cached(inode, current_cred(), &cache, true)) { + if (!(cache.mask & NFS_ACCESS_XALIST)) + return 0; + } + + ret = nfs_revalidate_inode(NFS_SERVER(inode), inode); + if (ret) + return ret; + + ret = nfs4_xattr_cache_list(inode, list, list_len); + if (ret >= 0 || (ret < 0 && ret != -ENOENT)) + return ret; + + cookie = 0; + eof = false; + buflen = list_len ? list_len : XATTR_LIST_MAX; + buf = list_len ? list : NULL; + size = 0; + + while (!eof) { + ret = nfs42_proc_listxattrs(inode, buf, buflen, + &cookie, &eof); + if (ret < 0) + return ret; + + if (list_len) { + buf += ret; + buflen -= ret; + } + size += ret; + } + + if (list_len) + nfs4_xattr_cache_set_list(inode, list, size); + + return size; +} + +#else + +static ssize_t +nfs4_listxattr_nfs4_user(struct inode *inode, char *list, size_t list_len) +{ + return 0; +} +#endif /* CONFIG_NFS_V4_2 */ + /* * nfs_fhget will use either the mounted_on_fileid or the fileid */ @@ -8513,7 +8672,7 @@ static void nfs4_get_lease_time_done(struct rpc_task *task, void *calldata) dprintk("%s Retry: tk_status %d\n", __func__, task->tk_status); rpc_delay(task, NFS4_POLL_RETRY_MIN); task->tk_status = 0; - /* fall through */ + fallthrough; case -NFS4ERR_RETRY_UNCACHED_REP: rpc_restart_call_prepare(task); return; @@ -8961,13 +9120,13 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf switch(task->tk_status) { case 0: wake_up_all(&clp->cl_lock_waitq); - /* Fallthrough */ + fallthrough; case -NFS4ERR_COMPLETE_ALREADY: case -NFS4ERR_WRONG_CRED: /* What to do here? 
*/ break; case -NFS4ERR_DELAY: rpc_delay(task, NFS4_POLL_RETRY_MAX); - /* fall through */ + fallthrough; case -NFS4ERR_RETRY_UNCACHED_REP: return -EAGAIN; case -NFS4ERR_BADSESSION: @@ -9282,10 +9441,10 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) &lrp->args.range, lrp->args.inode)) goto out_restart; - /* Fallthrough */ + fallthrough; default: task->tk_status = 0; - /* Fallthrough */ + fallthrough; case 0: break; case -NFS4ERR_DELAY: @@ -10035,7 +10194,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = { static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) { - ssize_t error, error2; + ssize_t error, error2, error3; error = generic_listxattr(dentry, list, size); if (error < 0) @@ -10048,7 +10207,17 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size) error2 = nfs4_listxattr_nfs4_label(d_inode(dentry), list, size); if (error2 < 0) return error2; - return error + error2; + + if (list) { + list += error2; + size -= error2; + } + + error3 = nfs4_listxattr_nfs4_user(d_inode(dentry), list, size); + if (error3 < 0) + return error3; + + return error + error2 + error3; } static const struct inode_operations nfs4_dir_inode_operations = { @@ -10136,11 +10305,22 @@ static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { .set = nfs4_xattr_set_nfs4_acl, }; +#ifdef CONFIG_NFS_V4_2 +static const struct xattr_handler nfs4_xattr_nfs4_user_handler = { + .prefix = XATTR_USER_PREFIX, + .get = nfs4_xattr_get_nfs4_user, + .set = nfs4_xattr_set_nfs4_user, +}; +#endif + const struct xattr_handler *nfs4_xattr_handlers[] = { &nfs4_xattr_nfs4_acl_handler, #ifdef CONFIG_NFS_V4_SECURITY_LABEL &nfs4_xattr_nfs4_label_handler, #endif +#ifdef CONFIG_NFS_V4_2 + &nfs4_xattr_nfs4_user_handler, +#endif NULL }; diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index a8dc25ce48bb..4bf10792cb5b 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -509,7 +509,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, nfs4_init_seqid_counter(&sp->so_seqid); atomic_set(&sp->so_count, 1); INIT_LIST_HEAD(&sp->so_lru); - seqcount_init(&sp->so_reclaim_seqcount); + seqcount_spinlock_init(&sp->so_reclaim_seqcount, &sp->so_lock); mutex_init(&sp->so_delegreturn_mutex); return sp; } @@ -1530,7 +1530,7 @@ restart: default: pr_err("NFS: %s: unhandled error %d\n", __func__, status); - /* Fall through */ + fallthrough; case -ENOMEM: case -NFS4ERR_DENIED: case -NFS4ERR_RECLAIM_BAD: @@ -1667,7 +1667,7 @@ restart: break; } printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status); - /* Fall through */ + fallthrough; case -ENOENT: case -ENOMEM: case -EACCES: @@ -1683,7 +1683,7 @@ restart: set_bit(ops->state_flag_bit, &state->flags); break; } - /* Fall through */ + fallthrough; case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: case -NFS4ERR_OLD_STATEID: @@ -1695,7 +1695,7 @@ restart: case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); - /* Fall through */ + fallthrough; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: @@ -2273,11 +2273,11 @@ again: case -ETIMEDOUT: if (clnt->cl_softrtry) break; - /* Fall through */ + fallthrough; case -NFS4ERR_DELAY: case -EAGAIN: ssleep(1); - /* Fall through */ + fallthrough; case -NFS4ERR_STALE_CLIENTID: dprintk("NFS: %s after status %d, retrying\n", __func__, status); @@ -2289,7 +2289,7 @@ again: } if (clnt->cl_auth->au_flavor == RPC_AUTH_UNIX) break; - /* Fall through */ + fallthrough; case 
-NFS4ERR_CLID_INUSE: case -NFS4ERR_WRONGSEC: /* No point in retrying if we already used RPC_AUTH_UNIX */ diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c index 1475f932d7da..0c1ab846b83d 100644 --- a/fs/nfs/nfs4super.c +++ b/fs/nfs/nfs4super.c @@ -69,6 +69,7 @@ static void nfs4_evict_inode(struct inode *inode) pnfs_destroy_layout(NFS_I(inode)); /* First call standard NFS clear_inode() code */ nfs_clear_inode(inode); + nfs4_xattr_cache_zap(inode); } struct nfs_referral_count { @@ -268,6 +269,12 @@ static int __init init_nfs_v4(void) if (err) goto out1; +#ifdef CONFIG_NFS_V4_2 + err = nfs4_xattr_cache_init(); + if (err) + goto out2; +#endif + err = nfs4_register_sysctl(); if (err) goto out2; @@ -288,6 +295,9 @@ static void __exit exit_nfs_v4(void) nfs4_pnfs_v3_ds_connect_unload(); unregister_nfs_version(&nfs_v4); +#ifdef CONFIG_NFS_V4_2 + nfs4_xattr_cache_exit(); +#endif nfs4_unregister_sysctl(); nfs_idmap_quit(); nfs_dns_resolver_destroy(); diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 543541173a3d..b4f852d4d099 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -1727,6 +1727,13 @@ DEFINE_NFS4_IDMAP_EVENT(nfs4_map_group_to_gid); DEFINE_NFS4_IDMAP_EVENT(nfs4_map_uid_to_name); DEFINE_NFS4_IDMAP_EVENT(nfs4_map_gid_to_group); +#ifdef CONFIG_NFS_V4_1 +#define NFS4_LSEG_LAYOUT_STATEID_HASH(lseg) \ + (lseg ? nfs_stateid_hash(&lseg->pls_layout->plh_stateid) : 0) +#else +#define NFS4_LSEG_LAYOUT_STATEID_HASH(lseg) (0) +#endif + DECLARE_EVENT_CLASS(nfs4_read_event, TP_PROTO( const struct nfs_pgio_header *hdr, @@ -1745,6 +1752,8 @@ DECLARE_EVENT_CLASS(nfs4_read_event, __field(unsigned long, error) __field(int, stateid_seq) __field(u32, stateid_hash) + __field(int, layoutstateid_seq) + __field(u32, layoutstateid_hash) ), TP_fast_assign( @@ -1754,6 +1763,7 @@ DECLARE_EVENT_CLASS(nfs4_read_event, hdr->args.fh : &nfsi->fh; const struct nfs4_state *state = hdr->args.context->state; + const struct pnfs_layout_segment *lseg = hdr->lseg; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; @@ -1766,11 +1776,15 @@ DECLARE_EVENT_CLASS(nfs4_read_event, be32_to_cpu(state->stateid.seqid); __entry->stateid_hash = nfs_stateid_hash(&state->stateid); + __entry->layoutstateid_seq = lseg ? 
lseg->pls_seq : 0; + __entry->layoutstateid_hash = + NFS4_LSEG_LAYOUT_STATEID_HASH(lseg); ), TP_printk( "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " - "offset=%lld count=%u res=%u stateid=%d:0x%08x", + "offset=%lld count=%u res=%u stateid=%d:0x%08x " + "layoutstateid=%d:0x%08x", -__entry->error, show_nfsv4_errors(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), @@ -1778,7 +1792,8 @@ DECLARE_EVENT_CLASS(nfs4_read_event, __entry->fhandle, (long long)__entry->offset, __entry->arg_count, __entry->res_count, - __entry->stateid_seq, __entry->stateid_hash + __entry->stateid_seq, __entry->stateid_hash, + __entry->layoutstateid_seq, __entry->layoutstateid_hash ) ); #define DEFINE_NFS4_READ_EVENT(name) \ @@ -1811,6 +1826,8 @@ DECLARE_EVENT_CLASS(nfs4_write_event, __field(unsigned long, error) __field(int, stateid_seq) __field(u32, stateid_hash) + __field(int, layoutstateid_seq) + __field(u32, layoutstateid_hash) ), TP_fast_assign( @@ -1820,6 +1837,7 @@ DECLARE_EVENT_CLASS(nfs4_write_event, hdr->args.fh : &nfsi->fh; const struct nfs4_state *state = hdr->args.context->state; + const struct pnfs_layout_segment *lseg = hdr->lseg; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; @@ -1832,11 +1850,15 @@ DECLARE_EVENT_CLASS(nfs4_write_event, be32_to_cpu(state->stateid.seqid); __entry->stateid_hash = nfs_stateid_hash(&state->stateid); + __entry->layoutstateid_seq = lseg ? lseg->pls_seq : 0; + __entry->layoutstateid_hash = + NFS4_LSEG_LAYOUT_STATEID_HASH(lseg); ), TP_printk( "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " - "offset=%lld count=%u res=%u stateid=%d:0x%08x", + "offset=%lld count=%u res=%u stateid=%d:0x%08x " + "layoutstateid=%d:0x%08x", -__entry->error, show_nfsv4_errors(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), @@ -1844,7 +1866,8 @@ DECLARE_EVENT_CLASS(nfs4_write_event, __entry->fhandle, (long long)__entry->offset, __entry->arg_count, __entry->res_count, - __entry->stateid_seq, __entry->stateid_hash + __entry->stateid_seq, __entry->stateid_hash, + __entry->layoutstateid_seq, __entry->layoutstateid_hash ) ); @@ -1875,6 +1898,8 @@ DECLARE_EVENT_CLASS(nfs4_commit_event, __field(unsigned long, error) __field(loff_t, offset) __field(u32, count) + __field(int, layoutstateid_seq) + __field(u32, layoutstateid_hash) ), TP_fast_assign( @@ -1882,6 +1907,7 @@ DECLARE_EVENT_CLASS(nfs4_commit_event, const struct nfs_inode *nfsi = NFS_I(inode); const struct nfs_fh *fh = data->args.fh ? data->args.fh : &nfsi->fh; + const struct pnfs_layout_segment *lseg = data->lseg; __entry->dev = inode->i_sb->s_dev; __entry->fileid = nfsi->fileid; @@ -1889,18 +1915,22 @@ DECLARE_EVENT_CLASS(nfs4_commit_event, __entry->offset = data->args.offset; __entry->count = data->args.count; __entry->error = error < 0 ? -error : 0; + __entry->layoutstateid_seq = lseg ? 
lseg->pls_seq : 0; + __entry->layoutstateid_hash = + NFS4_LSEG_LAYOUT_STATEID_HASH(lseg); ), TP_printk( "error=%ld (%s) fileid=%02x:%02x:%llu fhandle=0x%08x " - "offset=%lld count=%u", + "offset=%lld count=%u layoutstateid=%d:0x%08x", -__entry->error, show_nfsv4_errors(__entry->error), MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->fileid, __entry->fhandle, (long long)__entry->offset, - __entry->count + __entry->count, + __entry->layoutstateid_seq, __entry->layoutstateid_hash ) ); #define DEFINE_NFS4_COMMIT_EVENT(name) \ @@ -1993,7 +2023,9 @@ TRACE_EVENT(nfs4_layoutget, DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutcommit); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutreturn); -DEFINE_NFS4_INODE_EVENT(nfs4_layoutreturn_on_close); +DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutreturn_on_close); +DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layouterror); +DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_layoutstats); TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_UNKNOWN); TRACE_DEFINE_ENUM(PNFS_UPDATE_LAYOUT_NO_PNFS); diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 47817ef0aadb..0b3510f62623 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -4166,7 +4166,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap, return -EIO; if (len < NFS4_MAXLABELLEN) { if (label) { - memcpy(label->label, p, len); + if (label->len) { + if (label->len < len) + return -ERANGE; + memcpy(label->label, p, len); + } label->len = len; label->pi = pi; label->lfs = lfs; @@ -4201,6 +4205,26 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str return status; } +static int decode_attr_xattrsupport(struct xdr_stream *xdr, uint32_t *bitmap, + uint32_t *res) +{ + __be32 *p; + + *res = 0; + if (unlikely(bitmap[2] & (FATTR4_WORD2_XATTR_SUPPORT - 1U))) + return -EIO; + if (likely(bitmap[2] & FATTR4_WORD2_XATTR_SUPPORT)) { + p = xdr_inline_decode(xdr, 4); + if (unlikely(!p)) + return -EIO; + *res = be32_to_cpup(p); + bitmap[2] &= ~FATTR4_WORD2_XATTR_SUPPORT; + } + dprintk("%s: XATTR support=%s\n", __func__, + *res == 0 ? "false" : "true"); + return 0; +} + static int verify_attr_len(struct xdr_stream *xdr, unsigned int savep, uint32_t attrlen) { unsigned int attrwords = XDR_QUADLEN(attrlen); @@ -4855,6 +4879,11 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo) if (status) goto xdr_error; + status = decode_attr_xattrsupport(xdr, bitmap, + &fsinfo->xattr_support); + if (status) + goto xdr_error; + status = verify_attr_len(xdr, savep, attrlen); xdr_error: dprintk("%s: xdr returned %d!\n", __func__, -status); @@ -5227,7 +5256,7 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req) * The XDR encode routine has set things up so that * the link text will be copied directly into the * buffer. We just have to do overflow-checking, - * and and null-terminate the text (the VFS expects + * and null-terminate the text (the VFS expects * null-termination). 
*/ xdr_terminate_string(rcvbuf, len); @@ -7456,6 +7485,8 @@ static struct { { NFS4ERR_SYMLINK, -ELOOP }, { NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP }, { NFS4ERR_DEADLOCK, -EDEADLK }, + { NFS4ERR_NOXATTR, -ENODATA }, + { NFS4ERR_XATTR2BIG, -E2BIG }, { -1, -EIO } }; @@ -7584,6 +7615,10 @@ const struct rpc_procinfo nfs4_procedures[] = { PROC42(COPY_NOTIFY, enc_copy_notify, dec_copy_notify), PROC(LOOKUPP, enc_lookupp, dec_lookupp), PROC42(LAYOUTERROR, enc_layouterror, dec_layouterror), + PROC42(GETXATTR, enc_getxattr, dec_getxattr), + PROC42(SETXATTR, enc_setxattr, dec_setxattr), + PROC42(LISTXATTRS, enc_listxattrs, dec_listxattrs), + PROC42(REMOVEXATTR, enc_removexattr, dec_removexattr), }; static unsigned int nfs_version4_counts[ARRAY_SIZE(nfs4_procedures)]; diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h index 547cec79899f..5a59dcdce0b2 100644 --- a/fs/nfs/nfstrace.h +++ b/fs/nfs/nfstrace.h @@ -59,7 +59,8 @@ TRACE_DEFINE_ENUM(NFS_INO_INVALID_OTHER); { NFS_INO_INVALID_CTIME, "INVALID_CTIME" }, \ { NFS_INO_INVALID_MTIME, "INVALID_MTIME" }, \ { NFS_INO_INVALID_SIZE, "INVALID_SIZE" }, \ - { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }) + { NFS_INO_INVALID_OTHER, "INVALID_OTHER" }, \ + { NFS_INO_INVALID_XATTR, "INVALID_XATTR" }) TRACE_DEFINE_ENUM(NFS_INO_ADVISE_RDPLUS); TRACE_DEFINE_ENUM(NFS_INO_STALE); diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c index 6ea4cac41e46..6985cacf4700 100644 --- a/fs/nfs/pagelist.c +++ b/fs/nfs/pagelist.c @@ -711,7 +711,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, case FLUSH_COND_STABLE: if (nfs_reqs_to_commit(cinfo)) break; - /* fall through */ + fallthrough; default: hdr->args.stable = NFS_FILE_SYNC; } diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index dd2e14f5875d..71f7741126b6 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1226,31 +1226,27 @@ out: return status; } +static bool +pnfs_layout_segments_returnable(struct pnfs_layout_hdr *lo, + enum pnfs_iomode iomode, + u32 seq) +{ + struct pnfs_layout_range recall_range = { + .length = NFS4_MAX_UINT64, + .iomode = iomode, + }; + return pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, + &recall_range, seq) != -EBUSY; +} + /* Return true if layoutreturn is needed */ static bool pnfs_layout_need_return(struct pnfs_layout_hdr *lo) { - struct pnfs_layout_segment *s; - enum pnfs_iomode iomode; - u32 seq; - if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) return false; - - seq = lo->plh_return_seq; - iomode = lo->plh_return_iomode; - - /* Defer layoutreturn until all recalled lsegs are done */ - list_for_each_entry(s, &lo->plh_segs, pls_list) { - if (seq && pnfs_seqid_is_newer(s->pls_seq, seq)) - continue; - if (iomode != IOMODE_ANY && s->pls_range.iomode != iomode) - continue; - if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags)) - return false; - } - - return true; + return pnfs_layout_segments_returnable(lo, lo->plh_return_iomode, + lo->plh_return_seq); } static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo) @@ -1545,16 +1541,16 @@ void pnfs_roc_release(struct nfs4_layoutreturn_args *args, case 0: if (res->lrs_present) res_stateid = &res->stateid; - /* Fallthrough */ + fallthrough; default: arg_stateid = &args->stateid; } + trace_nfs4_layoutreturn_on_close(args->inode, &args->stateid, ret); pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range, res_stateid); if (ld_private && ld_private->ops && ld_private->ops->free) ld_private->ops->free(ld_private); pnfs_put_layout_hdr(lo); - trace_nfs4_layoutreturn_on_close(args->inode, 0); } bool 
pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task) @@ -2392,16 +2388,6 @@ out_forget: return ERR_PTR(-EAGAIN); } -static int -mark_lseg_invalid_or_return(struct pnfs_layout_segment *lseg, - struct list_head *tmp_list) -{ - if (!mark_lseg_invalid(lseg, tmp_list)) - return 0; - pnfs_cache_lseg_for_layoutreturn(lseg->pls_layout, lseg); - return 1; -} - /** * pnfs_mark_matching_lsegs_return - Free or return matching layout segments * @lo: pointer to layout header @@ -2438,7 +2424,7 @@ pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo, lseg, lseg->pls_range.iomode, lseg->pls_range.offset, lseg->pls_range.length); - if (mark_lseg_invalid_or_return(lseg, tmp_list)) + if (mark_lseg_invalid(lseg, tmp_list)) continue; remaining++; set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags); @@ -2953,7 +2939,8 @@ pnfs_try_to_read_data(struct nfs_pgio_header *hdr, } /* Resend all requests through pnfs. */ -void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr) +void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr, + unsigned int mirror_idx) { struct nfs_pageio_descriptor pgio; @@ -2964,6 +2951,7 @@ void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr) nfs_pageio_init_read(&pgio, hdr->inode, false, hdr->completion_ops); + pgio.pg_mirror_idx = mirror_idx; hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr); } } diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 8e0ada581b92..2661c44c62db 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -311,7 +311,7 @@ int _pnfs_return_layout(struct inode *); int pnfs_commit_and_return_layout(struct inode *); void pnfs_ld_write_done(struct nfs_pgio_header *); void pnfs_ld_read_done(struct nfs_pgio_header *); -void pnfs_read_resend_pnfs(struct nfs_pgio_header *); +void pnfs_read_resend_pnfs(struct nfs_pgio_header *, unsigned int mirror_idx); struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino, struct nfs_open_context *ctx, loff_t pos, diff --git a/fs/nfs_common/nfsacl.c b/fs/nfs_common/nfsacl.c index 8ceb6425e01a..d056ad2fdefd 100644 --- a/fs/nfs_common/nfsacl.c +++ b/fs/nfs_common/nfsacl.c @@ -237,7 +237,7 @@ posix_acl_from_nfsacl(struct posix_acl *acl) break; case ACL_MASK: mask = pa; - /* fall through */ + fallthrough; case ACL_OTHER: break; } diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c index 9bbaa671c079..311e5ce80cfc 100644 --- a/fs/nfsd/blocklayout.c +++ b/fs/nfsd/blocklayout.c @@ -83,13 +83,13 @@ nfsd4_block_proc_layoutget(struct inode *inode, const struct svc_fh *fhp, bex->soff = iomap.addr; break; } - /*FALLTHRU*/ + fallthrough; case IOMAP_HOLE: if (seg->iomode == IOMODE_READ) { bex->es = PNFS_BLOCK_NONE_DATA; break; } - /*FALLTHRU*/ + fallthrough; case IOMAP_DELALLOC: default: WARN(1, "pnfsd: filesystem returned %d extent\n", iomap.type); diff --git a/fs/nfsd/filecache.c b/fs/nfsd/filecache.c index 82198d747c4c..c8b9d2667ee6 100644 --- a/fs/nfsd/filecache.c +++ b/fs/nfsd/filecache.c @@ -598,11 +598,9 @@ static struct notifier_block nfsd_file_lease_notifier = { }; static int -nfsd_file_fsnotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, - const struct qstr *file_name, u32 cookie, - struct fsnotify_iter_info *iter_info) +nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *name) { trace_nfsd_file_fsnotify_handle_event(inode, mask); @@ -624,7 +622,7 @@ nfsd_file_fsnotify_handle_event(struct fsnotify_group *group, static const struct fsnotify_ops nfsd_file_fsnotify_ops = { 
- .handle_event = nfsd_file_fsnotify_handle_event, + .handle_inode_event = nfsd_file_fsnotify_handle_event, .free_mark = nfsd_file_mark_free, }; diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h index 9217cb64bf0e..7346acda9d76 100644 --- a/fs/nfsd/netns.h +++ b/fs/nfsd/netns.h @@ -171,7 +171,7 @@ struct nfsd_net { unsigned int longest_chain_cachesize; struct shrinker nfsd_reply_cache_shrinker; - /* utsname taken from the the process that starts the server */ + /* utsname taken from the process that starts the server */ char nfsd_name[UNX_MAXNODENAME+1]; }; diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index 7fbe9840a03e..052be5bf9ef5 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c @@ -1119,7 +1119,7 @@ static bool nfsd4_cb_sequence_done(struct rpc_task *task, struct nfsd4_callback break; case -ESERVERFAULT: ++session->se_cb_seq_nr; - /* Fall through */ + fallthrough; case 1: case -NFS4ERR_BADSESSION: nfsd4_mark_cb_fault(cb->cb_clp, cb->cb_seq_status); diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c index 9460be8a8321..f92161ce1f97 100644 --- a/fs/nfsd/nfs4idmap.c +++ b/fs/nfsd/nfs4idmap.c @@ -168,7 +168,7 @@ idtoname_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) ent->id); if (test_bit(CACHE_VALID, &h->flags)) seq_printf(m, " %s", ent->name); - seq_printf(m, "\n"); + seq_putc(m, '\n'); return 0; } @@ -346,7 +346,7 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h) ent->name); if (test_bit(CACHE_VALID, &h->flags)) seq_printf(m, " %u", ent->id); - seq_printf(m, "\n"); + seq_putc(m, '\n'); return 0; } diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c index e12409eca7cc..a97873f2d22b 100644 --- a/fs/nfsd/nfs4layouts.c +++ b/fs/nfsd/nfs4layouts.c @@ -681,7 +681,7 @@ nfsd4_cb_layout_done(struct nfsd4_callback *cb, struct rpc_task *task) rpc_delay(task, HZ/100); /* 10 mili-seconds */ return 0; } - /* Fallthrough */ + fallthrough; default: /* * Unknown error or non-responding client, we'll need to fence. 
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index a09c35f0f6f0..eaf50eafa935 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c @@ -428,7 +428,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, goto out; open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED; reclaim = true; - /* fall through */ + fallthrough; case NFS4_OPEN_CLAIM_FH: case NFS4_OPEN_CLAIM_DELEG_CUR_FH: status = do_open_fhandle(rqstp, cstate, open); @@ -566,8 +566,14 @@ nfsd4_access(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, union nfsd4_op_u *u) { struct nfsd4_access *access = &u->access; + u32 access_full; - if (access->ac_req_access & ~NFS3_ACCESS_FULL) + access_full = NFS3_ACCESS_FULL; + if (cstate->minorversion >= 2) + access_full |= NFS4_ACCESS_XALIST | NFS4_ACCESS_XAREAD | + NFS4_ACCESS_XAWRITE; + + if (access->ac_req_access & ~access_full) return nfserr_inval; access->ac_resp_access = access->ac_req_access; @@ -2091,6 +2097,68 @@ out: } #endif /* CONFIG_NFSD_PNFS */ +static __be32 +nfsd4_getxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_getxattr *getxattr = &u->getxattr; + + return nfsd_getxattr(rqstp, &cstate->current_fh, + getxattr->getxa_name, &getxattr->getxa_buf, + &getxattr->getxa_len); +} + +static __be32 +nfsd4_setxattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_setxattr *setxattr = &u->setxattr; + __be32 ret; + + if (opens_in_grace(SVC_NET(rqstp))) + return nfserr_grace; + + ret = nfsd_setxattr(rqstp, &cstate->current_fh, setxattr->setxa_name, + setxattr->setxa_buf, setxattr->setxa_len, + setxattr->setxa_flags); + + if (!ret) + set_change_info(&setxattr->setxa_cinfo, &cstate->current_fh); + + return ret; +} + +static __be32 +nfsd4_listxattrs(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + /* + * Get the entire list, then copy out only the user attributes + * in the encode function. + */ + return nfsd_listxattr(rqstp, &cstate->current_fh, + &u->listxattrs.lsxa_buf, &u->listxattrs.lsxa_len); +} + +static __be32 +nfsd4_removexattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, + union nfsd4_op_u *u) +{ + struct nfsd4_removexattr *removexattr = &u->removexattr; + __be32 ret; + + if (opens_in_grace(SVC_NET(rqstp))) + return nfserr_grace; + + ret = nfsd_removexattr(rqstp, &cstate->current_fh, + removexattr->rmxa_name); + + if (!ret) + set_change_info(&removexattr->rmxa_cinfo, &cstate->current_fh); + + return ret; +} + /* * NULL call. 
*/ @@ -2700,6 +2768,42 @@ static inline u32 nfsd4_seek_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) return (op_encode_hdr_size + 3) * sizeof(__be32); } +static inline u32 nfsd4_getxattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + u32 maxcount, rlen; + + maxcount = svc_max_payload(rqstp); + rlen = min_t(u32, XATTR_SIZE_MAX, maxcount); + + return (op_encode_hdr_size + 1 + XDR_QUADLEN(rlen)) * sizeof(__be32); +} + +static inline u32 nfsd4_setxattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + return (op_encode_hdr_size + op_encode_change_info_maxsz) + * sizeof(__be32); +} +static inline u32 nfsd4_listxattrs_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + u32 maxcount, rlen; + + maxcount = svc_max_payload(rqstp); + rlen = min(op->u.listxattrs.lsxa_maxcount, maxcount); + + return (op_encode_hdr_size + 4 + XDR_QUADLEN(rlen)) * sizeof(__be32); +} + +static inline u32 nfsd4_removexattr_rsize(struct svc_rqst *rqstp, + struct nfsd4_op *op) +{ + return (op_encode_hdr_size + op_encode_change_info_maxsz) + * sizeof(__be32); +} + + static const struct nfsd4_operation nfsd4_ops[] = { [OP_ACCESS] = { .op_func = nfsd4_access, @@ -3081,6 +3185,28 @@ static const struct nfsd4_operation nfsd4_ops[] = { .op_name = "OP_COPY_NOTIFY", .op_rsize_bop = nfsd4_copy_notify_rsize, }, + [OP_GETXATTR] = { + .op_func = nfsd4_getxattr, + .op_name = "OP_GETXATTR", + .op_rsize_bop = nfsd4_getxattr_rsize, + }, + [OP_SETXATTR] = { + .op_func = nfsd4_setxattr, + .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, + .op_name = "OP_SETXATTR", + .op_rsize_bop = nfsd4_setxattr_rsize, + }, + [OP_LISTXATTRS] = { + .op_func = nfsd4_listxattrs, + .op_name = "OP_LISTXATTRS", + .op_rsize_bop = nfsd4_listxattrs_rsize, + }, + [OP_REMOVEXATTR] = { + .op_func = nfsd4_removexattr, + .op_flags = OP_MODIFIES_SOMETHING | OP_CACHEME, + .op_name = "OP_REMOVEXATTR", + .op_rsize_bop = nfsd4_removexattr_rsize, + }, }; /** diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index 9e40dfecf1b1..186fa2c2c6ba 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -747,13 +747,11 @@ struct cld_upcall { }; static int -__cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg) +__cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg, struct nfsd_net *nn) { int ret; struct rpc_pipe_msg msg; struct cld_upcall *cup = container_of(cmsg, struct cld_upcall, cu_u); - struct nfsd_net *nn = net_generic(pipe->dentry->d_sb->s_fs_info, - nfsd_net_id); memset(&msg, 0, sizeof(msg)); msg.data = cmsg; @@ -773,7 +771,7 @@ out: } static int -cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg) +cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg, struct nfsd_net *nn) { int ret; @@ -782,7 +780,7 @@ cld_pipe_upcall(struct rpc_pipe *pipe, void *cmsg) * upcalls queued. 
*/ do { - ret = __cld_pipe_upcall(pipe, cmsg); + ret = __cld_pipe_upcall(pipe, cmsg, nn); } while (ret == -EAGAIN); return ret; @@ -1115,7 +1113,7 @@ nfsd4_cld_create(struct nfs4_client *clp) memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, clp->cl_name.len); - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn); if (!ret) { ret = cup->cu_u.cu_msg.cm_status; set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); @@ -1180,7 +1178,7 @@ nfsd4_cld_create_v2(struct nfs4_client *clp) } else cmsg->cm_u.cm_clntinfo.cc_princhash.cp_len = 0; - ret = cld_pipe_upcall(cn->cn_pipe, cmsg); + ret = cld_pipe_upcall(cn->cn_pipe, cmsg, nn); if (!ret) { ret = cmsg->cm_status; set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); @@ -1218,7 +1216,7 @@ nfsd4_cld_remove(struct nfs4_client *clp) memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, clp->cl_name.len); - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn); if (!ret) { ret = cup->cu_u.cu_msg.cm_status; clear_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); @@ -1261,7 +1259,7 @@ nfsd4_cld_check_v0(struct nfs4_client *clp) memcpy(cup->cu_u.cu_msg.cm_u.cm_name.cn_id, clp->cl_name.data, clp->cl_name.len); - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn); if (!ret) { ret = cup->cu_u.cu_msg.cm_status; set_bit(NFSD4_CLIENT_STABLE, &clp->cl_flags); @@ -1404,7 +1402,7 @@ nfsd4_cld_grace_start(struct nfsd_net *nn) } cup->cu_u.cu_msg.cm_cmd = Cld_GraceStart; - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn); if (!ret) ret = cup->cu_u.cu_msg.cm_status; @@ -1432,7 +1430,7 @@ nfsd4_cld_grace_done_v0(struct nfsd_net *nn) cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone; cup->cu_u.cu_msg.cm_u.cm_gracetime = nn->boot_time; - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn); if (!ret) ret = cup->cu_u.cu_msg.cm_status; @@ -1460,7 +1458,7 @@ nfsd4_cld_grace_done(struct nfsd_net *nn) } cup->cu_u.cu_msg.cm_cmd = Cld_GraceDone; - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn); if (!ret) ret = cup->cu_u.cu_msg.cm_status; @@ -1524,7 +1522,7 @@ nfsd4_cld_get_version(struct nfsd_net *nn) goto out_err; } cup->cu_u.cu_msg.cm_cmd = Cld_GetVersion; - ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg); + ret = cld_pipe_upcall(cn->cn_pipe, &cup->cu_u.cu_msg, nn); if (!ret) { ret = cup->cu_u.cu_msg.cm_status; if (ret) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c9056316a0b3..c09a2a4281ec 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3117,7 +3117,7 @@ nfsd4_exchange_id(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, break; default: /* checked by xdr code */ WARN_ON_ONCE(1); - /* fall through */ + fallthrough; case SP4_SSV: status = nfserr_encr_alg_unsupp; goto out_nolock; @@ -4532,7 +4532,7 @@ static int nfsd4_cb_recall_done(struct nfsd4_callback *cb, rpc_delay(task, 2 * HZ); return 0; } - /*FALLTHRU*/ + fallthrough; default: return 1; } @@ -4597,6 +4597,8 @@ static bool nfsd_breaker_owns_lease(struct file_lock *fl) if (!i_am_nfsd()) return NULL; rqst = kthread_data(current); + if (!rqst->rq_lease_breaker) + return NULL; clp = *(rqst->rq_lease_breaker); return dl->dl_stid.sc_client == clp; } @@ -4940,6 +4942,32 @@ static struct file_lock 
*nfs4_alloc_init_lease(struct nfs4_delegation *dp, return fl; } +static int nfsd4_check_conflicting_opens(struct nfs4_client *clp, + struct nfs4_file *fp) +{ + struct nfs4_clnt_odstate *co; + struct file *f = fp->fi_deleg_file->nf_file; + struct inode *ino = locks_inode(f); + int writes = atomic_read(&ino->i_writecount); + + if (fp->fi_fds[O_WRONLY]) + writes--; + if (fp->fi_fds[O_RDWR]) + writes--; + WARN_ON_ONCE(writes < 0); + if (writes > 0) + return -EAGAIN; + spin_lock(&fp->fi_lock); + list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) { + if (co->co_client != clp) { + spin_unlock(&fp->fi_lock); + return -EAGAIN; + } + } + spin_unlock(&fp->fi_lock); + return 0; +} + static struct nfs4_delegation * nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate) @@ -4959,9 +4987,12 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, nf = find_readable_file(fp); if (!nf) { - /* We should always have a readable file here */ - WARN_ON_ONCE(1); - return ERR_PTR(-EBADF); + /* + * We probably could attempt another open and get a read + * delegation, but for now, don't bother until the + * client actually sends us one. + */ + return ERR_PTR(-EAGAIN); } spin_lock(&state_lock); spin_lock(&fp->fi_lock); @@ -4991,11 +5022,19 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh, if (!fl) goto out_clnt_odstate; + status = nfsd4_check_conflicting_opens(clp, fp); + if (status) { + locks_free_lock(fl); + goto out_clnt_odstate; + } status = vfs_setlease(fp->fi_deleg_file->nf_file, fl->fl_type, &fl, NULL); if (fl) locks_free_lock(fl); if (status) goto out_clnt_odstate; + status = nfsd4_check_conflicting_opens(clp, fp); + if (status) + goto out_clnt_odstate; spin_lock(&state_lock); spin_lock(&fp->fi_lock); @@ -5077,17 +5116,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, goto out_no_deleg; if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED)) goto out_no_deleg; - /* - * Also, if the file was opened for write or - * create, there's a good chance the client's - * about to write to it, resulting in an - * immediate recall (since we don't support - * write delegations): - */ - if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE) - goto out_no_deleg; - if (open->op_create == NFS4_OPEN_CREATE) - goto out_no_deleg; break; default: goto out_no_deleg; @@ -5626,7 +5654,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) break; default: printk("unknown stateid type %x\n", s->sc_type); - /* Fallthrough */ + fallthrough; case NFS4_CLOSED_STID: case NFS4_CLOSED_DELEG_STID: status = nfserr_bad_stateid; @@ -6716,7 +6744,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, case NFS4_READW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; - /* Fallthrough */ + fallthrough; case NFS4_READ_LT: spin_lock(&fp->fi_lock); nf = find_readable_file_locked(fp); @@ -6728,7 +6756,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, case NFS4_WRITEW_LT: if (nfsd4_has_session(cstate)) fl_flags |= FL_SLEEP; - /* Fallthrough */ + fallthrough; case NFS4_WRITE_LT: spin_lock(&fp->fi_lock); nf = find_writeable_file_locked(fp); @@ -6790,7 +6818,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, break; case FILE_LOCK_DEFERRED: nbl = NULL; - /* Fallthrough */ + fallthrough; case -EAGAIN: /* conflock holds conflicting lock */ status = nfserr_denied; dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); diff --git 
a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 996ac01ee977..259d5ad0e3f4 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -41,6 +41,8 @@ #include <linux/pagemap.h> #include <linux/sunrpc/svcauth_gss.h> #include <linux/sunrpc/addr.h> +#include <linux/xattr.h> +#include <uapi/linux/xattr.h> #include "idmap.h" #include "acl.h" @@ -257,6 +259,44 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len) return p; } +static __be32 +svcxdr_construct_vector(struct nfsd4_compoundargs *argp, struct kvec *head, + struct page ***pagelist, u32 buflen) +{ + int avail; + int len; + int pages; + + /* Sorry .. no magic macros for this.. * + * READ_BUF(write->wr_buflen); + * SAVEMEM(write->wr_buf, write->wr_buflen); + */ + avail = (char *)argp->end - (char *)argp->p; + if (avail + argp->pagelen < buflen) { + dprintk("NFSD: xdr error (%s:%d)\n", + __FILE__, __LINE__); + return nfserr_bad_xdr; + } + head->iov_base = argp->p; + head->iov_len = avail; + *pagelist = argp->pagelist; + + len = XDR_QUADLEN(buflen) << 2; + if (len >= avail) { + len -= avail; + + pages = len >> PAGE_SHIFT; + argp->pagelist += pages; + argp->pagelen -= pages * PAGE_SIZE; + len -= pages * PAGE_SIZE; + + next_decode_page(argp); + } + argp->p += XDR_QUADLEN(len); + + return 0; +} + /** * savemem - duplicate a chunk of memory for later processing * @argp: NFSv4 compound argument structure to be freed with @@ -1265,8 +1305,6 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify static __be32 nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) { - int avail; - int len; DECODE_HEAD; status = nfsd4_decode_stateid(argp, &write->wr_stateid); @@ -1279,34 +1317,10 @@ nfsd4_decode_write(struct nfsd4_compoundargs *argp, struct nfsd4_write *write) goto xdr_error; write->wr_buflen = be32_to_cpup(p++); - /* Sorry .. no magic macros for this.. * - * READ_BUF(write->wr_buflen); - * SAVEMEM(write->wr_buf, write->wr_buflen); - */ - avail = (char*)argp->end - (char*)argp->p; - if (avail + argp->pagelen < write->wr_buflen) { - dprintk("NFSD: xdr error (%s:%d)\n", - __FILE__, __LINE__); - goto xdr_error; - } - write->wr_head.iov_base = p; - write->wr_head.iov_len = avail; - write->wr_pagelist = argp->pagelist; - - len = XDR_QUADLEN(write->wr_buflen) << 2; - if (len >= avail) { - int pages; - - len -= avail; - - pages = len >> PAGE_SHIFT; - argp->pagelist += pages; - argp->pagelen -= pages * PAGE_SIZE; - len -= pages * PAGE_SIZE; - - next_decode_page(argp); - } - argp->p += XDR_QUADLEN(len); + status = svcxdr_construct_vector(argp, &write->wr_head, + &write->wr_pagelist, write->wr_buflen); + if (status) + return status; DECODE_TAIL; } @@ -1865,6 +1879,208 @@ nfsd4_decode_seek(struct nfsd4_compoundargs *argp, struct nfsd4_seek *seek) DECODE_TAIL; } +/* + * XDR data that is more than PAGE_SIZE in size is normally part of a + * read or write. However, the size of extended attributes is limited + * by the maximum request size, and then further limited by the underlying + * filesystem limits. This can exceed PAGE_SIZE (currently, XATTR_SIZE_MAX + * is 64k). Since there is no kvec- or page-based interface to xattrs, + * and we're not dealing with contiguous pages, we need to do some copying. + */ + +/* + * Decode data into buffer. Uses head and pages constructed by + * svcxdr_construct_vector. 
+ */ +static __be32 +nfsd4_vbuf_from_vector(struct nfsd4_compoundargs *argp, struct kvec *head, + struct page **pages, char **bufp, u32 buflen) +{ + char *tmp, *dp; + u32 len; + + if (buflen <= head->iov_len) { + /* + * We're in luck, the head has enough space. Just return + * the head, no need for copying. + */ + *bufp = head->iov_base; + return 0; + } + + tmp = svcxdr_tmpalloc(argp, buflen); + if (tmp == NULL) + return nfserr_jukebox; + + dp = tmp; + memcpy(dp, head->iov_base, head->iov_len); + buflen -= head->iov_len; + dp += head->iov_len; + + while (buflen > 0) { + len = min_t(u32, buflen, PAGE_SIZE); + memcpy(dp, page_address(*pages), len); + + buflen -= len; + dp += len; + pages++; + } + + *bufp = tmp; + return 0; +} + +/* + * Get a user extended attribute name from the XDR buffer. + * It will not have the "user." prefix, so prepend it. + * Lastly, check for nul characters in the name. + */ +static __be32 +nfsd4_decode_xattr_name(struct nfsd4_compoundargs *argp, char **namep) +{ + DECODE_HEAD; + char *name, *sp, *dp; + u32 namelen, cnt; + + READ_BUF(4); + namelen = be32_to_cpup(p++); + + if (namelen > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) + return nfserr_nametoolong; + + if (namelen == 0) + goto xdr_error; + + READ_BUF(namelen); + + name = svcxdr_tmpalloc(argp, namelen + XATTR_USER_PREFIX_LEN + 1); + if (!name) + return nfserr_jukebox; + + memcpy(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); + + /* + * Copy the extended attribute name over while checking for 0 + * characters. + */ + sp = (char *)p; + dp = name + XATTR_USER_PREFIX_LEN; + cnt = namelen; + + while (cnt-- > 0) { + if (*sp == '\0') + goto xdr_error; + *dp++ = *sp++; + } + *dp = '\0'; + + *namep = name; + + DECODE_TAIL; +} + +/* + * A GETXATTR op request comes without a length specifier. We just set the + * maximum length for the reply based on XATTR_SIZE_MAX and the maximum + * channel reply size. nfsd_getxattr will probe the length of the xattr, + * check it against getxa_len, and allocate + return the value. 
+ */ +static __be32 +nfsd4_decode_getxattr(struct nfsd4_compoundargs *argp, + struct nfsd4_getxattr *getxattr) +{ + __be32 status; + u32 maxcount; + + status = nfsd4_decode_xattr_name(argp, &getxattr->getxa_name); + if (status) + return status; + + maxcount = svc_max_payload(argp->rqstp); + maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount); + + getxattr->getxa_len = maxcount; + + return status; +} + +static __be32 +nfsd4_decode_setxattr(struct nfsd4_compoundargs *argp, + struct nfsd4_setxattr *setxattr) +{ + DECODE_HEAD; + u32 flags, maxcount, size; + struct kvec head; + struct page **pagelist; + + READ_BUF(4); + flags = be32_to_cpup(p++); + + if (flags > SETXATTR4_REPLACE) + return nfserr_inval; + setxattr->setxa_flags = flags; + + status = nfsd4_decode_xattr_name(argp, &setxattr->setxa_name); + if (status) + return status; + + maxcount = svc_max_payload(argp->rqstp); + maxcount = min_t(u32, XATTR_SIZE_MAX, maxcount); + + READ_BUF(4); + size = be32_to_cpup(p++); + if (size > maxcount) + return nfserr_xattr2big; + + setxattr->setxa_len = size; + if (size > 0) { + status = svcxdr_construct_vector(argp, &head, &pagelist, size); + if (status) + return status; + + status = nfsd4_vbuf_from_vector(argp, &head, pagelist, + &setxattr->setxa_buf, size); + } + + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_listxattrs(struct nfsd4_compoundargs *argp, + struct nfsd4_listxattrs *listxattrs) +{ + DECODE_HEAD; + u32 maxcount; + + READ_BUF(12); + p = xdr_decode_hyper(p, &listxattrs->lsxa_cookie); + + /* + * If the cookie is too large to have even one user.x attribute + * plus trailing '\0' left in a maximum size buffer, it's invalid. + */ + if (listxattrs->lsxa_cookie >= + (XATTR_LIST_MAX / (XATTR_USER_PREFIX_LEN + 2))) + return nfserr_badcookie; + + maxcount = be32_to_cpup(p++); + if (maxcount < 8) + /* Always need at least 2 words (length and one character) */ + return nfserr_inval; + + maxcount = min(maxcount, svc_max_payload(argp->rqstp)); + listxattrs->lsxa_maxcount = maxcount; + + DECODE_TAIL; +} + +static __be32 +nfsd4_decode_removexattr(struct nfsd4_compoundargs *argp, + struct nfsd4_removexattr *removexattr) +{ + return nfsd4_decode_xattr_name(argp, &removexattr->rmxa_name); +} + static __be32 nfsd4_decode_noop(struct nfsd4_compoundargs *argp, void *p) { @@ -1961,6 +2177,11 @@ static const nfsd4_dec nfsd4_dec_ops[] = { [OP_SEEK] = (nfsd4_dec)nfsd4_decode_seek, [OP_WRITE_SAME] = (nfsd4_dec)nfsd4_decode_notsupp, [OP_CLONE] = (nfsd4_dec)nfsd4_decode_clone, + /* RFC 8276 extended atributes operations */ + [OP_GETXATTR] = (nfsd4_dec)nfsd4_decode_getxattr, + [OP_SETXATTR] = (nfsd4_dec)nfsd4_decode_setxattr, + [OP_LISTXATTRS] = (nfsd4_dec)nfsd4_decode_listxattrs, + [OP_REMOVEXATTR] = (nfsd4_dec)nfsd4_decode_removexattr, }; static inline bool @@ -2992,6 +3213,15 @@ out_acl: } #endif + if (bmval2 & FATTR4_WORD2_XATTR_SUPPORT) { + p = xdr_reserve_space(xdr, 4); + if (!p) + goto out_resource; + err = xattr_supported_namespace(d_inode(dentry), + XATTR_USER_PREFIX); + *p++ = cpu_to_be32(err == 0); + } + attrlen = htonl(xdr->buf->len - attrlen_offset - 4); write_bytes_to_xdr_buf(xdr->buf, attrlen_offset, &attrlen, 4); status = nfs_ok; @@ -4446,6 +4676,241 @@ nfsd4_encode_noop(struct nfsd4_compoundres *resp, __be32 nfserr, void *p) return nfserr; } +/* + * Encode kmalloc-ed buffer in to XDR stream. 
+ */ +static int +nfsd4_vbuf_to_stream(struct xdr_stream *xdr, char *buf, u32 buflen) +{ + u32 cplen; + __be32 *p; + + cplen = min_t(unsigned long, buflen, + ((void *)xdr->end - (void *)xdr->p)); + p = xdr_reserve_space(xdr, cplen); + if (!p) + return nfserr_resource; + + memcpy(p, buf, cplen); + buf += cplen; + buflen -= cplen; + + while (buflen) { + cplen = min_t(u32, buflen, PAGE_SIZE); + p = xdr_reserve_space(xdr, cplen); + if (!p) + return nfserr_resource; + + memcpy(p, buf, cplen); + + if (cplen < PAGE_SIZE) { + /* + * We're done, with a length that wasn't page + * aligned, so possibly not word aligned. Pad + * any trailing bytes with 0. + */ + xdr_encode_opaque_fixed(p, NULL, cplen); + break; + } + + buflen -= PAGE_SIZE; + buf += PAGE_SIZE; + } + + return 0; +} + +static __be32 +nfsd4_encode_getxattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_getxattr *getxattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p, err; + + p = xdr_reserve_space(xdr, 4); + if (!p) + return nfserr_resource; + + *p = cpu_to_be32(getxattr->getxa_len); + + if (getxattr->getxa_len == 0) + return 0; + + err = nfsd4_vbuf_to_stream(xdr, getxattr->getxa_buf, + getxattr->getxa_len); + + kvfree(getxattr->getxa_buf); + + return err; +} + +static __be32 +nfsd4_encode_setxattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_setxattr *setxattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p; + + p = xdr_reserve_space(xdr, 20); + if (!p) + return nfserr_resource; + + encode_cinfo(p, &setxattr->setxa_cinfo); + + return 0; +} + +/* + * See if there are cookie values that can be rejected outright. + */ +static __be32 +nfsd4_listxattr_validate_cookie(struct nfsd4_listxattrs *listxattrs, + u32 *offsetp) +{ + u64 cookie = listxattrs->lsxa_cookie; + + /* + * If the cookie is larger than the maximum number we can fit + * in either the buffer we just got back from vfs_listxattr, or, + * XDR-encoded, in the return buffer, it's invalid. + */ + if (cookie > (listxattrs->lsxa_len) / (XATTR_USER_PREFIX_LEN + 2)) + return nfserr_badcookie; + + if (cookie > (listxattrs->lsxa_maxcount / + (XDR_QUADLEN(XATTR_USER_PREFIX_LEN + 2) + 4))) + return nfserr_badcookie; + + *offsetp = (u32)cookie; + return 0; +} + +static __be32 +nfsd4_encode_listxattrs(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_listxattrs *listxattrs) +{ + struct xdr_stream *xdr = &resp->xdr; + u32 cookie_offset, count_offset, eof; + u32 left, xdrleft, slen, count; + u32 xdrlen, offset; + u64 cookie; + char *sp; + __be32 status; + __be32 *p; + u32 nuser; + + eof = 1; + + status = nfsd4_listxattr_validate_cookie(listxattrs, &offset); + if (status) + goto out; + + /* + * Reserve space for the cookie and the name array count. Record + * the offsets to save them later. + */ + cookie_offset = xdr->buf->len; + count_offset = cookie_offset + 8; + p = xdr_reserve_space(xdr, 12); + if (!p) { + status = nfserr_resource; + goto out; + } + + count = 0; + left = listxattrs->lsxa_len; + sp = listxattrs->lsxa_buf; + nuser = 0; + + xdrleft = listxattrs->lsxa_maxcount; + + while (left > 0 && xdrleft > 0) { + slen = strlen(sp); + + /* + * Check if this a user. attribute, skip it if not. + */ + if (strncmp(sp, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) + goto contloop; + + slen -= XATTR_USER_PREFIX_LEN; + xdrlen = 4 + ((slen + 3) & ~3); + if (xdrlen > xdrleft) { + if (count == 0) { + /* + * Can't even fit the first attribute name. 
+ */ + status = nfserr_toosmall; + goto out; + } + eof = 0; + goto wreof; + } + + left -= XATTR_USER_PREFIX_LEN; + sp += XATTR_USER_PREFIX_LEN; + if (nuser++ < offset) + goto contloop; + + + p = xdr_reserve_space(xdr, xdrlen); + if (!p) { + status = nfserr_resource; + goto out; + } + + p = xdr_encode_opaque(p, sp, slen); + + xdrleft -= xdrlen; + count++; +contloop: + sp += slen + 1; + left -= slen + 1; + } + + /* + * If there were user attributes to copy, but we didn't copy + * any, the offset was too large (e.g. the cookie was invalid). + */ + if (nuser > 0 && count == 0) { + status = nfserr_badcookie; + goto out; + } + +wreof: + p = xdr_reserve_space(xdr, 4); + if (!p) { + status = nfserr_resource; + goto out; + } + *p = cpu_to_be32(eof); + + cookie = offset + count; + + write_bytes_to_xdr_buf(xdr->buf, cookie_offset, &cookie, 8); + count = htonl(count); + write_bytes_to_xdr_buf(xdr->buf, count_offset, &count, 4); +out: + if (listxattrs->lsxa_len) + kvfree(listxattrs->lsxa_buf); + return status; +} + +static __be32 +nfsd4_encode_removexattr(struct nfsd4_compoundres *resp, __be32 nfserr, + struct nfsd4_removexattr *removexattr) +{ + struct xdr_stream *xdr = &resp->xdr; + __be32 *p; + + p = xdr_reserve_space(xdr, 20); + if (!p) + return nfserr_resource; + + p = encode_cinfo(p, &removexattr->rmxa_cinfo); + return 0; +} + typedef __be32(* nfsd4_enc)(struct nfsd4_compoundres *, __be32, void *); /* @@ -4535,6 +5000,12 @@ static const nfsd4_enc nfsd4_enc_ops[] = { [OP_SEEK] = (nfsd4_enc)nfsd4_encode_seek, [OP_WRITE_SAME] = (nfsd4_enc)nfsd4_encode_noop, [OP_CLONE] = (nfsd4_enc)nfsd4_encode_noop, + + /* RFC 8276 extended atributes operations */ + [OP_GETXATTR] = (nfsd4_enc)nfsd4_encode_getxattr, + [OP_SETXATTR] = (nfsd4_enc)nfsd4_encode_setxattr, + [OP_LISTXATTRS] = (nfsd4_enc)nfsd4_encode_listxattrs, + [OP_REMOVEXATTR] = (nfsd4_enc)nfsd4_encode_removexattr, }; /* diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index cd05732f8eaa..7ae236113040 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c @@ -351,7 +351,7 @@ static ssize_t write_unlock_fs(struct file *file, char *buf, size_t size) static ssize_t write_filehandle(struct file *file, char *buf, size_t size) { char *dname, *path; - int uninitialized_var(maxsize); + int maxsize; char *mesg = buf; int len; struct auth_domain *dom; diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h index 57c832d1b30f..cb742e17e04a 100644 --- a/fs/nfsd/nfsd.h +++ b/fs/nfsd/nfsd.h @@ -286,6 +286,8 @@ void nfsd_lockd_shutdown(void); #define nfserr_wrong_lfs cpu_to_be32(NFS4ERR_WRONG_LFS) #define nfserr_badlabel cpu_to_be32(NFS4ERR_BADLABEL) #define nfserr_file_open cpu_to_be32(NFS4ERR_FILE_OPEN) +#define nfserr_xattr2big cpu_to_be32(NFS4ERR_XATTR2BIG) +#define nfserr_noxattr cpu_to_be32(NFS4ERR_NOXATTR) /* error codes for internal use */ /* if a request fails due to kmalloc failure, it gets dropped. 
@@ -387,7 +389,8 @@ void nfsd_lockd_shutdown(void); (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \ FATTR4_WORD2_CHANGE_ATTR_TYPE | \ FATTR4_WORD2_MODE_UMASK | \ - NFSD4_2_SECURITY_ATTRS) + NFSD4_2_SECURITY_ATTRS | \ + FATTR4_WORD2_XATTR_SUPPORT) extern const u32 nfsd_suppattrs[3][3]; diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 37bc8f5f4514..c81dbbad8792 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -459,7 +459,7 @@ static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) case FSID_DEV: if (!old_valid_dev(exp_sb(exp)->s_dev)) return false; - /* FALL THROUGH */ + fallthrough; case FSID_MAJOR_MINOR: case FSID_ENCODE_DEV: return exp_sb(exp)->s_type->fs_flags & FS_REQUIRES_DEV; @@ -469,7 +469,7 @@ static bool fsid_type_ok_for_exp(u8 fsid_type, struct svc_export *exp) case FSID_UUID16: if (!is_root_export(exp)) return false; - /* fall through */ + fallthrough; case FSID_UUID4_INUM: case FSID_UUID16_INUM: return exp->ex_uuid != NULL; diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c index 543bbe0a556e..6e0b066480c5 100644 --- a/fs/nfsd/nfsproc.c +++ b/fs/nfsd/nfsproc.c @@ -314,7 +314,7 @@ nfsd_proc_create(struct svc_rqst *rqstp) rdev = inode->i_rdev; attr->ia_valid |= ATTR_SIZE; - /* FALLTHROUGH */ + fallthrough; case S_IFIFO: /* this is probably a permission check.. * at least IRIX implements perm checking on diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c index b603dfcdd361..f7f6473578af 100644 --- a/fs/nfsd/nfssvc.c +++ b/fs/nfsd/nfssvc.c @@ -221,7 +221,7 @@ int nfsd_vers(struct nfsd_net *nn, int vers, enum vers_op change) case NFSD_TEST: if (nn->nfsd_versions) return nn->nfsd_versions[vers]; - /* Fallthrough */ + fallthrough; case NFSD_AVAIL: return nfsd_support_version(vers); } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index d22a056da477..aba5af9df328 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -612,6 +612,12 @@ static struct accessmap nfs3_regaccess[] = { { NFS3_ACCESS_MODIFY, NFSD_MAY_WRITE|NFSD_MAY_TRUNC }, { NFS3_ACCESS_EXTEND, NFSD_MAY_WRITE }, +#ifdef CONFIG_NFSD_V4 + { NFS4_ACCESS_XAREAD, NFSD_MAY_READ }, + { NFS4_ACCESS_XAWRITE, NFSD_MAY_WRITE }, + { NFS4_ACCESS_XALIST, NFSD_MAY_READ }, +#endif + { 0, 0 } }; @@ -622,6 +628,12 @@ static struct accessmap nfs3_diraccess[] = { { NFS3_ACCESS_EXTEND, NFSD_MAY_EXEC|NFSD_MAY_WRITE }, { NFS3_ACCESS_DELETE, NFSD_MAY_REMOVE }, +#ifdef CONFIG_NFSD_V4 + { NFS4_ACCESS_XAREAD, NFSD_MAY_READ }, + { NFS4_ACCESS_XAWRITE, NFSD_MAY_WRITE }, + { NFS4_ACCESS_XALIST, NFSD_MAY_READ }, +#endif + { 0, 0 } }; @@ -1444,7 +1456,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, *created = true; break; } - /* fall through */ + fallthrough; case NFS4_CREATE_EXCLUSIVE4_1: if ( d_inode(dchild)->i_mtime.tv_sec == v_mtime && d_inode(dchild)->i_atime.tv_sec == v_atime @@ -1453,7 +1465,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, *created = true; goto set_attr; } - /* fall through */ + fallthrough; case NFS3_CREATE_GUARDED: err = nfserr_exist; } @@ -2065,6 +2077,233 @@ static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp) return nfsexp_flags(rqstp, exp) & NFSEXP_READONLY; } +#ifdef CONFIG_NFSD_V4 +/* + * Helper function to translate error numbers. In the case of xattr operations, + * some error codes need to be translated outside of the standard translations. + * + * ENODATA needs to be translated to nfserr_noxattr. + * E2BIG to nfserr_xattr2big. + * + * Additionally, vfs_listxattr can return -ERANGE. 
This means that the + * file has too many extended attributes to retrieve inside an + * XATTR_LIST_MAX sized buffer. This is a bug in the xattr implementation: + * filesystems will allow the adding of extended attributes until they hit + * their own internal limit. This limit may be larger than XATTR_LIST_MAX. + * So, at that point, the attributes are present and valid, but can't + * be retrieved using listxattr, since the upper level xattr code enforces + * the XATTR_LIST_MAX limit. + * + * This bug means that we need to deal with listxattr returning -ERANGE. The + * best mapping is to return TOOSMALL. + */ +static __be32 +nfsd_xattr_errno(int err) +{ + switch (err) { + case -ENODATA: + return nfserr_noxattr; + case -E2BIG: + return nfserr_xattr2big; + case -ERANGE: + return nfserr_toosmall; + } + return nfserrno(err); +} + +/* + * Retrieve the specified user extended attribute. To avoid always + * having to allocate the maximum size (since we are not getting + * a maximum size from the RPC), do a probe + alloc. Hold a reader + * lock on i_rwsem to prevent the extended attribute from changing + * size while we're doing this. + */ +__be32 +nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name, + void **bufp, int *lenp) +{ + ssize_t len; + __be32 err; + char *buf; + struct inode *inode; + struct dentry *dentry; + + err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ); + if (err) + return err; + + err = nfs_ok; + dentry = fhp->fh_dentry; + inode = d_inode(dentry); + + inode_lock_shared(inode); + + len = vfs_getxattr(dentry, name, NULL, 0); + + /* + * Zero-length attribute, just return. + */ + if (len == 0) { + *bufp = NULL; + *lenp = 0; + goto out; + } + + if (len < 0) { + err = nfsd_xattr_errno(len); + goto out; + } + + if (len > *lenp) { + err = nfserr_toosmall; + goto out; + } + + buf = kvmalloc(len, GFP_KERNEL | GFP_NOFS); + if (buf == NULL) { + err = nfserr_jukebox; + goto out; + } + + len = vfs_getxattr(dentry, name, buf, len); + if (len <= 0) { + kvfree(buf); + buf = NULL; + err = nfsd_xattr_errno(len); + } + + *lenp = len; + *bufp = buf; + +out: + inode_unlock_shared(inode); + + return err; +} + +/* + * Retrieve the xattr names. Since we can't know how many are + * user extended attributes, we must get all attributes here, + * and have the XDR encode filter out the "user." ones. + * + * While this could always just allocate an XATTR_LIST_MAX + * buffer, that's a waste, so do a probe + allocate. To + * avoid any changes between the probe and allocate, wrap + * this in inode_lock. + */ +__be32 +nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char **bufp, + int *lenp) +{ + ssize_t len; + __be32 err; + char *buf; + struct inode *inode; + struct dentry *dentry; + + err = fh_verify(rqstp, fhp, 0, NFSD_MAY_READ); + if (err) + return err; + + dentry = fhp->fh_dentry; + inode = d_inode(dentry); + *lenp = 0; + + inode_lock_shared(inode); + + len = vfs_listxattr(dentry, NULL, 0); + if (len <= 0) { + err = nfsd_xattr_errno(len); + goto out; + } + + if (len > XATTR_LIST_MAX) { + err = nfserr_xattr2big; + goto out; + } + + /* + * We're holding i_rwsem - use GFP_NOFS. 
+ */ + buf = kvmalloc(len, GFP_KERNEL | GFP_NOFS); + if (buf == NULL) { + err = nfserr_jukebox; + goto out; + } + + len = vfs_listxattr(dentry, buf, len); + if (len <= 0) { + kvfree(buf); + err = nfsd_xattr_errno(len); + goto out; + } + + *lenp = len; + *bufp = buf; + + err = nfs_ok; +out: + inode_unlock_shared(inode); + + return err; +} + +/* + * Removexattr and setxattr need to call fh_lock to both lock the inode + * and set the change attribute. Since the top-level vfs_removexattr + * and vfs_setxattr calls already do their own inode_lock calls, call + * the _locked variant. Pass in a NULL pointer for delegated_inode, + * and let the client deal with NFS4ERR_DELAY (same as with e.g. + * setattr and remove). + */ +__be32 +nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name) +{ + int err, ret; + + err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE); + if (err) + return err; + + ret = fh_want_write(fhp); + if (ret) + return nfserrno(ret); + + fh_lock(fhp); + + ret = __vfs_removexattr_locked(fhp->fh_dentry, name, NULL); + + fh_unlock(fhp); + fh_drop_write(fhp); + + return nfsd_xattr_errno(ret); +} + +__be32 +nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, char *name, + void *buf, u32 len, u32 flags) +{ + int err, ret; + + err = fh_verify(rqstp, fhp, 0, NFSD_MAY_WRITE); + if (err) + return err; + + ret = fh_want_write(fhp); + if (ret) + return nfserrno(ret); + fh_lock(fhp); + + ret = __vfs_setxattr_locked(fhp->fh_dentry, name, buf, len, flags, + NULL); + + fh_unlock(fhp); + fh_drop_write(fhp); + + return nfsd_xattr_errno(ret); +} +#endif + /* * Check for a user's access permissions to this inode. */ diff --git a/fs/nfsd/vfs.h b/fs/nfsd/vfs.h index 3eb660ad80d1..a2442ebe5acf 100644 --- a/fs/nfsd/vfs.h +++ b/fs/nfsd/vfs.h @@ -76,6 +76,16 @@ __be32 do_nfsd_create(struct svc_rqst *, struct svc_fh *, __be32 nfsd_commit(struct svc_rqst *, struct svc_fh *, loff_t, unsigned long, __be32 *verf); #endif /* CONFIG_NFSD_V3 */ +#ifdef CONFIG_NFSD_V4 +__be32 nfsd_getxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, + char *name, void **bufp, int *lenp); +__be32 nfsd_listxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, + char **bufp, int *lenp); +__be32 nfsd_removexattr(struct svc_rqst *rqstp, struct svc_fh *fhp, + char *name); +__be32 nfsd_setxattr(struct svc_rqst *rqstp, struct svc_fh *fhp, + char *name, void *buf, u32 len, u32 flags); +#endif int nfsd_open_break_lease(struct inode *, int); __be32 nfsd_open(struct svc_rqst *, struct svc_fh *, umode_t, int, struct file **); diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h index db63d39b1507..66499fb6b567 100644 --- a/fs/nfsd/xdr4.h +++ b/fs/nfsd/xdr4.h @@ -224,6 +224,32 @@ struct nfsd4_putfh { bool no_verify; /* represents foreigh fh */ }; +struct nfsd4_getxattr { + char *getxa_name; /* request */ + u32 getxa_len; /* request */ + void *getxa_buf; +}; + +struct nfsd4_setxattr { + u32 setxa_flags; /* request */ + char *setxa_name; /* request */ + char *setxa_buf; /* request */ + u32 setxa_len; /* request */ + struct nfsd4_change_info setxa_cinfo; /* response */ +}; + +struct nfsd4_removexattr { + char *rmxa_name; /* request */ + struct nfsd4_change_info rmxa_cinfo; /* response */ +}; + +struct nfsd4_listxattrs { + u64 lsxa_cookie; /* request */ + u32 lsxa_maxcount; /* request */ + char *lsxa_buf; /* unfiltered buffer (reply) */ + u32 lsxa_len; /* unfiltered len (reply) */ +}; + struct nfsd4_open { u32 op_claim_type; /* request */ struct xdr_netobj op_fname; /* request - everything but CLAIM_PREV */ @@ -649,6 +675,11 @@ struct nfsd4_op 
{ struct nfsd4_offload_status offload_status; struct nfsd4_copy_notify copy_notify; struct nfsd4_seek seek; + + struct nfsd4_getxattr getxattr; + struct nfsd4_setxattr setxattr; + struct nfsd4_listxattrs listxattrs; + struct nfsd4_removexattr removexattr; } u; struct nfs4_replay * replay; }; diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index 235b959fc2b3..adf3bb0a8048 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -613,10 +613,10 @@ void nilfs_palloc_commit_free_entry(struct inode *inode, lock = nilfs_mdt_bgl_lock(inode, group); if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) - nilfs_msg(inode->i_sb, KERN_WARNING, - "%s (ino=%lu): entry number %llu already freed", - __func__, inode->i_ino, - (unsigned long long)req->pr_entry_nr); + nilfs_warn(inode->i_sb, + "%s (ino=%lu): entry number %llu already freed", + __func__, inode->i_ino, + (unsigned long long)req->pr_entry_nr); else nilfs_palloc_group_desc_add_entries(desc, lock, 1); @@ -654,10 +654,10 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, lock = nilfs_mdt_bgl_lock(inode, group); if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) - nilfs_msg(inode->i_sb, KERN_WARNING, - "%s (ino=%lu): entry number %llu already freed", - __func__, inode->i_ino, - (unsigned long long)req->pr_entry_nr); + nilfs_warn(inode->i_sb, + "%s (ino=%lu): entry number %llu already freed", + __func__, inode->i_ino, + (unsigned long long)req->pr_entry_nr); else nilfs_palloc_group_desc_add_entries(desc, lock, 1); @@ -763,10 +763,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) do { if (!nilfs_clear_bit_atomic(lock, group_offset, bitmap)) { - nilfs_msg(inode->i_sb, KERN_WARNING, - "%s (ino=%lu): entry number %llu already freed", - __func__, inode->i_ino, - (unsigned long long)entry_nrs[j]); + nilfs_warn(inode->i_sb, + "%s (ino=%lu): entry number %llu already freed", + __func__, inode->i_ino, + (unsigned long long)entry_nrs[j]); } else { n++; } @@ -808,10 +808,10 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) ret = nilfs_palloc_delete_entry_block(inode, last_nrs[k]); if (ret && ret != -ENOENT) - nilfs_msg(inode->i_sb, KERN_WARNING, - "error %d deleting block that object (entry=%llu, ino=%lu) belongs to", - ret, (unsigned long long)last_nrs[k], - inode->i_ino); + nilfs_warn(inode->i_sb, + "error %d deleting block that object (entry=%llu, ino=%lu) belongs to", + ret, (unsigned long long)last_nrs[k], + inode->i_ino); } desc_kaddr = kmap_atomic(desc_bh->b_page); @@ -826,9 +826,9 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) if (nfree == nilfs_palloc_entries_per_group(inode)) { ret = nilfs_palloc_delete_bitmap_block(inode, group); if (ret && ret != -ENOENT) - nilfs_msg(inode->i_sb, KERN_WARNING, - "error %d deleting bitmap block of group=%lu, ino=%lu", - ret, group, inode->i_ino); + nilfs_warn(inode->i_sb, + "error %d deleting bitmap block of group=%lu, ino=%lu", + ret, group, inode->i_ino); } } return 0; diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index fb5a9a8a13cf..e516ae389ca5 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -519,7 +519,7 @@ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) break; case NILFS_IFILE_INO: lockdep_set_class(&bmap->b_sem, &nilfs_bmap_mdt_lock_key); - /* Fall through */ + fallthrough; default: bmap->b_ptr_type = NILFS_BMAP_PTR_VM; bmap->b_last_allocated_key = 0; diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index 23e043eca237..f42ab57201e7 100644 --- 
a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -351,10 +351,10 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, (flags & NILFS_BTREE_NODE_ROOT) || nchildren < 0 || nchildren > NILFS_BTREE_NODE_NCHILDREN_MAX(size))) { - nilfs_msg(inode->i_sb, KERN_CRIT, - "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d", - inode->i_ino, (unsigned long long)blocknr, level, - flags, nchildren); + nilfs_crit(inode->i_sb, + "bad btree node (ino=%lu, blocknr=%llu): level = %d, flags = 0x%x, nchildren = %d", + inode->i_ino, (unsigned long long)blocknr, level, + flags, nchildren); ret = 1; } return ret; @@ -381,9 +381,9 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, level >= NILFS_BTREE_LEVEL_MAX || nchildren < 0 || nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { - nilfs_msg(inode->i_sb, KERN_CRIT, - "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d", - inode->i_ino, level, flags, nchildren); + nilfs_crit(inode->i_sb, + "bad btree root (ino=%lu): level = %d, flags = 0x%x, nchildren = %d", + inode->i_ino, level, flags, nchildren); ret = 1; } return ret; @@ -450,10 +450,10 @@ static int nilfs_btree_bad_node(const struct nilfs_bmap *btree, { if (unlikely(nilfs_btree_node_get_level(node) != level)) { dump_stack(); - nilfs_msg(btree->b_inode->i_sb, KERN_CRIT, - "btree level mismatch (ino=%lu): %d != %d", - btree->b_inode->i_ino, - nilfs_btree_node_get_level(node), level); + nilfs_crit(btree->b_inode->i_sb, + "btree level mismatch (ino=%lu): %d != %d", + btree->b_inode->i_ino, + nilfs_btree_node_get_level(node), level); return 1; } return 0; @@ -508,7 +508,7 @@ static int __nilfs_btree_get_block(const struct nilfs_bmap *btree, __u64 ptr, out_no_wait: if (!buffer_uptodate(bh)) { - nilfs_msg(btree->b_inode->i_sb, KERN_ERR, + nilfs_err(btree->b_inode->i_sb, "I/O error reading b-tree node block (ino=%lu, blocknr=%llu)", btree->b_inode->i_ino, (unsigned long long)ptr); brelse(bh); @@ -2074,10 +2074,10 @@ static int nilfs_btree_propagate(struct nilfs_bmap *btree, ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1, 0); if (ret < 0) { if (unlikely(ret == -ENOENT)) - nilfs_msg(btree->b_inode->i_sb, KERN_CRIT, - "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d", - btree->b_inode->i_ino, - (unsigned long long)key, level); + nilfs_crit(btree->b_inode->i_sb, + "writing node/leaf block does not appear in b-tree (ino=%lu) at key=%llu, level=%d", + btree->b_inode->i_ino, + (unsigned long long)key, level); goto out; } @@ -2114,11 +2114,11 @@ static void nilfs_btree_add_dirty_buffer(struct nilfs_bmap *btree, if (level < NILFS_BTREE_LEVEL_NODE_MIN || level >= NILFS_BTREE_LEVEL_MAX) { dump_stack(); - nilfs_msg(btree->b_inode->i_sb, KERN_WARNING, - "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)", - level, (unsigned long long)key, - btree->b_inode->i_ino, - (unsigned long long)bh->b_blocknr); + nilfs_warn(btree->b_inode->i_sb, + "invalid btree level: %d (key=%llu, ino=%lu, blocknr=%llu)", + level, (unsigned long long)key, + btree->b_inode->i_ino, + (unsigned long long)bh->b_blocknr); return; } diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index 8d41311b5db4..86d4d850d130 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -322,7 +322,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, int ret, ncps, nicps, nss, count, i; if (unlikely(start == 0 || start > end)) { - nilfs_msg(cpfile->i_sb, KERN_ERR, + nilfs_err(cpfile->i_sb, "cannot delete checkpoints: 
invalid range [%llu, %llu)", (unsigned long long)start, (unsigned long long)end); return -EINVAL; @@ -376,7 +376,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, cpfile, cno); if (ret == 0) continue; - nilfs_msg(cpfile->i_sb, KERN_ERR, + nilfs_err(cpfile->i_sb, "error %d deleting checkpoint block", ret); break; @@ -981,12 +981,10 @@ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, int err; if (cpsize > sb->s_blocksize) { - nilfs_msg(sb, KERN_ERR, - "too large checkpoint size: %zu bytes", cpsize); + nilfs_err(sb, "too large checkpoint size: %zu bytes", cpsize); return -EINVAL; } else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) { - nilfs_msg(sb, KERN_ERR, - "too small checkpoint size: %zu bytes", cpsize); + nilfs_err(sb, "too small checkpoint size: %zu bytes", cpsize); return -EINVAL; } diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index 6f4066636be9..8bccdf1158fc 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -340,11 +340,11 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) kaddr = kmap_atomic(entry_bh->b_page); entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { - nilfs_msg(dat->i_sb, KERN_CRIT, - "%s: invalid vblocknr = %llu, [%llu, %llu)", - __func__, (unsigned long long)vblocknr, - (unsigned long long)le64_to_cpu(entry->de_start), - (unsigned long long)le64_to_cpu(entry->de_end)); + nilfs_crit(dat->i_sb, + "%s: invalid vblocknr = %llu, [%llu, %llu)", + __func__, (unsigned long long)vblocknr, + (unsigned long long)le64_to_cpu(entry->de_start), + (unsigned long long)le64_to_cpu(entry->de_end)); kunmap_atomic(kaddr); brelse(entry_bh); return -EINVAL; @@ -471,11 +471,11 @@ int nilfs_dat_read(struct super_block *sb, size_t entry_size, int err; if (entry_size > sb->s_blocksize) { - nilfs_msg(sb, KERN_ERR, "too large DAT entry size: %zu bytes", + nilfs_err(sb, "too large DAT entry size: %zu bytes", entry_size); return -EINVAL; } else if (entry_size < NILFS_MIN_DAT_ENTRY_SIZE) { - nilfs_msg(sb, KERN_ERR, "too small DAT entry size: %zu bytes", + nilfs_err(sb, "too small DAT entry size: %zu bytes", entry_size); return -EINVAL; } diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c index 533e24ea3a88..f353101955e3 100644 --- a/fs/nilfs2/direct.c +++ b/fs/nilfs2/direct.c @@ -328,16 +328,18 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap, key = nilfs_bmap_data_get_key(bmap, *bh); if (unlikely(key > NILFS_DIRECT_KEY_MAX)) { - nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT, - "%s (ino=%lu): invalid key: %llu", __func__, - bmap->b_inode->i_ino, (unsigned long long)key); + nilfs_crit(bmap->b_inode->i_sb, + "%s (ino=%lu): invalid key: %llu", + __func__, + bmap->b_inode->i_ino, (unsigned long long)key); return -EINVAL; } ptr = nilfs_direct_get_ptr(bmap, key); if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) { - nilfs_msg(bmap->b_inode->i_sb, KERN_CRIT, - "%s (ino=%lu): invalid pointer: %llu", __func__, - bmap->b_inode->i_ino, (unsigned long long)ptr); + nilfs_crit(bmap->b_inode->i_sb, + "%s (ino=%lu): invalid pointer: %llu", + __func__, + bmap->b_inode->i_ino, (unsigned long long)ptr); return -EINVAL; } diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index aa3c328ee189..448320496856 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -142,7 +142,7 @@ int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh) if (!buffer_uptodate(bh)) { struct inode *inode = bh->b_page->mapping->host; - nilfs_msg(inode->i_sb, KERN_ERR, + nilfs_err(inode->i_sb, "I/O error reading 
%s block for GC (ino=%lu, vblocknr=%llu)", buffer_nilfs_node(bh) ? "node" : "data", inode->i_ino, (unsigned long long)bh->b_blocknr); diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index 4140d232cadc..02727ed3a7c6 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -142,8 +142,8 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, err = nilfs_palloc_get_entry_block(ifile, ino, 0, out_bh); if (unlikely(err)) - nilfs_msg(sb, KERN_WARNING, "error %d reading inode: ino=%lu", - err, (unsigned long)ino); + nilfs_warn(sb, "error %d reading inode: ino=%lu", + err, (unsigned long)ino); return err; } diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 28009ec54420..745d371d6fea 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -104,10 +104,10 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, * However, the page having this block must * be locked in this case. */ - nilfs_msg(inode->i_sb, KERN_WARNING, - "%s (ino=%lu): a race condition while inserting a data block at offset=%llu", - __func__, inode->i_ino, - (unsigned long long)blkoff); + nilfs_warn(inode->i_sb, + "%s (ino=%lu): a race condition while inserting a data block at offset=%llu", + __func__, inode->i_ino, + (unsigned long long)blkoff); err = 0; } nilfs_transaction_abort(inode->i_sb); @@ -388,7 +388,8 @@ struct inode *nilfs_new_inode(struct inode *dir, umode_t mode) failed_after_creation: clear_nlink(inode); - unlock_new_inode(inode); + if (inode->i_state & I_NEW) + unlock_new_inode(inode); iput(inode); /* * raw_inode will be deleted through * nilfs_evict_inode(). @@ -706,9 +707,8 @@ repeat: goto repeat; failed: - nilfs_msg(ii->vfs_inode.i_sb, KERN_WARNING, - "error %d truncating bmap (ino=%lu)", ret, - ii->vfs_inode.i_ino); + nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)", + ret, ii->vfs_inode.i_ino); } void nilfs_truncate(struct inode *inode) @@ -919,9 +919,9 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty) * This will happen when somebody is freeing * this inode. */ - nilfs_msg(inode->i_sb, KERN_WARNING, - "cannot set file dirty (ino=%lu): the file is being freed", - inode->i_ino); + nilfs_warn(inode->i_sb, + "cannot set file dirty (ino=%lu): the file is being freed", + inode->i_ino); spin_unlock(&nilfs->ns_inode_lock); return -EINVAL; /* * NILFS_I_DIRTY may remain for @@ -942,9 +942,9 @@ int __nilfs_mark_inode_dirty(struct inode *inode, int flags) err = nilfs_load_inode_block(inode, &ibh); if (unlikely(err)) { - nilfs_msg(inode->i_sb, KERN_WARNING, - "cannot mark inode dirty (ino=%lu): error %d loading inode block", - inode->i_ino, err); + nilfs_warn(inode->i_sb, + "cannot mark inode dirty (ino=%lu): error %d loading inode block", + inode->i_ino, err); return err; } nilfs_update_inode(inode, ibh, flags); @@ -970,8 +970,8 @@ void nilfs_dirty_inode(struct inode *inode, int flags) struct nilfs_mdt_info *mdi = NILFS_MDT(inode); if (is_bad_inode(inode)) { - nilfs_msg(inode->i_sb, KERN_WARNING, - "tried to mark bad_inode dirty. ignored."); + nilfs_warn(inode->i_sb, + "tried to mark bad_inode dirty. 
ignored."); dump_stack(); return; } diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index 4ba73dbf3e8d..07d26f61f22a 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -569,25 +569,25 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode, if (unlikely(ret < 0)) { if (ret == -ENOENT) - nilfs_msg(inode->i_sb, KERN_CRIT, - "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", - __func__, vdesc->vd_flags ? "node" : "data", - (unsigned long long)vdesc->vd_ino, - (unsigned long long)vdesc->vd_cno, - (unsigned long long)vdesc->vd_offset, - (unsigned long long)vdesc->vd_blocknr, - (unsigned long long)vdesc->vd_vblocknr); + nilfs_crit(inode->i_sb, + "%s: invalid virtual block address (%s): ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", + __func__, vdesc->vd_flags ? "node" : "data", + (unsigned long long)vdesc->vd_ino, + (unsigned long long)vdesc->vd_cno, + (unsigned long long)vdesc->vd_offset, + (unsigned long long)vdesc->vd_blocknr, + (unsigned long long)vdesc->vd_vblocknr); return ret; } if (unlikely(!list_empty(&bh->b_assoc_buffers))) { - nilfs_msg(inode->i_sb, KERN_CRIT, - "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", - __func__, vdesc->vd_flags ? "node" : "data", - (unsigned long long)vdesc->vd_ino, - (unsigned long long)vdesc->vd_cno, - (unsigned long long)vdesc->vd_offset, - (unsigned long long)vdesc->vd_blocknr, - (unsigned long long)vdesc->vd_vblocknr); + nilfs_crit(inode->i_sb, + "%s: conflicting %s buffer: ino=%llu, cno=%llu, offset=%llu, blocknr=%llu, vblocknr=%llu", + __func__, vdesc->vd_flags ? "node" : "data", + (unsigned long long)vdesc->vd_ino, + (unsigned long long)vdesc->vd_cno, + (unsigned long long)vdesc->vd_offset, + (unsigned long long)vdesc->vd_blocknr, + (unsigned long long)vdesc->vd_vblocknr); brelse(bh); return -EEXIST; } @@ -837,8 +837,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, return 0; failed: - nilfs_msg(nilfs->ns_sb, KERN_ERR, "error %d preparing GC: %s", ret, - msg); + nilfs_err(nilfs->ns_sb, "error %d preparing GC: %s", ret, msg); return ret; } @@ -947,7 +946,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, ret = nilfs_ioctl_move_blocks(inode->i_sb, &argv[0], kbufs[0]); if (ret < 0) { - nilfs_msg(inode->i_sb, KERN_ERR, + nilfs_err(inode->i_sb, "error %d preparing GC: cannot read source blocks", ret); } else { diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 700870a92bc4..c0361ce45f62 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -199,7 +199,7 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, out_no_wait: err = -EIO; if (!buffer_uptodate(first_bh)) { - nilfs_msg(inode->i_sb, KERN_ERR, + nilfs_err(inode->i_sb, "I/O error reading meta-data file (ino=%lu, block-offset=%lu)", inode->i_ino, block); goto failed_bh; diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 9fe6d4ab74f0..a6ec7961d4f5 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -272,9 +272,9 @@ static int nilfs_do_unlink(struct inode *dir, struct dentry *dentry) goto out; if (!inode->i_nlink) { - nilfs_msg(inode->i_sb, KERN_WARNING, - "deleting nonexistent file (ino=%lu), %d", - inode->i_ino, inode->i_nlink); + nilfs_warn(inode->i_sb, + "deleting nonexistent file (ino=%lu), %d", + inode->i_ino, inode->i_nlink); set_nlink(inode, 1); } err = nilfs_delete_entry(de, page); diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index 42395ba52da6..f8450ee3fd06 100644 --- 
a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -289,9 +289,8 @@ static inline int nilfs_mark_inode_dirty_sync(struct inode *inode) /* super.c */ extern struct inode *nilfs_alloc_inode(struct super_block *); -extern __printf(3, 4) -void __nilfs_msg(struct super_block *sb, const char *level, - const char *fmt, ...); +__printf(2, 3) +void __nilfs_msg(struct super_block *sb, const char *fmt, ...); extern __printf(3, 4) void __nilfs_error(struct super_block *sb, const char *function, const char *fmt, ...); @@ -299,7 +298,7 @@ void __nilfs_error(struct super_block *sb, const char *function, #ifdef CONFIG_PRINTK #define nilfs_msg(sb, level, fmt, ...) \ - __nilfs_msg(sb, level, fmt, ##__VA_ARGS__) + __nilfs_msg(sb, level fmt, ##__VA_ARGS__) #define nilfs_error(sb, fmt, ...) \ __nilfs_error(sb, __func__, fmt, ##__VA_ARGS__) @@ -307,7 +306,7 @@ void __nilfs_error(struct super_block *sb, const char *function, #define nilfs_msg(sb, level, fmt, ...) \ do { \ - no_printk(fmt, ##__VA_ARGS__); \ + no_printk(level fmt, ##__VA_ARGS__); \ (void)(sb); \ } while (0) #define nilfs_error(sb, fmt, ...) \ @@ -318,6 +317,15 @@ void __nilfs_error(struct super_block *sb, const char *function, #endif /* CONFIG_PRINTK */ +#define nilfs_crit(sb, fmt, ...) \ + nilfs_msg(sb, KERN_CRIT, fmt, ##__VA_ARGS__) +#define nilfs_err(sb, fmt, ...) \ + nilfs_msg(sb, KERN_ERR, fmt, ##__VA_ARGS__) +#define nilfs_warn(sb, fmt, ...) \ + nilfs_msg(sb, KERN_WARNING, fmt, ##__VA_ARGS__) +#define nilfs_info(sb, fmt, ...) \ + nilfs_msg(sb, KERN_INFO, fmt, ##__VA_ARGS__) + extern struct nilfs_super_block * nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **); extern int nilfs_store_magic_and_option(struct super_block *, diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index d7fc8d369d89..b175f1330408 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -391,9 +391,8 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) BUG_ON(!PageLocked(page)); if (!silent) - nilfs_msg(sb, KERN_WARNING, - "discard dirty page: offset=%lld, ino=%lu", - page_offset(page), inode->i_ino); + nilfs_warn(sb, "discard dirty page: offset=%lld, ino=%lu", + page_offset(page), inode->i_ino); ClearPageUptodate(page); ClearPageMappedToDisk(page); @@ -409,9 +408,9 @@ void nilfs_clear_dirty_page(struct page *page, bool silent) do { lock_buffer(bh); if (!silent) - nilfs_msg(sb, KERN_WARNING, - "discard dirty block: blocknr=%llu, size=%zu", - (u64)bh->b_blocknr, bh->b_size); + nilfs_warn(sb, + "discard dirty block: blocknr=%llu, size=%zu", + (u64)bh->b_blocknr, bh->b_size); set_mask_bits(&bh->b_state, clear_bits, 0); unlock_buffer(bh); diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 140b663e91c7..2217f904a7cf 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -51,7 +51,7 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err) switch (err) { case NILFS_SEG_FAIL_IO: - nilfs_msg(sb, KERN_ERR, "I/O error reading segment"); + nilfs_err(sb, "I/O error reading segment"); return -EIO; case NILFS_SEG_FAIL_MAGIC: msg = "Magic number mismatch"; @@ -72,10 +72,10 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err) msg = "No super root in the last segment"; break; default: - nilfs_msg(sb, KERN_ERR, "unrecognized segment error %d", err); + nilfs_err(sb, "unrecognized segment error %d", err); return -EINVAL; } - nilfs_msg(sb, KERN_WARNING, "invalid segment: %s", msg); + nilfs_warn(sb, "invalid segment: %s", msg); return -EINVAL; } @@ -543,10 +543,10 @@ static int 
nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, put_page(page); failed_inode: - nilfs_msg(sb, KERN_WARNING, - "error %d recovering data block (ino=%lu, block-offset=%llu)", - err, (unsigned long)rb->ino, - (unsigned long long)rb->blkoff); + nilfs_warn(sb, + "error %d recovering data block (ino=%lu, block-offset=%llu)", + err, (unsigned long)rb->ino, + (unsigned long long)rb->blkoff); if (!err2) err2 = err; next: @@ -626,7 +626,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, !(flags & NILFS_SS_SYNDT)) goto try_next_pseg; state = RF_DSYNC_ST; - /* Fall through */ + fallthrough; case RF_DSYNC_ST: if (!(flags & NILFS_SS_SYNDT)) goto confused; @@ -669,8 +669,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, } if (nsalvaged_blocks) { - nilfs_msg(sb, KERN_INFO, "salvaged %lu blocks", - nsalvaged_blocks); + nilfs_info(sb, "salvaged %lu blocks", nsalvaged_blocks); ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE; } out: @@ -681,7 +680,7 @@ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, confused: err = -EINVAL; failed: - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "error %d roll-forwarding partial segment at blocknr = %llu", err, (unsigned long long)pseg_start); goto out; @@ -703,8 +702,8 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs, set_buffer_dirty(bh); err = sync_dirty_buffer(bh); if (unlikely(err)) - nilfs_msg(nilfs->ns_sb, KERN_WARNING, - "buffer sync write failed during post-cleaning of recovery."); + nilfs_warn(nilfs->ns_sb, + "buffer sync write failed during post-cleaning of recovery."); brelse(bh); } @@ -739,8 +738,7 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root); if (unlikely(err)) { - nilfs_msg(sb, KERN_ERR, - "error %d loading the latest checkpoint", err); + nilfs_err(sb, "error %d loading the latest checkpoint", err); return err; } @@ -751,8 +749,7 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) { err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri); if (unlikely(err)) { - nilfs_msg(sb, KERN_ERR, - "error %d preparing segment for recovery", + nilfs_err(sb, "error %d preparing segment for recovery", err); goto failed; } @@ -766,8 +763,7 @@ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, nilfs_detach_log_writer(sb); if (unlikely(err)) { - nilfs_msg(sb, KERN_ERR, - "error %d writing segment for recovery", + nilfs_err(sb, "error %d writing segment for recovery", err); goto failed; } diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 20c479b5e41b..1a8729eded8b 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -505,7 +505,7 @@ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf) } while (--segbuf->sb_nbio > 0); if (unlikely(atomic_read(&segbuf->sb_err) > 0)) { - nilfs_msg(segbuf->sb_super, KERN_ERR, + nilfs_err(segbuf->sb_super, "I/O error writing log (start-blocknr=%llu, block-count=%lu) in segment %llu", (unsigned long long)segbuf->sb_pseg_start, segbuf->sb_sum.nblocks, diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 91b58c897f92..e3726aca28ed 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -158,7 +158,7 @@ static int nilfs_prepare_segment_lock(struct super_block *sb, * it is saved and will be restored on * nilfs_transaction_commit(). 
*/ - nilfs_msg(sb, KERN_WARNING, "journal info from a different FS"); + nilfs_warn(sb, "journal info from a different FS"); save = current->journal_info; } if (!ti) { @@ -1138,7 +1138,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) nilfs_sc_cstage_set(sci, NILFS_ST_DAT); goto dat_stage; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_GC: if (nilfs_doing_gc()) { head = &sci->sc_gc_inodes; @@ -1159,7 +1160,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) } sci->sc_stage.gc_inode_ptr = NULL; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_FILE: head = &sci->sc_dirty_files; ii = list_prepare_entry(sci->sc_stage.dirty_file_ptr, head, @@ -1186,7 +1188,7 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) } nilfs_sc_cstage_inc(sci); sci->sc_stage.flags |= NILFS_CF_IFILE_STARTED; - /* Fall through */ + fallthrough; case NILFS_ST_IFILE: err = nilfs_segctor_scan_file(sci, sci->sc_root->ifile, &nilfs_sc_file_ops); @@ -1197,13 +1199,14 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) err = nilfs_segctor_create_checkpoint(sci); if (unlikely(err)) break; - /* Fall through */ + fallthrough; case NILFS_ST_CPFILE: err = nilfs_segctor_scan_file(sci, nilfs->ns_cpfile, &nilfs_sc_file_ops); if (unlikely(err)) break; - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_SUFILE: err = nilfs_sufile_freev(nilfs->ns_sufile, sci->sc_freesegs, sci->sc_nfreesegs, &ndone); @@ -1219,7 +1222,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) &nilfs_sc_file_ops); if (unlikely(err)) break; - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_DAT: dat_stage: err = nilfs_segctor_scan_file(sci, nilfs->ns_dat, @@ -1230,7 +1234,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) nilfs_sc_cstage_set(sci, NILFS_ST_DONE); return 0; } - nilfs_sc_cstage_inc(sci); /* Fall through */ + nilfs_sc_cstage_inc(sci); + fallthrough; case NILFS_ST_SR: if (mode == SC_LSEG_SR) { /* Appending a super root */ @@ -1940,9 +1945,9 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci, err = nilfs_ifile_get_inode_block( ifile, ii->vfs_inode.i_ino, &ibh); if (unlikely(err)) { - nilfs_msg(sci->sc_super, KERN_WARNING, - "log writer: error %d getting inode block (ino=%lu)", - err, ii->vfs_inode.i_ino); + nilfs_warn(sci->sc_super, + "log writer: error %d getting inode block (ino=%lu)", + err, ii->vfs_inode.i_ino); return err; } spin_lock(&nilfs->ns_inode_lock); @@ -2449,7 +2454,7 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, if (likely(!err)) break; - nilfs_msg(sb, KERN_WARNING, "error %d cleaning segments", err); + nilfs_warn(sb, "error %d cleaning segments", err); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(sci->sc_interval); } @@ -2457,9 +2462,9 @@ int nilfs_clean_segments(struct super_block *sb, struct nilfs_argv *argv, int ret = nilfs_discard_segments(nilfs, sci->sc_freesegs, sci->sc_nfreesegs); if (ret) { - nilfs_msg(sb, KERN_WARNING, - "error %d on discard request, turning discards off for the device", - ret); + nilfs_warn(sb, + "error %d on discard request, turning discards off for the device", + ret); nilfs_clear_opt(nilfs, DISCARD); } } @@ -2540,9 +2545,9 @@ static int 
nilfs_segctor_thread(void *arg) /* start sync. */ sci->sc_task = current; wake_up(&sci->sc_wait_task); /* for nilfs_segctor_start_thread() */ - nilfs_msg(sci->sc_super, KERN_INFO, - "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds", - sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); + nilfs_info(sci->sc_super, + "segctord starting. Construction interval = %lu seconds, CP frequency < %lu seconds", + sci->sc_interval / HZ, sci->sc_mjcp_freq / HZ); spin_lock(&sci->sc_state_lock); loop: @@ -2616,8 +2621,8 @@ static int nilfs_segctor_start_thread(struct nilfs_sc_info *sci) if (IS_ERR(t)) { int err = PTR_ERR(t); - nilfs_msg(sci->sc_super, KERN_ERR, - "error %d creating segctord thread", err); + nilfs_err(sci->sc_super, "error %d creating segctord thread", + err); return err; } wait_event(sci->sc_wait_task, sci->sc_task != NULL); @@ -2727,14 +2732,14 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) nilfs_segctor_write_out(sci); if (!list_empty(&sci->sc_dirty_files)) { - nilfs_msg(sci->sc_super, KERN_WARNING, - "disposed unprocessed dirty file(s) when stopping log writer"); + nilfs_warn(sci->sc_super, + "disposed unprocessed dirty file(s) when stopping log writer"); nilfs_dispose_list(nilfs, &sci->sc_dirty_files, 1); } if (!list_empty(&sci->sc_iput_queue)) { - nilfs_msg(sci->sc_super, KERN_WARNING, - "disposed unprocessed inode(s) in iput queue when stopping log writer"); + nilfs_warn(sci->sc_super, + "disposed unprocessed inode(s) in iput queue when stopping log writer"); nilfs_dispose_list(nilfs, &sci->sc_iput_queue, 1); } @@ -2812,8 +2817,8 @@ void nilfs_detach_log_writer(struct super_block *sb) spin_lock(&nilfs->ns_inode_lock); if (!list_empty(&nilfs->ns_dirty_files)) { list_splice_init(&nilfs->ns_dirty_files, &garbage_list); - nilfs_msg(sb, KERN_WARNING, - "disposed unprocessed dirty file(s) when detaching log writer"); + nilfs_warn(sb, + "disposed unprocessed dirty file(s) when detaching log writer"); } spin_unlock(&nilfs->ns_inode_lock); up_write(&nilfs->ns_segctor_sem); diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index bf3f8f05c89b..42ff67c0c14f 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -171,9 +171,9 @@ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, down_write(&NILFS_MDT(sufile)->mi_sem); for (seg = segnumv; seg < segnumv + nsegs; seg++) { if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) { - nilfs_msg(sufile->i_sb, KERN_WARNING, - "%s: invalid segment number: %llu", - __func__, (unsigned long long)*seg); + nilfs_warn(sufile->i_sb, + "%s: invalid segment number: %llu", + __func__, (unsigned long long)*seg); nerr++; } } @@ -230,9 +230,8 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, int ret; if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) { - nilfs_msg(sufile->i_sb, KERN_WARNING, - "%s: invalid segment number: %llu", - __func__, (unsigned long long)segnum); + nilfs_warn(sufile->i_sb, "%s: invalid segment number: %llu", + __func__, (unsigned long long)segnum); return -EINVAL; } down_write(&NILFS_MDT(sufile)->mi_sem); @@ -410,9 +409,8 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum, kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (unlikely(!nilfs_segment_usage_clean(su))) { - nilfs_msg(sufile->i_sb, KERN_WARNING, - "%s: segment %llu must be clean", __func__, - (unsigned long long)segnum); + nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean", + 
__func__, (unsigned long long)segnum); kunmap_atomic(kaddr); return; } @@ -468,9 +466,8 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, kaddr = kmap_atomic(su_bh->b_page); su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); if (nilfs_segment_usage_clean(su)) { - nilfs_msg(sufile->i_sb, KERN_WARNING, - "%s: segment %llu is already clean", - __func__, (unsigned long long)segnum); + nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean", + __func__, (unsigned long long)segnum); kunmap_atomic(kaddr); return; } @@ -1168,12 +1165,12 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize, int err; if (susize > sb->s_blocksize) { - nilfs_msg(sb, KERN_ERR, - "too large segment usage size: %zu bytes", susize); + nilfs_err(sb, "too large segment usage size: %zu bytes", + susize); return -EINVAL; } else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) { - nilfs_msg(sb, KERN_ERR, - "too small segment usage size: %zu bytes", susize); + nilfs_err(sb, "too small segment usage size: %zu bytes", + susize); return -EINVAL; } diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 5729ee86da9a..2eee5fb1a882 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -62,19 +62,25 @@ struct kmem_cache *nilfs_btree_path_cache; static int nilfs_setup_super(struct super_block *sb, int is_mount); static int nilfs_remount(struct super_block *sb, int *flags, char *data); -void __nilfs_msg(struct super_block *sb, const char *level, const char *fmt, - ...) +void __nilfs_msg(struct super_block *sb, const char *fmt, ...) { struct va_format vaf; va_list args; + int level; va_start(args, fmt); - vaf.fmt = fmt; + + level = printk_get_level(fmt); + vaf.fmt = printk_skip_level(fmt); vaf.va = &args; + if (sb) - printk("%sNILFS (%s): %pV\n", level, sb->s_id, &vaf); + printk("%c%cNILFS (%s): %pV\n", + KERN_SOH_ASCII, level, sb->s_id, &vaf); else - printk("%sNILFS: %pV\n", level, &vaf); + printk("%c%cNILFS: %pV\n", + KERN_SOH_ASCII, level, &vaf); + va_end(args); } @@ -106,7 +112,7 @@ static void nilfs_set_error(struct super_block *sb) * * This implements the body of nilfs_error() macro. Normally, * nilfs_error() should be used. As for sustainable errors such as a - * single-shot I/O error, nilfs_msg() should be used instead. + * single-shot I/O error, nilfs_err() should be used instead. * * Callers should not add a trailing newline since this will do it. 
*/ @@ -178,8 +184,7 @@ static int nilfs_sync_super(struct super_block *sb, int flag) } if (unlikely(err)) { - nilfs_msg(sb, KERN_ERR, "unable to write superblock: err=%d", - err); + nilfs_err(sb, "unable to write superblock: err=%d", err); if (err == -EIO && nilfs->ns_sbh[1]) { /* * sbp[0] points to newer log than sbp[1], @@ -249,7 +254,7 @@ struct nilfs_super_block **nilfs_prepare_super(struct super_block *sb, sbp[1]->s_magic == cpu_to_le16(NILFS_SUPER_MAGIC)) { memcpy(sbp[0], sbp[1], nilfs->ns_sbsize); } else { - nilfs_msg(sb, KERN_CRIT, "superblock broke"); + nilfs_crit(sb, "superblock broke"); return NULL; } } else if (sbp[1] && @@ -359,9 +364,9 @@ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off) offset = sb2off & (nilfs->ns_blocksize - 1); nsbh = sb_getblk(sb, newblocknr); if (!nsbh) { - nilfs_msg(sb, KERN_WARNING, - "unable to move secondary superblock to block %llu", - (unsigned long long)newblocknr); + nilfs_warn(sb, + "unable to move secondary superblock to block %llu", + (unsigned long long)newblocknr); ret = -EIO; goto out; } @@ -524,7 +529,7 @@ int nilfs_attach_checkpoint(struct super_block *sb, __u64 cno, int curr_mnt, up_read(&nilfs->ns_segctor_sem); if (unlikely(err)) { if (err == -ENOENT || err == -EINVAL) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "Invalid checkpoint (checkpoint number=%llu)", (unsigned long long)cno); err = -EINVAL; @@ -622,8 +627,7 @@ static int nilfs_statfs(struct dentry *dentry, struct kstatfs *buf) err = nilfs_ifile_count_free_inodes(root->ifile, &nmaxinodes, &nfreeinodes); if (unlikely(err)) { - nilfs_msg(sb, KERN_WARNING, - "failed to count free inodes: err=%d", err); + nilfs_warn(sb, "failed to count free inodes: err=%d", err); if (err == -ERANGE) { /* * If nilfs_palloc_count_max_entries() returns @@ -755,7 +759,7 @@ static int parse_options(char *options, struct super_block *sb, int is_remount) break; case Opt_snapshot: if (is_remount) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "\"%s\" option is invalid for remount", p); return 0; @@ -771,8 +775,7 @@ static int parse_options(char *options, struct super_block *sb, int is_remount) nilfs_clear_opt(nilfs, DISCARD); break; default: - nilfs_msg(sb, KERN_ERR, - "unrecognized mount option \"%s\"", p); + nilfs_err(sb, "unrecognized mount option \"%s\"", p); return 0; } } @@ -808,10 +811,10 @@ static int nilfs_setup_super(struct super_block *sb, int is_mount) mnt_count = le16_to_cpu(sbp[0]->s_mnt_count); if (nilfs->ns_mount_state & NILFS_ERROR_FS) { - nilfs_msg(sb, KERN_WARNING, "mounting fs with errors"); + nilfs_warn(sb, "mounting fs with errors"); #if 0 } else if (max_mnt_count >= 0 && mnt_count >= max_mnt_count) { - nilfs_msg(sb, KERN_WARNING, "maximal mount count reached"); + nilfs_warn(sb, "maximal mount count reached"); #endif } if (!max_mnt_count) @@ -874,7 +877,7 @@ int nilfs_check_feature_compatibility(struct super_block *sb, features = le64_to_cpu(sbp->s_feature_incompat) & ~NILFS_FEATURE_INCOMPAT_SUPP; if (features) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "couldn't mount because of unsupported optional features (%llx)", (unsigned long long)features); return -EINVAL; @@ -882,7 +885,7 @@ int nilfs_check_feature_compatibility(struct super_block *sb, features = le64_to_cpu(sbp->s_feature_compat_ro) & ~NILFS_FEATURE_COMPAT_RO_SUPP; if (!sb_rdonly(sb) && features) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "couldn't mount RDWR because of unsupported optional features (%llx)", (unsigned long long)features); return -EINVAL; @@ -901,12 +904,12 @@ static int 
nilfs_get_root_dentry(struct super_block *sb, inode = nilfs_iget(sb, root, NILFS_ROOT_INO); if (IS_ERR(inode)) { ret = PTR_ERR(inode); - nilfs_msg(sb, KERN_ERR, "error %d getting root inode", ret); + nilfs_err(sb, "error %d getting root inode", ret); goto out; } if (!S_ISDIR(inode->i_mode) || !inode->i_blocks || !inode->i_size) { iput(inode); - nilfs_msg(sb, KERN_ERR, "corrupt root inode"); + nilfs_err(sb, "corrupt root inode"); ret = -EINVAL; goto out; } @@ -934,7 +937,7 @@ static int nilfs_get_root_dentry(struct super_block *sb, return ret; failed_dentry: - nilfs_msg(sb, KERN_ERR, "error %d getting root dentry", ret); + nilfs_err(sb, "error %d getting root dentry", ret); goto out; } @@ -954,7 +957,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, ret = (ret == -ENOENT) ? -EINVAL : ret; goto out; } else if (!ret) { - nilfs_msg(s, KERN_ERR, + nilfs_err(s, "The specified checkpoint is not a snapshot (checkpoint number=%llu)", (unsigned long long)cno); ret = -EINVAL; @@ -963,7 +966,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, ret = nilfs_attach_checkpoint(s, cno, false, &root); if (ret) { - nilfs_msg(s, KERN_ERR, + nilfs_err(s, "error %d while loading snapshot (checkpoint number=%llu)", ret, (unsigned long long)cno); goto out; @@ -1060,7 +1063,7 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent) cno = nilfs_last_cno(nilfs); err = nilfs_attach_checkpoint(sb, cno, true, &fsroot); if (err) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "error %d while loading last checkpoint (checkpoint number=%llu)", err, (unsigned long long)cno); goto failed_unload; @@ -1122,8 +1125,8 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) err = -EINVAL; if (!nilfs_valid_fs(nilfs)) { - nilfs_msg(sb, KERN_WARNING, - "couldn't remount because the filesystem is in an incomplete recovery state"); + nilfs_warn(sb, + "couldn't remount because the filesystem is in an incomplete recovery state"); goto restore_opts; } @@ -1155,9 +1158,9 @@ static int nilfs_remount(struct super_block *sb, int *flags, char *data) ~NILFS_FEATURE_COMPAT_RO_SUPP; up_read(&nilfs->ns_sem); if (features) { - nilfs_msg(sb, KERN_WARNING, - "couldn't remount RDWR because of unsupported optional features (%llx)", - (unsigned long long)features); + nilfs_warn(sb, + "couldn't remount RDWR because of unsupported optional features (%llx)", + (unsigned long long)features); err = -EROFS; goto restore_opts; } @@ -1216,7 +1219,7 @@ static int nilfs_parse_snapshot_option(const char *option, return 0; parse_error: - nilfs_msg(NULL, KERN_ERR, "invalid option \"%s\": %s", option, msg); + nilfs_err(NULL, "invalid option \"%s\": %s", option, msg); return 1; } @@ -1319,7 +1322,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, } else if (!sd.cno) { if (nilfs_tree_is_busy(s->s_root)) { if ((flags ^ s->s_flags) & SB_RDONLY) { - nilfs_msg(s, KERN_ERR, + nilfs_err(s, "the device already has a %s mount.", sb_rdonly(s) ? 
"read-only" : "read/write"); err = -EBUSY; diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c index e60be7bb55b0..303d71430bdd 100644 --- a/fs/nilfs2/sysfs.c +++ b/fs/nilfs2/sysfs.c @@ -263,8 +263,8 @@ nilfs_checkpoints_checkpoints_number_show(struct nilfs_checkpoints_attr *attr, err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); up_read(&nilfs->ns_segctor_sem); if (err < 0) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, - "unable to get checkpoint stat: err=%d", err); + nilfs_err(nilfs->ns_sb, "unable to get checkpoint stat: err=%d", + err); return err; } @@ -286,8 +286,8 @@ nilfs_checkpoints_snapshots_number_show(struct nilfs_checkpoints_attr *attr, err = nilfs_cpfile_get_stat(nilfs->ns_cpfile, &cpstat); up_read(&nilfs->ns_segctor_sem); if (err < 0) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, - "unable to get checkpoint stat: err=%d", err); + nilfs_err(nilfs->ns_sb, "unable to get checkpoint stat: err=%d", + err); return err; } @@ -405,8 +405,8 @@ nilfs_segments_dirty_segments_show(struct nilfs_segments_attr *attr, err = nilfs_sufile_get_stat(nilfs->ns_sufile, &sustat); up_read(&nilfs->ns_segctor_sem); if (err < 0) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, - "unable to get segment stat: err=%d", err); + nilfs_err(nilfs->ns_sb, "unable to get segment stat: err=%d", + err); return err; } @@ -779,15 +779,15 @@ nilfs_superblock_sb_update_frequency_store(struct nilfs_superblock_attr *attr, err = kstrtouint(skip_spaces(buf), 0, &val); if (err) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, - "unable to convert string: err=%d", err); + nilfs_err(nilfs->ns_sb, "unable to convert string: err=%d", + err); return err; } if (val < NILFS_SB_FREQ) { val = NILFS_SB_FREQ; - nilfs_msg(nilfs->ns_sb, KERN_WARNING, - "superblock update frequency cannot be lesser than 10 seconds"); + nilfs_warn(nilfs->ns_sb, + "superblock update frequency cannot be lesser than 10 seconds"); } down_write(&nilfs->ns_sem); @@ -990,8 +990,7 @@ int nilfs_sysfs_create_device_group(struct super_block *sb) nilfs->ns_dev_subgroups = kzalloc(devgrp_size, GFP_KERNEL); if (unlikely(!nilfs->ns_dev_subgroups)) { err = -ENOMEM; - nilfs_msg(sb, KERN_ERR, - "unable to allocate memory for device group"); + nilfs_err(sb, "unable to allocate memory for device group"); goto failed_create_device_group; } @@ -1101,15 +1100,13 @@ int __init nilfs_sysfs_init(void) nilfs_kset = kset_create_and_add(NILFS_ROOT_GROUP_NAME, NULL, fs_kobj); if (!nilfs_kset) { err = -ENOMEM; - nilfs_msg(NULL, KERN_ERR, - "unable to create sysfs entry: err=%d", err); + nilfs_err(NULL, "unable to create sysfs entry: err=%d", err); goto failed_sysfs_init; } err = sysfs_create_group(&nilfs_kset->kobj, &nilfs_feature_attr_group); if (unlikely(err)) { - nilfs_msg(NULL, KERN_ERR, - "unable to create feature group: err=%d", err); + nilfs_err(NULL, "unable to create feature group: err=%d", err); goto cleanup_sysfs_init; } diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 484785cdf96e..221a1cc597f0 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -183,7 +183,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, nilfs_get_segnum_of_block(nilfs, nilfs->ns_last_pseg); nilfs->ns_cno = nilfs->ns_last_cno + 1; if (nilfs->ns_segnum >= nilfs->ns_nsegments) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, + nilfs_err(nilfs->ns_sb, "pointed segment number is out of range: segnum=%llu, nsegments=%lu", (unsigned long long)nilfs->ns_segnum, nilfs->ns_nsegments); @@ -210,12 +210,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) int err; if (!valid_fs) { - nilfs_msg(sb, 
KERN_WARNING, "mounting unchecked fs"); + nilfs_warn(sb, "mounting unchecked fs"); if (s_flags & SB_RDONLY) { - nilfs_msg(sb, KERN_INFO, - "recovery required for readonly filesystem"); - nilfs_msg(sb, KERN_INFO, - "write access will be enabled during recovery"); + nilfs_info(sb, + "recovery required for readonly filesystem"); + nilfs_info(sb, + "write access will be enabled during recovery"); } } @@ -230,12 +230,11 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) goto scan_error; if (!nilfs_valid_sb(sbp[1])) { - nilfs_msg(sb, KERN_WARNING, - "unable to fall back to spare super block"); + nilfs_warn(sb, + "unable to fall back to spare super block"); goto scan_error; } - nilfs_msg(sb, KERN_INFO, - "trying rollback from an earlier position"); + nilfs_info(sb, "trying rollback from an earlier position"); /* * restore super block with its spare and reconfigure @@ -248,9 +247,9 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) /* verify consistency between two super blocks */ blocksize = BLOCK_SIZE << le32_to_cpu(sbp[0]->s_log_block_size); if (blocksize != nilfs->ns_blocksize) { - nilfs_msg(sb, KERN_WARNING, - "blocksize differs between two super blocks (%d != %d)", - blocksize, nilfs->ns_blocksize); + nilfs_warn(sb, + "blocksize differs between two super blocks (%d != %d)", + blocksize, nilfs->ns_blocksize); goto scan_error; } @@ -269,8 +268,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) err = nilfs_load_super_root(nilfs, sb, ri.ri_super_root); if (unlikely(err)) { - nilfs_msg(sb, KERN_ERR, "error %d while loading super root", - err); + nilfs_err(sb, "error %d while loading super root", err); goto failed; } @@ -281,28 +279,28 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) __u64 features; if (nilfs_test_opt(nilfs, NORECOVERY)) { - nilfs_msg(sb, KERN_INFO, - "norecovery option specified, skipping roll-forward recovery"); + nilfs_info(sb, + "norecovery option specified, skipping roll-forward recovery"); goto skip_recovery; } features = le64_to_cpu(nilfs->ns_sbp[0]->s_feature_compat_ro) & ~NILFS_FEATURE_COMPAT_RO_SUPP; if (features) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "couldn't proceed with recovery because of unsupported optional features (%llx)", (unsigned long long)features); err = -EROFS; goto failed_unload; } if (really_read_only) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "write access unavailable, cannot proceed"); err = -EROFS; goto failed_unload; } sb->s_flags &= ~SB_RDONLY; } else if (nilfs_test_opt(nilfs, NORECOVERY)) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "recovery cancelled because norecovery option was specified for a read/write mount"); err = -EINVAL; goto failed_unload; @@ -318,12 +316,12 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) up_write(&nilfs->ns_sem); if (err) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "error %d updating super block. 
recovery unfinished.", err); goto failed_unload; } - nilfs_msg(sb, KERN_INFO, "recovery complete"); + nilfs_info(sb, "recovery complete"); skip_recovery: nilfs_clear_recovery_info(&ri); @@ -331,7 +329,7 @@ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) return 0; scan_error: - nilfs_msg(sb, KERN_ERR, "error %d while searching super root", err); + nilfs_err(sb, "error %d while searching super root", err); goto failed; failed_unload: @@ -378,7 +376,7 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, struct nilfs_super_block *sbp) { if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, + nilfs_err(nilfs->ns_sb, "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).", le32_to_cpu(sbp->s_rev_level), le16_to_cpu(sbp->s_minor_rev_level), @@ -391,13 +389,11 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, nilfs->ns_inode_size = le16_to_cpu(sbp->s_inode_size); if (nilfs->ns_inode_size > nilfs->ns_blocksize) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, - "too large inode size: %d bytes", + nilfs_err(nilfs->ns_sb, "too large inode size: %d bytes", nilfs->ns_inode_size); return -EINVAL; } else if (nilfs->ns_inode_size < NILFS_MIN_INODE_SIZE) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, - "too small inode size: %d bytes", + nilfs_err(nilfs->ns_sb, "too small inode size: %d bytes", nilfs->ns_inode_size); return -EINVAL; } @@ -406,8 +402,7 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, nilfs->ns_blocks_per_segment = le32_to_cpu(sbp->s_blocks_per_segment); if (nilfs->ns_blocks_per_segment < NILFS_SEG_MIN_BLOCKS) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, - "too short segment: %lu blocks", + nilfs_err(nilfs->ns_sb, "too short segment: %lu blocks", nilfs->ns_blocks_per_segment); return -EINVAL; } @@ -417,7 +412,7 @@ static int nilfs_store_disk_layout(struct the_nilfs *nilfs, le32_to_cpu(sbp->s_r_segments_percentage); if (nilfs->ns_r_segments_percentage < 1 || nilfs->ns_r_segments_percentage > 99) { - nilfs_msg(nilfs->ns_sb, KERN_ERR, + nilfs_err(nilfs->ns_sb, "invalid reserved segments percentage: %lu", nilfs->ns_r_segments_percentage); return -EINVAL; @@ -503,16 +498,16 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, if (!sbp[0]) { if (!sbp[1]) { - nilfs_msg(sb, KERN_ERR, "unable to read superblock"); + nilfs_err(sb, "unable to read superblock"); return -EIO; } - nilfs_msg(sb, KERN_WARNING, - "unable to read primary superblock (blocksize = %d)", - blocksize); + nilfs_warn(sb, + "unable to read primary superblock (blocksize = %d)", + blocksize); } else if (!sbp[1]) { - nilfs_msg(sb, KERN_WARNING, - "unable to read secondary superblock (blocksize = %d)", - blocksize); + nilfs_warn(sb, + "unable to read secondary superblock (blocksize = %d)", + blocksize); } /* @@ -534,14 +529,14 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, } if (!valid[swp]) { nilfs_release_super_block(nilfs); - nilfs_msg(sb, KERN_ERR, "couldn't find nilfs on the device"); + nilfs_err(sb, "couldn't find nilfs on the device"); return -EINVAL; } if (!valid[!swp]) - nilfs_msg(sb, KERN_WARNING, - "broken superblock, retrying with spare superblock (blocksize = %d)", - blocksize); + nilfs_warn(sb, + "broken superblock, retrying with spare superblock (blocksize = %d)", + blocksize); if (swp) nilfs_swap_super_block(nilfs); @@ -575,7 +570,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) blocksize = sb_min_blocksize(sb, NILFS_MIN_BLOCK_SIZE); if 
(!blocksize) { - nilfs_msg(sb, KERN_ERR, "unable to set blocksize"); + nilfs_err(sb, "unable to set blocksize"); err = -EINVAL; goto out; } @@ -594,7 +589,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); if (blocksize < NILFS_MIN_BLOCK_SIZE || blocksize > NILFS_MAX_BLOCK_SIZE) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "couldn't mount because of unsupported filesystem blocksize %d", blocksize); err = -EINVAL; @@ -604,7 +599,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data) int hw_blocksize = bdev_logical_block_size(sb->s_bdev); if (blocksize < hw_blocksize) { - nilfs_msg(sb, KERN_ERR, + nilfs_err(sb, "blocksize %d too small for device (sector-size = %d)", blocksize, hw_blocksize); err = -EINVAL; diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 7a42c2ebe28d..5dcda8f20c04 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -70,13 +70,10 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark) * destroy the dnotify struct if it was not registered to receive multiple * events. */ -static int dnotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, - const struct qstr *file_name, u32 cookie, - struct fsnotify_iter_info *iter_info) +static int dnotify_handle_event(struct fsnotify_mark *inode_mark, u32 mask, + struct inode *inode, struct inode *dir, + const struct qstr *name) { - struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct dnotify_mark *dn_mark; struct dnotify_struct *dn; struct dnotify_struct **prev; @@ -84,10 +81,7 @@ static int dnotify_handle_event(struct fsnotify_group *group, __u32 test_mask = mask & ~FS_EVENT_ON_CHILD; /* not a dir, dnotify doesn't care */ - if (!S_ISDIR(inode->i_mode)) - return 0; - - if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info))) + if (!dir && !(mask & FS_ISDIR)) return 0; dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark); @@ -127,7 +121,7 @@ static void dnotify_free_mark(struct fsnotify_mark *fsn_mark) } static const struct fsnotify_ops dnotify_fsnotify_ops = { - .handle_event = dnotify_handle_event, + .handle_inode_event = dnotify_handle_event, .free_mark = dnotify_free_mark, }; diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 85eda539b35f..c942910a8649 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -34,10 +34,6 @@ static bool fanotify_fh_equal(struct fanotify_fh *fh1, if (fh1->type != fh2->type || fh1->len != fh2->len) return false; - /* Do not merge events if we failed to encode fh */ - if (fh1->type == FILEID_INVALID) - return false; - return !fh1->len || !memcmp(fanotify_fh_buf(fh1), fanotify_fh_buf(fh2), fh1->len); } @@ -53,25 +49,47 @@ static bool fanotify_fid_event_equal(struct fanotify_fid_event *ffe1, fanotify_fh_equal(&ffe1->object_fh, &ffe2->object_fh); } +static bool fanotify_info_equal(struct fanotify_info *info1, + struct fanotify_info *info2) +{ + if (info1->dir_fh_totlen != info2->dir_fh_totlen || + info1->file_fh_totlen != info2->file_fh_totlen || + info1->name_len != info2->name_len) + return false; + + if (info1->dir_fh_totlen && + !fanotify_fh_equal(fanotify_info_dir_fh(info1), + fanotify_info_dir_fh(info2))) + return false; + + if (info1->file_fh_totlen && + !fanotify_fh_equal(fanotify_info_file_fh(info1), + fanotify_info_file_fh(info2))) + return false; + + return 
!info1->name_len || + !memcmp(fanotify_info_name(info1), fanotify_info_name(info2), + info1->name_len); +} + static bool fanotify_name_event_equal(struct fanotify_name_event *fne1, struct fanotify_name_event *fne2) { - /* - * Do not merge name events without dir fh. - * FAN_DIR_MODIFY does not encode object fh, so it may be empty. - */ - if (!fne1->dir_fh.len) + struct fanotify_info *info1 = &fne1->info; + struct fanotify_info *info2 = &fne2->info; + + /* Do not merge name events without dir fh */ + if (!info1->dir_fh_totlen) return false; - if (fne1->name_len != fne2->name_len || - !fanotify_fh_equal(&fne1->dir_fh, &fne2->dir_fh)) + if (!fanotify_fsid_equal(&fne1->fsid, &fne2->fsid)) return false; - return !memcmp(fne1->name, fne2->name, fne1->name_len); + return fanotify_info_equal(info1, info2); } static bool fanotify_should_merge(struct fsnotify_event *old_fsn, - struct fsnotify_event *new_fsn) + struct fsnotify_event *new_fsn) { struct fanotify_event *old, *new; @@ -83,22 +101,22 @@ static bool fanotify_should_merge(struct fsnotify_event *old_fsn, old->type != new->type || old->pid != new->pid) return false; + /* + * We want to merge many dirent events in the same dir (i.e. + * creates/unlinks/renames), but we do not want to merge dirent + * events referring to subdirs with dirent events referring to + * non subdirs, otherwise, user won't be able to tell from a + * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+ + * unlink pair or rmdir+create pair of events. + */ + if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR)) + return false; + switch (old->type) { case FANOTIFY_EVENT_TYPE_PATH: return fanotify_path_equal(fanotify_event_path(old), fanotify_event_path(new)); case FANOTIFY_EVENT_TYPE_FID: - /* - * We want to merge many dirent events in the same dir (i.e. - * creates/unlinks/renames), but we do not want to merge dirent - * events referring to subdirs with dirent events referring to - * non subdirs, otherwise, user won't be able to tell from a - * mask FAN_CREATE|FAN_DELETE|FAN_ONDIR if it describes mkdir+ - * unlink pair or rmdir+create pair of events. - */ - if ((old->mask & FS_ISDIR) != (new->mask & FS_ISDIR)) - return false; - return fanotify_fid_event_equal(FANOTIFY_FE(old), FANOTIFY_FE(new)); case FANOTIFY_EVENT_TYPE_FID_NAME: @@ -208,24 +226,30 @@ out: static u32 fanotify_group_event_mask(struct fsnotify_group *group, struct fsnotify_iter_info *iter_info, u32 event_mask, const void *data, - int data_type) + int data_type, struct inode *dir) { __u32 marks_mask = 0, marks_ignored_mask = 0; - __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS; + __u32 test_mask, user_mask = FANOTIFY_OUTGOING_EVENTS | + FANOTIFY_EVENT_FLAGS; const struct path *path = fsnotify_data_path(data, data_type); + unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); struct fsnotify_mark *mark; int type; pr_debug("%s: report_mask=%x mask=%x data=%p data_type=%d\n", __func__, iter_info->report_mask, event_mask, data, data_type); - if (!FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + if (!fid_mode) { /* Do we have path to open a file descriptor? */ if (!path) return 0; /* Path type events are only relevant for files and dirs */ if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry)) return 0; + } else if (!(fid_mode & FAN_REPORT_FID)) { + /* Do we have a directory inode to report? 
*/ + if (!dir && !(event_mask & FS_ISDIR)) + return 0; } fsnotify_foreach_obj_type(type) { @@ -244,12 +268,12 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, continue; /* - * If the event is for a child and this mark doesn't care about - * events on a child, don't send it! + * If the event is for a child and this mark is on a parent not + * watching children, don't send it! */ if (event_mask & FS_EVENT_ON_CHILD && - (type != FSNOTIFY_OBJ_TYPE_INODE || - !(mark->mask & FS_EVENT_ON_CHILD))) + type == FSNOTIFY_OBJ_TYPE_INODE && + !(mark->mask & FS_EVENT_ON_CHILD)) continue; marks_mask |= mark->mask; @@ -264,67 +288,102 @@ static u32 fanotify_group_event_mask(struct fsnotify_group *group, * * For backward compatibility and consistency, do not report FAN_ONDIR * to user in legacy fanotify mode (reporting fd) and report FAN_ONDIR - * to user in FAN_REPORT_FID mode for all event types. + * to user in fid mode for all event types. + * + * We never report FAN_EVENT_ON_CHILD to user, but we do pass it in to + * fanotify_alloc_event() when group is reporting fid as indication + * that event happened on child. */ - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { - /* Do not report FAN_ONDIR without any event */ - if (!(test_mask & ~FAN_ONDIR)) + if (fid_mode) { + /* Do not report event flags without any event */ + if (!(test_mask & ~FANOTIFY_EVENT_FLAGS)) return 0; } else { - user_mask &= ~FAN_ONDIR; + user_mask &= ~FANOTIFY_EVENT_FLAGS; } return test_mask & user_mask; } -static void fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode, - gfp_t gfp) +/* + * Check size needed to encode fanotify_fh. + * + * Return size of encoded fh without fanotify_fh header. + * Return 0 on failure to encode. + */ +static int fanotify_encode_fh_len(struct inode *inode) { - int dwords, type, bytes = 0; + int dwords = 0; + + if (!inode) + return 0; + + exportfs_encode_inode_fh(inode, NULL, &dwords, NULL); + + return dwords << 2; +} + +/* + * Encode fanotify_fh. + * + * Return total size of encoded fh including fanotify_fh header. + * Return 0 on failure to encode. + */ +static int fanotify_encode_fh(struct fanotify_fh *fh, struct inode *inode, + unsigned int fh_len, gfp_t gfp) +{ + int dwords, type = 0; char *ext_buf = NULL; void *buf = fh->buf; int err; + fh->type = FILEID_ROOT; + fh->len = 0; + fh->flags = 0; if (!inode) - goto out; + return 0; - dwords = 0; + /* + * !gpf means preallocated variable size fh, but fh_len could + * be zero in that case if encoding fh len failed. 
+ */ err = -ENOENT; - type = exportfs_encode_inode_fh(inode, NULL, &dwords, NULL); - if (!dwords) + if (fh_len < 4 || WARN_ON_ONCE(fh_len % 4)) goto out_err; - bytes = dwords << 2; - if (bytes > FANOTIFY_INLINE_FH_LEN) { - /* Treat failure to allocate fh as failure to allocate event */ + /* No external buffer in a variable size allocated fh */ + if (gfp && fh_len > FANOTIFY_INLINE_FH_LEN) { + /* Treat failure to allocate fh as failure to encode fh */ err = -ENOMEM; - ext_buf = kmalloc(bytes, gfp); + ext_buf = kmalloc(fh_len, gfp); if (!ext_buf) goto out_err; *fanotify_fh_ext_buf_ptr(fh) = ext_buf; buf = ext_buf; + fh->flags |= FANOTIFY_FH_FLAG_EXT_BUF; } + dwords = fh_len >> 2; type = exportfs_encode_inode_fh(inode, buf, &dwords, NULL); err = -EINVAL; - if (!type || type == FILEID_INVALID || bytes != dwords << 2) + if (!type || type == FILEID_INVALID || fh_len != dwords << 2) goto out_err; fh->type = type; - fh->len = bytes; + fh->len = fh_len; - return; + return FANOTIFY_FH_HDR_LEN + fh_len; out_err: pr_warn_ratelimited("fanotify: failed to encode fid (type=%d, len=%d, err=%i)\n", - type, bytes, err); + type, fh_len, err); kfree(ext_buf); *fanotify_fh_ext_buf_ptr(fh) = NULL; -out: /* Report the event without a file identifier on encode error */ fh->type = FILEID_INVALID; fh->len = 0; + return 0; } /* @@ -335,27 +394,179 @@ out: * FS_ATTRIB reports the child inode even if reported on a watched parent. * FS_CREATE reports the modified dir inode and not the created inode. */ -static struct inode *fanotify_fid_inode(struct inode *to_tell, u32 event_mask, - const void *data, int data_type) +static struct inode *fanotify_fid_inode(u32 event_mask, const void *data, + int data_type, struct inode *dir) { if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) - return to_tell; + return dir; + + return fsnotify_data_inode(data, data_type); +} + +/* + * The inode to use as identifier when reporting dir fid depends on the event. + * Report the modified directory inode on dirent modification events. + * Report the "victim" inode if "victim" is a directory. + * Report the parent inode if "victim" is not a directory and event is + * reported to parent. + * Otherwise, do not report dir fid. 
+ */ +static struct inode *fanotify_dfid_inode(u32 event_mask, const void *data, + int data_type, struct inode *dir) +{ + struct inode *inode = fsnotify_data_inode(data, data_type); + + if (event_mask & ALL_FSNOTIFY_DIRENT_EVENTS) + return dir; + + if (S_ISDIR(inode->i_mode)) + return inode; + + return dir; +} + +static struct fanotify_event *fanotify_alloc_path_event(const struct path *path, + gfp_t gfp) +{ + struct fanotify_path_event *pevent; + + pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp); + if (!pevent) + return NULL; + + pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH; + pevent->path = *path; + path_get(path); + + return &pevent->fae; +} - return (struct inode *)fsnotify_data_inode(data, data_type); +static struct fanotify_event *fanotify_alloc_perm_event(const struct path *path, + gfp_t gfp) +{ + struct fanotify_perm_event *pevent; + + pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp); + if (!pevent) + return NULL; + + pevent->fae.type = FANOTIFY_EVENT_TYPE_PATH_PERM; + pevent->response = 0; + pevent->state = FAN_EVENT_INIT; + pevent->path = *path; + path_get(path); + + return &pevent->fae; } -struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, - struct inode *inode, u32 mask, - const void *data, int data_type, - const struct qstr *file_name, - __kernel_fsid_t *fsid) +static struct fanotify_event *fanotify_alloc_fid_event(struct inode *id, + __kernel_fsid_t *fsid, + gfp_t gfp) +{ + struct fanotify_fid_event *ffe; + + ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp); + if (!ffe) + return NULL; + + ffe->fae.type = FANOTIFY_EVENT_TYPE_FID; + ffe->fsid = *fsid; + fanotify_encode_fh(&ffe->object_fh, id, fanotify_encode_fh_len(id), + gfp); + + return &ffe->fae; +} + +static struct fanotify_event *fanotify_alloc_name_event(struct inode *id, + __kernel_fsid_t *fsid, + const struct qstr *file_name, + struct inode *child, + gfp_t gfp) +{ + struct fanotify_name_event *fne; + struct fanotify_info *info; + struct fanotify_fh *dfh, *ffh; + unsigned int dir_fh_len = fanotify_encode_fh_len(id); + unsigned int child_fh_len = fanotify_encode_fh_len(child); + unsigned int size; + + size = sizeof(*fne) + FANOTIFY_FH_HDR_LEN + dir_fh_len; + if (child_fh_len) + size += FANOTIFY_FH_HDR_LEN + child_fh_len; + if (file_name) + size += file_name->len + 1; + fne = kmalloc(size, gfp); + if (!fne) + return NULL; + + fne->fae.type = FANOTIFY_EVENT_TYPE_FID_NAME; + fne->fsid = *fsid; + info = &fne->info; + fanotify_info_init(info); + dfh = fanotify_info_dir_fh(info); + info->dir_fh_totlen = fanotify_encode_fh(dfh, id, dir_fh_len, 0); + if (child_fh_len) { + ffh = fanotify_info_file_fh(info); + info->file_fh_totlen = fanotify_encode_fh(ffh, child, child_fh_len, 0); + } + if (file_name) + fanotify_info_copy_name(info, file_name); + + pr_debug("%s: ino=%lu size=%u dir_fh_len=%u child_fh_len=%u name_len=%u name='%.*s'\n", + __func__, id->i_ino, size, dir_fh_len, child_fh_len, + info->name_len, info->name_len, fanotify_info_name(info)); + + return &fne->fae; +} + +static struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, + u32 mask, const void *data, + int data_type, struct inode *dir, + const struct qstr *file_name, + __kernel_fsid_t *fsid) { struct fanotify_event *event = NULL; - struct fanotify_fid_event *ffe = NULL; - struct fanotify_name_event *fne = NULL; gfp_t gfp = GFP_KERNEL_ACCOUNT; - struct inode *id = fanotify_fid_inode(inode, mask, data, data_type); + struct inode *id = fanotify_fid_inode(mask, data, data_type, dir); + struct inode 
*dirid = fanotify_dfid_inode(mask, data, data_type, dir); const struct path *path = fsnotify_data_path(data, data_type); + unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); + struct inode *child = NULL; + bool name_event = false; + + if ((fid_mode & FAN_REPORT_DIR_FID) && dirid) { + /* + * With both flags FAN_REPORT_DIR_FID and FAN_REPORT_FID, we + * report the child fid for events reported on a non-dir child + * in addition to reporting the parent fid and maybe child name. + */ + if ((fid_mode & FAN_REPORT_FID) && + id != dirid && !(mask & FAN_ONDIR)) + child = id; + + id = dirid; + + /* + * We record file name only in a group with FAN_REPORT_NAME + * and when we have a directory inode to report. + * + * For directory entry modification event, we record the fid of + * the directory and the name of the modified entry. + * + * For event on non-directory that is reported to parent, we + * record the fid of the parent and the name of the child. + * + * Even if not reporting name, we need a variable length + * fanotify_name_event if reporting both parent and child fids. + */ + if (!(fid_mode & FAN_REPORT_NAME)) { + name_event = !!child; + file_name = NULL; + } else if ((mask & ALL_FSNOTIFY_DIRENT_EVENTS) || + !(mask & FAN_ONDIR)) { + name_event = true; + } + } /* * For queues with unlimited length lost events are not expected and @@ -372,87 +583,30 @@ struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, memalloc_use_memcg(group->memcg); if (fanotify_is_perm_event(mask)) { - struct fanotify_perm_event *pevent; - - pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp); - if (!pevent) - goto out; - - event = &pevent->fae; - event->type = FANOTIFY_EVENT_TYPE_PATH_PERM; - pevent->response = 0; - pevent->state = FAN_EVENT_INIT; - goto init; - } - - /* - * For FAN_DIR_MODIFY event, we report the fid of the directory and - * the name of the modified entry. - * Allocate an fanotify_name_event struct and copy the name. - */ - if (mask & FAN_DIR_MODIFY && !(WARN_ON_ONCE(!file_name))) { - fne = kmalloc(sizeof(*fne) + file_name->len + 1, gfp); - if (!fne) - goto out; - - event = &fne->fae; - event->type = FANOTIFY_EVENT_TYPE_FID_NAME; - fne->name_len = file_name->len; - strcpy(fne->name, file_name->name); - goto init; - } - - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { - ffe = kmem_cache_alloc(fanotify_fid_event_cachep, gfp); - if (!ffe) - goto out; - - event = &ffe->fae; - event->type = FANOTIFY_EVENT_TYPE_FID; + event = fanotify_alloc_perm_event(path, gfp); + } else if (name_event && (file_name || child)) { + event = fanotify_alloc_name_event(id, fsid, file_name, child, + gfp); + } else if (fid_mode) { + event = fanotify_alloc_fid_event(id, fsid, gfp); } else { - struct fanotify_path_event *pevent; - - pevent = kmem_cache_alloc(fanotify_path_event_cachep, gfp); - if (!pevent) - goto out; - - event = &pevent->fae; - event->type = FANOTIFY_EVENT_TYPE_PATH; + event = fanotify_alloc_path_event(path, gfp); } -init: + if (!event) + goto out; + /* * Use the victim inode instead of the watching inode as the id for * event queue, so event reported on parent is merged with event * reported on child when both directory and child watches exist. 
*/ - fsnotify_init_event(&event->fse, (unsigned long)id); - event->mask = mask; + fanotify_init_event(event, (unsigned long)id, mask); if (FAN_GROUP_FLAG(group, FAN_REPORT_TID)) event->pid = get_pid(task_pid(current)); else event->pid = get_pid(task_tgid(current)); - if (fsid && fanotify_event_fsid(event)) - *fanotify_event_fsid(event) = *fsid; - - if (fanotify_event_object_fh(event)) - fanotify_encode_fh(fanotify_event_object_fh(event), id, gfp); - - if (fanotify_event_dir_fh(event)) - fanotify_encode_fh(fanotify_event_dir_fh(event), id, gfp); - - if (fanotify_event_has_path(event)) { - struct path *p = fanotify_event_path(event); - - if (path) { - *p = *path; - path_get(path); - } else { - p->mnt = NULL; - p->dentry = NULL; - } - } out: memalloc_unuse_memcg(); return event; @@ -491,9 +645,9 @@ static __kernel_fsid_t fanotify_get_fsid(struct fsnotify_iter_info *iter_info) return fsid; } -static int fanotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, +static int fanotify_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info) { @@ -512,7 +666,6 @@ static int fanotify_handle_event(struct fsnotify_group *group, BUILD_BUG_ON(FAN_MOVED_FROM != FS_MOVED_FROM); BUILD_BUG_ON(FAN_CREATE != FS_CREATE); BUILD_BUG_ON(FAN_DELETE != FS_DELETE); - BUILD_BUG_ON(FAN_DIR_MODIFY != FS_DIR_MODIFY); BUILD_BUG_ON(FAN_DELETE_SELF != FS_DELETE_SELF); BUILD_BUG_ON(FAN_MOVE_SELF != FS_MOVE_SELF); BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD); @@ -526,12 +679,11 @@ static int fanotify_handle_event(struct fsnotify_group *group, BUILD_BUG_ON(HWEIGHT32(ALL_FANOTIFY_EVENT_BITS) != 19); mask = fanotify_group_event_mask(group, iter_info, mask, data, - data_type); + data_type, dir); if (!mask) return 0; - pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, - mask); + pr_debug("%s: group=%p mask=%x\n", __func__, group, mask); if (fanotify_is_perm_event(mask)) { /* @@ -542,14 +694,14 @@ static int fanotify_handle_event(struct fsnotify_group *group, return 0; } - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + if (FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS)) { fsid = fanotify_get_fsid(iter_info); /* Racing with mark destruction or creation? */ if (!fsid.val[0] && !fsid.val[1]) return 0; } - event = fanotify_alloc_event(group, inode, mask, data, data_type, + event = fanotify_alloc_event(group, mask, data, data_type, dir, file_name, &fsid); ret = -ENOMEM; if (unlikely(!event)) { @@ -614,11 +766,7 @@ static void fanotify_free_fid_event(struct fanotify_event *event) static void fanotify_free_name_event(struct fanotify_event *event) { - struct fanotify_name_event *fne = FANOTIFY_NE(event); - - if (fanotify_fh_has_ext_buf(&fne->dir_fh)) - kfree(fanotify_fh_ext_buf(&fne->dir_fh)); - kfree(fne); + kfree(FANOTIFY_NE(event)); } static void fanotify_free_event(struct fsnotify_event *fsn_event) @@ -640,6 +788,9 @@ static void fanotify_free_event(struct fsnotify_event *fsn_event) case FANOTIFY_EVENT_TYPE_FID_NAME: fanotify_free_name_event(event); break; + case FANOTIFY_EVENT_TYPE_OVERFLOW: + kfree(event); + break; default: WARN_ON_ONCE(1); } diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h index 8ce7ccfc4b0d..896c819a1786 100644 --- a/fs/notify/fanotify/fanotify.h +++ b/fs/notify/fanotify/fanotify.h @@ -23,20 +23,41 @@ enum { * stored in either the first or last 2 dwords. 
*/ #define FANOTIFY_INLINE_FH_LEN (3 << 2) +#define FANOTIFY_FH_HDR_LEN offsetof(struct fanotify_fh, buf) +/* Fixed size struct for file handle */ struct fanotify_fh { - unsigned char buf[FANOTIFY_INLINE_FH_LEN]; u8 type; u8 len; +#define FANOTIFY_FH_FLAG_EXT_BUF 1 + u8 flags; + u8 pad; + unsigned char buf[]; +} __aligned(4); + +/* Variable size struct for dir file handle + child file handle + name */ +struct fanotify_info { + /* size of dir_fh/file_fh including fanotify_fh hdr size */ + u8 dir_fh_totlen; + u8 file_fh_totlen; + u8 name_len; + u8 pad; + unsigned char buf[]; + /* + * (struct fanotify_fh) dir_fh starts at buf[0] + * (optional) file_fh starts at buf[dir_fh_totlen] + * name starts at buf[dir_fh_totlen + file_fh_totlen] + */ } __aligned(4); static inline bool fanotify_fh_has_ext_buf(struct fanotify_fh *fh) { - return fh->len > FANOTIFY_INLINE_FH_LEN; + return (fh->flags & FANOTIFY_FH_FLAG_EXT_BUF); } static inline char **fanotify_fh_ext_buf_ptr(struct fanotify_fh *fh) { + BUILD_BUG_ON(FANOTIFY_FH_HDR_LEN % 4); BUILD_BUG_ON(__alignof__(char *) - 4 + sizeof(char *) > FANOTIFY_INLINE_FH_LEN); return (char **)ALIGN((unsigned long)(fh->buf), __alignof__(char *)); @@ -52,6 +73,56 @@ static inline void *fanotify_fh_buf(struct fanotify_fh *fh) return fanotify_fh_has_ext_buf(fh) ? fanotify_fh_ext_buf(fh) : fh->buf; } +static inline int fanotify_info_dir_fh_len(struct fanotify_info *info) +{ + if (!info->dir_fh_totlen || + WARN_ON_ONCE(info->dir_fh_totlen < FANOTIFY_FH_HDR_LEN)) + return 0; + + return info->dir_fh_totlen - FANOTIFY_FH_HDR_LEN; +} + +static inline struct fanotify_fh *fanotify_info_dir_fh(struct fanotify_info *info) +{ + BUILD_BUG_ON(offsetof(struct fanotify_info, buf) % 4); + + return (struct fanotify_fh *)info->buf; +} + +static inline int fanotify_info_file_fh_len(struct fanotify_info *info) +{ + if (!info->file_fh_totlen || + WARN_ON_ONCE(info->file_fh_totlen < FANOTIFY_FH_HDR_LEN)) + return 0; + + return info->file_fh_totlen - FANOTIFY_FH_HDR_LEN; +} + +static inline struct fanotify_fh *fanotify_info_file_fh(struct fanotify_info *info) +{ + return (struct fanotify_fh *)(info->buf + info->dir_fh_totlen); +} + +static inline const char *fanotify_info_name(struct fanotify_info *info) +{ + return info->buf + info->dir_fh_totlen + info->file_fh_totlen; +} + +static inline void fanotify_info_init(struct fanotify_info *info) +{ + info->dir_fh_totlen = 0; + info->file_fh_totlen = 0; + info->name_len = 0; +} + +static inline void fanotify_info_copy_name(struct fanotify_info *info, + const struct qstr *name) +{ + info->name_len = name->len; + strcpy(info->buf + info->dir_fh_totlen + info->file_fh_totlen, + name->name); +} + /* * Common structure for fanotify events. 
Concrete structs are allocated in * fanotify_handle_event() and freed when the information is retrieved by @@ -63,6 +134,7 @@ enum fanotify_event_type { FANOTIFY_EVENT_TYPE_FID_NAME, /* variable length */ FANOTIFY_EVENT_TYPE_PATH, FANOTIFY_EVENT_TYPE_PATH_PERM, + FANOTIFY_EVENT_TYPE_OVERFLOW, /* struct fanotify_event */ }; struct fanotify_event { @@ -72,10 +144,20 @@ struct fanotify_event { struct pid *pid; }; +static inline void fanotify_init_event(struct fanotify_event *event, + unsigned long id, u32 mask) +{ + fsnotify_init_event(&event->fse, id); + event->mask = mask; + event->pid = NULL; +} + struct fanotify_fid_event { struct fanotify_event fae; __kernel_fsid_t fsid; struct fanotify_fh object_fh; + /* Reserve space in object_fh.buf[] - access with fanotify_fh_buf() */ + unsigned char _inline_fh_buf[FANOTIFY_INLINE_FH_LEN]; }; static inline struct fanotify_fid_event * @@ -87,9 +169,7 @@ FANOTIFY_FE(struct fanotify_event *event) struct fanotify_name_event { struct fanotify_event fae; __kernel_fsid_t fsid; - struct fanotify_fh dir_fh; - u8 name_len; - char name[]; + struct fanotify_info info; }; static inline struct fanotify_name_event * @@ -113,35 +193,37 @@ static inline struct fanotify_fh *fanotify_event_object_fh( { if (event->type == FANOTIFY_EVENT_TYPE_FID) return &FANOTIFY_FE(event)->object_fh; + else if (event->type == FANOTIFY_EVENT_TYPE_FID_NAME) + return fanotify_info_file_fh(&FANOTIFY_NE(event)->info); else return NULL; } -static inline struct fanotify_fh *fanotify_event_dir_fh( +static inline struct fanotify_info *fanotify_event_info( struct fanotify_event *event) { if (event->type == FANOTIFY_EVENT_TYPE_FID_NAME) - return &FANOTIFY_NE(event)->dir_fh; + return &FANOTIFY_NE(event)->info; else return NULL; } static inline int fanotify_event_object_fh_len(struct fanotify_event *event) { + struct fanotify_info *info = fanotify_event_info(event); struct fanotify_fh *fh = fanotify_event_object_fh(event); - return fh ? fh->len : 0; + if (info) + return info->file_fh_totlen ? fh->len : 0; + else + return fh ? fh->len : 0; } -static inline bool fanotify_event_has_name(struct fanotify_event *event) +static inline int fanotify_event_dir_fh_len(struct fanotify_event *event) { - return event->type == FANOTIFY_EVENT_TYPE_FID_NAME; -} + struct fanotify_info *info = fanotify_event_info(event); -static inline int fanotify_event_name_len(struct fanotify_event *event) -{ - return fanotify_event_has_name(event) ? - FANOTIFY_NE(event)->name_len : 0; + return info ? 
fanotify_info_dir_fh_len(info) : 0; } struct fanotify_path_event { @@ -202,9 +284,3 @@ static inline struct path *fanotify_event_path(struct fanotify_event *event) else return NULL; } - -struct fanotify_event *fanotify_alloc_event(struct fsnotify_group *group, - struct inode *inode, u32 mask, - const void *data, int data_type, - const struct qstr *file_name, - __kernel_fsid_t *fsid); diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 63b5dffdca9e..3e01d8f2ab90 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -64,21 +64,28 @@ static int fanotify_fid_info_len(int fh_len, int name_len) return roundup(FANOTIFY_INFO_HDR_LEN + info_len, FANOTIFY_EVENT_ALIGN); } -static int fanotify_event_info_len(struct fanotify_event *event) +static int fanotify_event_info_len(unsigned int fid_mode, + struct fanotify_event *event) { - int info_len = 0; + struct fanotify_info *info = fanotify_event_info(event); + int dir_fh_len = fanotify_event_dir_fh_len(event); int fh_len = fanotify_event_object_fh_len(event); + int info_len = 0; + int dot_len = 0; - if (fh_len) - info_len += fanotify_fid_info_len(fh_len, 0); - - if (fanotify_event_name_len(event)) { - struct fanotify_name_event *fne = FANOTIFY_NE(event); - - info_len += fanotify_fid_info_len(fne->dir_fh.len, - fne->name_len); + if (dir_fh_len) { + info_len += fanotify_fid_info_len(dir_fh_len, info->name_len); + } else if ((fid_mode & FAN_REPORT_NAME) && (event->mask & FAN_ONDIR)) { + /* + * With group flag FAN_REPORT_NAME, if name was not recorded in + * event on a directory, we will report the name ".". + */ + dot_len = 1; } + if (fh_len) + info_len += fanotify_fid_info_len(fh_len, dot_len); + return info_len; } @@ -93,6 +100,7 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group, { size_t event_size = FAN_EVENT_METADATA_LEN; struct fanotify_event *event = NULL; + unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); pr_debug("%s: group=%p count=%zd\n", __func__, group, count); @@ -100,8 +108,8 @@ static struct fanotify_event *get_one_event(struct fsnotify_group *group, if (fsnotify_notify_queue_is_empty(group)) goto out; - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { - event_size += fanotify_event_info_len( + if (fid_mode) { + event_size += fanotify_event_info_len(fid_mode, FANOTIFY_E(fsnotify_peek_first_event(group))); } @@ -218,7 +226,7 @@ static int process_access_response(struct fsnotify_group *group, } static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, - const char *name, size_t name_len, + int info_type, const char *name, size_t name_len, char __user *buf, size_t count) { struct fanotify_event_info_fid info = { }; @@ -231,7 +239,7 @@ static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, pr_debug("%s: fh_len=%zu name_len=%zu, info_len=%zu, count=%zu\n", __func__, fh_len, name_len, info_len, count); - if (!fh_len || (name && !name_len)) + if (!fh_len) return 0; if (WARN_ON_ONCE(len < sizeof(info) || len > count)) @@ -241,8 +249,21 @@ static int copy_info_to_user(__kernel_fsid_t *fsid, struct fanotify_fh *fh, * Copy event info fid header followed by variable sized file handle * and optionally followed by variable sized filename. */ - info.hdr.info_type = name_len ? 
FAN_EVENT_INFO_TYPE_DFID_NAME : - FAN_EVENT_INFO_TYPE_FID; + switch (info_type) { + case FAN_EVENT_INFO_TYPE_FID: + case FAN_EVENT_INFO_TYPE_DFID: + if (WARN_ON_ONCE(name_len)) + return -EFAULT; + break; + case FAN_EVENT_INFO_TYPE_DFID_NAME: + if (WARN_ON_ONCE(!name || !name_len)) + return -EFAULT; + break; + default: + return -EFAULT; + } + + info.hdr.info_type = info_type; info.hdr.len = len; info.fsid = *fsid; if (copy_to_user(buf, &info, sizeof(info))) @@ -305,13 +326,16 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, { struct fanotify_event_metadata metadata; struct path *path = fanotify_event_path(event); + struct fanotify_info *info = fanotify_event_info(event); + unsigned int fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); struct file *f = NULL; int ret, fd = FAN_NOFD; + int info_type = 0; pr_debug("%s: group=%p event=%p\n", __func__, group, event); metadata.event_len = FAN_EVENT_METADATA_LEN + - fanotify_event_info_len(event); + fanotify_event_info_len(fid_mode, event); metadata.metadata_len = FAN_EVENT_METADATA_LEN; metadata.vers = FANOTIFY_METADATA_VERSION; metadata.reserved = 0; @@ -346,13 +370,13 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, fd_install(fd, f); /* Event info records order is: dir fid + name, child fid */ - if (fanotify_event_name_len(event)) { - struct fanotify_name_event *fne = FANOTIFY_NE(event); - + if (fanotify_event_dir_fh_len(event)) { + info_type = info->name_len ? FAN_EVENT_INFO_TYPE_DFID_NAME : + FAN_EVENT_INFO_TYPE_DFID; ret = copy_info_to_user(fanotify_event_fsid(event), - fanotify_event_dir_fh(event), - fne->name, fne->name_len, - buf, count); + fanotify_info_dir_fh(info), + info_type, fanotify_info_name(info), + info->name_len, buf, count); if (ret < 0) return ret; @@ -361,9 +385,46 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, } if (fanotify_event_object_fh_len(event)) { + const char *dot = NULL; + int dot_len = 0; + + if (fid_mode == FAN_REPORT_FID || info_type) { + /* + * With only group flag FAN_REPORT_FID only type FID is + * reported. Second info record type is always FID. + */ + info_type = FAN_EVENT_INFO_TYPE_FID; + } else if ((fid_mode & FAN_REPORT_NAME) && + (event->mask & FAN_ONDIR)) { + /* + * With group flag FAN_REPORT_NAME, if name was not + * recorded in an event on a directory, report the + * name "." with info type DFID_NAME. + */ + info_type = FAN_EVENT_INFO_TYPE_DFID_NAME; + dot = "."; + dot_len = 1; + } else if ((event->mask & ALL_FSNOTIFY_DIRENT_EVENTS) || + (event->mask & FAN_ONDIR)) { + /* + * With group flag FAN_REPORT_DIR_FID, a single info + * record has type DFID for directory entry modification + * event and for event on a directory. + */ + info_type = FAN_EVENT_INFO_TYPE_DFID; + } else { + /* + * With group flags FAN_REPORT_DIR_FID|FAN_REPORT_FID, + * a single info record has type FID for event on a + * non-directory, when there is no directory to report. + * For example, on FAN_DELETE_SELF event. + */ + info_type = FAN_EVENT_INFO_TYPE_FID; + } + ret = copy_info_to_user(fanotify_event_fsid(event), fanotify_event_object_fh(event), - NULL, 0, buf, count); + info_type, dot, dot_len, buf, count); if (ret < 0) return ret; @@ -412,6 +473,11 @@ static ssize_t fanotify_read(struct file *file, char __user *buf, add_wait_queue(&group->notification_waitq, &wait); while (1) { + /* + * User can supply arbitrarily large buffer. Avoid softlockups + * in case there are lots of available events. 
+ */ + cond_resched(); event = get_one_event(group, count); if (IS_ERR(event)) { ret = PTR_ERR(event); @@ -651,12 +717,13 @@ out: } static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, - __u32 mask, - unsigned int flags, - int *destroy) + __u32 mask, unsigned int flags, + __u32 umask, int *destroy) { __u32 oldmask = 0; + /* umask bits cannot be removed by user */ + mask &= ~umask; spin_lock(&fsn_mark->lock); if (!(flags & FAN_MARK_IGNORED_MASK)) { oldmask = fsn_mark->mask; @@ -664,7 +731,13 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, } else { fsn_mark->ignored_mask &= ~mask; } - *destroy = !(fsn_mark->mask | fsn_mark->ignored_mask); + /* + * We need to keep the mark around even if remaining mask cannot + * result in any events (e.g. mask == FAN_ONDIR) to support incremenal + * changes to the mask. + * Destroy mark when only umask bits remain. + */ + *destroy = !((fsn_mark->mask | fsn_mark->ignored_mask) & ~umask); spin_unlock(&fsn_mark->lock); return mask & oldmask; @@ -672,7 +745,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark, static int fanotify_remove_mark(struct fsnotify_group *group, fsnotify_connp_t *connp, __u32 mask, - unsigned int flags) + unsigned int flags, __u32 umask) { struct fsnotify_mark *fsn_mark = NULL; __u32 removed; @@ -686,7 +759,7 @@ static int fanotify_remove_mark(struct fsnotify_group *group, } removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, - &destroy_mark); + umask, &destroy_mark); if (removed & fsnotify_conn_mask(fsn_mark->connector)) fsnotify_recalc_mask(fsn_mark->connector); if (destroy_mark) @@ -702,25 +775,26 @@ static int fanotify_remove_mark(struct fsnotify_group *group, static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt, __u32 mask, - unsigned int flags) + unsigned int flags, __u32 umask) { return fanotify_remove_mark(group, &real_mount(mnt)->mnt_fsnotify_marks, - mask, flags); + mask, flags, umask); } static int fanotify_remove_sb_mark(struct fsnotify_group *group, - struct super_block *sb, __u32 mask, - unsigned int flags) + struct super_block *sb, __u32 mask, + unsigned int flags, __u32 umask) { - return fanotify_remove_mark(group, &sb->s_fsnotify_marks, mask, flags); + return fanotify_remove_mark(group, &sb->s_fsnotify_marks, mask, + flags, umask); } static int fanotify_remove_inode_mark(struct fsnotify_group *group, struct inode *inode, __u32 mask, - unsigned int flags) + unsigned int flags, __u32 umask) { return fanotify_remove_mark(group, &inode->i_fsnotify_marks, mask, - flags); + flags, umask); } static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark, @@ -831,13 +905,28 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group, FSNOTIFY_OBJ_TYPE_INODE, mask, flags, fsid); } +static struct fsnotify_event *fanotify_alloc_overflow_event(void) +{ + struct fanotify_event *oevent; + + oevent = kmalloc(sizeof(*oevent), GFP_KERNEL_ACCOUNT); + if (!oevent) + return NULL; + + fanotify_init_event(oevent, 0, FS_Q_OVERFLOW); + oevent->type = FANOTIFY_EVENT_TYPE_OVERFLOW; + + return &oevent->fse; +} + /* fanotify syscalls */ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) { struct fsnotify_group *group; int f_flags, fd; struct user_struct *user; - struct fanotify_event *oevent; + unsigned int fid_mode = flags & FANOTIFY_FID_BITS; + unsigned int class = flags & FANOTIFY_CLASS_BITS; pr_debug("%s: flags=%x event_f_flags=%x\n", __func__, flags, event_f_flags); @@ 
-864,8 +953,14 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) return -EINVAL; } - if ((flags & FAN_REPORT_FID) && - (flags & FANOTIFY_CLASS_BITS) != FAN_CLASS_NOTIF) + if (fid_mode && class != FAN_CLASS_NOTIF) + return -EINVAL; + + /* + * Child name is reported with parent fid so requires dir fid. + * We can report both child fid and dir fid with or without name. + */ + if ((fid_mode & FAN_REPORT_NAME) && !(fid_mode & FAN_REPORT_DIR_FID)) return -EINVAL; user = get_current_user(); @@ -892,20 +987,18 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags) atomic_inc(&user->fanotify_listeners); group->memcg = get_mem_cgroup_from_mm(current->mm); - oevent = fanotify_alloc_event(group, NULL, FS_Q_OVERFLOW, NULL, - FSNOTIFY_EVENT_NONE, NULL, NULL); - if (unlikely(!oevent)) { + group->overflow_event = fanotify_alloc_overflow_event(); + if (unlikely(!group->overflow_event)) { fd = -ENOMEM; goto out_destroy_group; } - group->overflow_event = &oevent->fse; if (force_o_largefile()) event_f_flags |= O_LARGEFILE; group->fanotify_data.f_flags = event_f_flags; init_waitqueue_head(&group->fanotify_data.access_waitq); INIT_LIST_HEAD(&group->fanotify_data.access_list); - switch (flags & FANOTIFY_CLASS_BITS) { + switch (class) { case FAN_CLASS_NOTIF: group->priority = FS_PRIO_0; break; @@ -1024,7 +1117,9 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, __kernel_fsid_t __fsid, *fsid = NULL; u32 valid_mask = FANOTIFY_EVENTS | FANOTIFY_EVENT_FLAGS; unsigned int mark_type = flags & FANOTIFY_MARK_TYPE_BITS; - unsigned int obj_type; + bool ignored = flags & FAN_MARK_IGNORED_MASK; + unsigned int obj_type, fid_mode; + u32 umask = 0; int ret; pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n", @@ -1052,7 +1147,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, } switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { - case FAN_MARK_ADD: /* fallthrough */ + case FAN_MARK_ADD: case FAN_MARK_REMOVE: if (!mask) return -EINVAL; @@ -1071,6 +1166,10 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, if (mask & ~valid_mask) return -EINVAL; + /* Event flags (ONDIR, ON_CHILD) are meaningless in ignored mask */ + if (ignored) + mask &= ~FANOTIFY_EVENT_FLAGS; + f = fdget(fanotify_fd); if (unlikely(!f.file)) return -EBADF; @@ -1097,9 +1196,9 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, * inode events are not supported on a mount mark, because they do not * carry enough information (i.e. path) to be filtered by mount point. 
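Note on the fanotify_init() validation above: fid reporting modes are only allowed with FAN_CLASS_NOTIF, and FAN_REPORT_NAME is only accepted together with FAN_REPORT_DIR_FID. A hypothetical userspace sketch of the resulting rule (not part of the diff; assumes the installed uapi headers already define the new flags, and that the caller has CAP_SYS_ADMIN):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>

int main(void)
{
	/* dir fid + name together: accepted by the new validation */
	int fd = fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_DIR_FID |
			       FAN_REPORT_NAME, O_RDONLY);
	if (fd < 0)
		perror("fanotify_init(DIR_FID | NAME)");

	/* name without dir fid: rejected with EINVAL by the check above */
	if (fanotify_init(FAN_CLASS_NOTIF | FAN_REPORT_NAME, O_RDONLY) < 0 &&
	    errno == EINVAL)
		fprintf(stderr, "FAN_REPORT_NAME alone is rejected\n");
	return 0;
}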
*/ + fid_mode = FAN_GROUP_FLAG(group, FANOTIFY_FID_BITS); if (mask & FANOTIFY_INODE_EVENTS && - (!FAN_GROUP_FLAG(group, FAN_REPORT_FID) || - mark_type == FAN_MARK_MOUNT)) + (!fid_mode || mark_type == FAN_MARK_MOUNT)) goto fput_and_out; if (flags & FAN_MARK_FLUSH) { @@ -1124,7 +1223,7 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, goto path_put_and_out; } - if (FAN_GROUP_FLAG(group, FAN_REPORT_FID)) { + if (fid_mode) { ret = fanotify_test_fid(&path, &__fsid); if (ret) goto path_put_and_out; @@ -1138,6 +1237,19 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, else mnt = path.mnt; + /* Mask out FAN_EVENT_ON_CHILD flag for sb/mount/non-dir marks */ + if (mnt || !S_ISDIR(inode->i_mode)) { + mask &= ~FAN_EVENT_ON_CHILD; + umask = FAN_EVENT_ON_CHILD; + /* + * If group needs to report parent fid, register for getting + * events with parent/name info for non-directory. + */ + if ((fid_mode & FAN_REPORT_DIR_FID) && + (flags & FAN_MARK_ADD) && !ignored) + mask |= FAN_EVENT_ON_CHILD; + } + /* create/update an inode mark */ switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) { case FAN_MARK_ADD: @@ -1154,13 +1266,13 @@ static int do_fanotify_mark(int fanotify_fd, unsigned int flags, __u64 mask, case FAN_MARK_REMOVE: if (mark_type == FAN_MARK_MOUNT) ret = fanotify_remove_vfsmount_mark(group, mnt, mask, - flags); + flags, umask); else if (mark_type == FAN_MARK_FILESYSTEM) ret = fanotify_remove_sb_mark(group, mnt->mnt_sb, mask, - flags); + flags, umask); else ret = fanotify_remove_inode_mark(group, inode, mask, - flags); + flags, umask); break; default: ret = -EINVAL; @@ -1203,7 +1315,7 @@ COMPAT_SYSCALL_DEFINE6(fanotify_mark, */ static int __init fanotify_user_setup(void) { - BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 8); + BUILD_BUG_ON(HWEIGHT32(FANOTIFY_INIT_FLAGS) != 10); BUILD_BUG_ON(HWEIGHT32(FANOTIFY_MARK_FLAGS) != 9); fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 72d332ce8e12..a960ec3a569a 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -74,7 +74,7 @@ static void fsnotify_unmount_inodes(struct super_block *sb) iput(iput_inode); /* for each watch, send FS_UNMOUNT and then remove it */ - fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0); + fsnotify_inode(inode, FS_UNMOUNT); fsnotify_inode_delete(inode); @@ -142,45 +142,140 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode) spin_unlock(&inode->i_lock); } -/* Notify this dentry's parent about a child's events. */ -int fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, - int data_type) +/* Are inode/sb/mount interested in parent and name info with this event? */ +static bool fsnotify_event_needs_parent(struct inode *inode, struct mount *mnt, + __u32 mask) { + __u32 marks_mask = 0; + + /* We only send parent/name to inode/sb/mount for events on non-dir */ + if (mask & FS_ISDIR) + return false; + + /* Did either inode/sb/mount subscribe for events with parent/name? */ + marks_mask |= fsnotify_parent_needed_mask(inode->i_fsnotify_mask); + marks_mask |= fsnotify_parent_needed_mask(inode->i_sb->s_fsnotify_mask); + if (mnt) + marks_mask |= fsnotify_parent_needed_mask(mnt->mnt_fsnotify_mask); + + /* Did they subscribe for this event with parent/name info? 
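The do_fanotify_mark() changes above strip FAN_EVENT_ON_CHILD from sb/mount marks and from marks on non-directories, re-add it internally when the group reports directory fids, and pass it as umask on removal so FAN_MARK_REMOVE never clears the implicitly added bit. A condensed restatement of that decision (simplified sketch, not the actual kernel function; names follow the hunks):

/* Simplified sketch of the mask/umask handling added to do_fanotify_mark(). */
static __u32 fanotify_mark_user_mask(__u32 mask, unsigned int fid_mode,
				     bool sb_or_mount_or_nondir, bool adding,
				     bool ignored, __u32 *umask)
{
	*umask = 0;
	if (sb_or_mount_or_nondir) {
		mask &= ~FAN_EVENT_ON_CHILD;
		*umask = FAN_EVENT_ON_CHILD;	/* user cannot remove this bit */
		/* groups reporting parent fid still need events "on child" */
		if ((fid_mode & FAN_REPORT_DIR_FID) && adding && !ignored)
			mask |= FAN_EVENT_ON_CHILD;
	}
	return mask;
}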
*/ + return mask & marks_mask; +} + +/* + * Notify this dentry's parent about a child's events with child name info + * if parent is watching or if inode/sb/mount are interested in events with + * parent and name info. + * + * Notify only the child without name info if parent is not watching and + * inode/sb/mount are not interested in events with parent and name info. + */ +int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, + int data_type) +{ + const struct path *path = fsnotify_data_path(data, data_type); + struct mount *mnt = path ? real_mount(path->mnt) : NULL; + struct inode *inode = d_inode(dentry); struct dentry *parent; - struct inode *p_inode; + bool parent_watched = dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED; + __u32 p_mask; + struct inode *p_inode = NULL; + struct name_snapshot name; + struct qstr *file_name = NULL; int ret = 0; - if (!(dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED)) + /* + * Do inode/sb/mount care about parent and name info on non-dir? + * Do they care about any event at all? + */ + if (!inode->i_fsnotify_marks && !inode->i_sb->s_fsnotify_marks && + (!mnt || !mnt->mnt_fsnotify_marks) && !parent_watched) return 0; + parent = NULL; + if (!parent_watched && !fsnotify_event_needs_parent(inode, mnt, mask)) + goto notify; + + /* Does parent inode care about events on children? */ parent = dget_parent(dentry); p_inode = parent->d_inode; - - if (unlikely(!fsnotify_inode_watches_children(p_inode))) { + p_mask = fsnotify_inode_watches_children(p_inode); + if (unlikely(parent_watched && !p_mask)) __fsnotify_update_child_dentry_flags(p_inode); - } else if (p_inode->i_fsnotify_mask & mask & ALL_FSNOTIFY_EVENTS) { - struct name_snapshot name; - /* we are notifying a parent so come up with the new mask which - * specifies these are events which came from a child. */ - mask |= FS_EVENT_ON_CHILD; + /* + * Include parent/name in notification either if some notification + * groups require parent info (!parent_watched case) or the parent is + * interested in this event. 
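The rewritten __fsnotify_parent() above has three outcomes: bail out when nothing is watching, report only the child when nobody wants parent/name info, or snapshot the name and report against the parent directory. A simplified sketch of that flow (the real code also refreshes the DCACHE flag and re-checks fsnotify_inode_watches_children(); error handling and SRCU details omitted):

/* Simplified sketch of the __fsnotify_parent() reporting decision above. */
static int fsnotify_parent_sketch(struct dentry *dentry, __u32 mask,
				  const void *data, int data_type)
{
	struct inode *inode = d_inode(dentry);
	struct inode *p_inode = NULL;
	struct dentry *parent = NULL;
	struct name_snapshot name;
	struct qstr *file_name = NULL;
	bool parent_watched =
		dentry->d_flags & DCACHE_FSNOTIFY_PARENT_WATCHED;
	int ret;

	if (parent_watched || fsnotify_event_needs_parent(inode, NULL, mask)) {
		parent = dget_parent(dentry);
		p_inode = parent->d_inode;
		take_dentry_name_snapshot(&name, dentry);
		file_name = &name.name;
		if (parent_watched)
			mask |= FS_EVENT_ON_CHILD;
	}
	/* One fsnotify() call covers all cases; dir and name may be NULL. */
	ret = fsnotify(mask, data, data_type, p_inode, file_name, inode, 0);
	if (file_name)
		release_dentry_name_snapshot(&name);
	dput(parent);
	return ret;
}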
+ */ + if (!parent_watched || (mask & p_mask & ALL_FSNOTIFY_EVENTS)) { + /* When notifying parent, child should be passed as data */ + WARN_ON_ONCE(inode != fsnotify_data_inode(data, data_type)); + /* Notify both parent and child with child name info */ take_dentry_name_snapshot(&name, dentry); - ret = fsnotify(p_inode, mask, data, data_type, &name.name, 0); - release_dentry_name_snapshot(&name); + file_name = &name.name; + if (parent_watched) + mask |= FS_EVENT_ON_CHILD; } +notify: + ret = fsnotify(mask, data, data_type, p_inode, file_name, inode, 0); + + if (file_name) + release_dentry_name_snapshot(&name); dput(parent); return ret; } -EXPORT_SYMBOL_GPL(fsnotify_parent); +EXPORT_SYMBOL_GPL(__fsnotify_parent); + +static int fsnotify_handle_event(struct fsnotify_group *group, __u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *name, + u32 cookie, struct fsnotify_iter_info *iter_info) +{ + struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); + struct fsnotify_mark *child_mark = fsnotify_iter_child_mark(iter_info); + struct inode *inode = fsnotify_data_inode(data, data_type); + const struct fsnotify_ops *ops = group->ops; + int ret; + + if (WARN_ON_ONCE(!ops->handle_inode_event)) + return 0; + + if (WARN_ON_ONCE(fsnotify_iter_sb_mark(iter_info)) || + WARN_ON_ONCE(fsnotify_iter_vfsmount_mark(iter_info))) + return 0; + + /* + * An event can be sent on child mark iterator instead of inode mark + * iterator because of other groups that have interest of this inode + * and have marks on both parent and child. We can simplify this case. + */ + if (!inode_mark) { + inode_mark = child_mark; + child_mark = NULL; + dir = NULL; + name = NULL; + } + + ret = ops->handle_inode_event(inode_mark, mask, inode, dir, name); + if (ret || !child_mark) + return ret; + + /* + * Some events can be sent on both parent dir and child marks + * (e.g. FS_ATTRIB). If both parent dir and child are watching, + * report the event once to parent dir with name and once to child + * without name. 
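fsnotify_handle_event() above lets simple groups supply only a ->handle_inode_event() callback instead of the full ->handle_event() iterator. A hypothetical minimal backend showing the calling convention (dir and name are non-NULL only when the event is delivered on a parent directory watch; other fsnotify_ops callbacks omitted):

/* Hypothetical backend using the new ->handle_inode_event() hook. */
static int demo_handle_inode_event(struct fsnotify_mark *mark, u32 mask,
				   struct inode *inode, struct inode *dir,
				   const struct qstr *name)
{
	if (dir && name)	/* reported on a directory watch: name is valid */
		pr_info("demo: event 0x%x on \"%s\" in dir ino %lu\n",
			mask, name->name, dir->i_ino);
	else			/* reported on the watched inode itself */
		pr_info("demo: event 0x%x on ino %lu\n", mask, inode->i_ino);
	return 0;
}

static const struct fsnotify_ops demo_fsnotify_ops = {
	.handle_inode_event = demo_handle_inode_event,
	/* free_event, freeing_mark, etc. omitted in this sketch */
};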
+ */ + return ops->handle_inode_event(child_mark, mask, inode, NULL, NULL); +} -static int send_to_group(struct inode *to_tell, - __u32 mask, const void *data, - int data_is, u32 cookie, - const struct qstr *file_name, - struct fsnotify_iter_info *iter_info) +static int send_to_group(__u32 mask, const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, + u32 cookie, struct fsnotify_iter_info *iter_info) { struct fsnotify_group *group = NULL; __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); @@ -216,16 +311,20 @@ static int send_to_group(struct inode *to_tell, } } - pr_debug("%s: group=%p to_tell=%p mask=%x marks_mask=%x marks_ignored_mask=%x" - " data=%p data_is=%d cookie=%d\n", - __func__, group, to_tell, mask, marks_mask, marks_ignored_mask, - data, data_is, cookie); + pr_debug("%s: group=%p mask=%x marks_mask=%x marks_ignored_mask=%x data=%p data_type=%d dir=%p cookie=%d\n", + __func__, group, mask, marks_mask, marks_ignored_mask, + data, data_type, dir, cookie); if (!(test_mask & marks_mask & ~marks_ignored_mask)) return 0; - return group->ops->handle_event(group, to_tell, mask, data, data_is, - file_name, cookie, iter_info); + if (group->ops->handle_event) { + return group->ops->handle_event(group, mask, data, data_type, dir, + file_name, cookie, iter_info); + } + + return fsnotify_handle_event(group, mask, data, data_type, dir, + file_name, cookie, iter_info); } static struct fsnotify_mark *fsnotify_first_mark(struct fsnotify_mark_connector **connp) @@ -303,29 +402,51 @@ static void fsnotify_iter_next(struct fsnotify_iter_info *iter_info) } /* - * This is the main call to fsnotify. The VFS calls into hook specific functions - * in linux/fsnotify.h. Those functions then in turn call here. Here will call - * out to all of the registered fsnotify_group. Those groups can then use the - * notification event in whatever means they feel necessary. + * fsnotify - This is the main call to fsnotify. + * + * The VFS calls into hook specific functions in linux/fsnotify.h. + * Those functions then in turn call here. Here will call out to all of the + * registered fsnotify_group. Those groups can then use the notification event + * in whatever means they feel necessary. + * + * @mask: event type and flags + * @data: object that event happened on + * @data_type: type of object for fanotify_data_XXX() accessors + * @dir: optional directory associated with event - + * if @file_name is not NULL, this is the directory that + * @file_name is relative to + * @file_name: optional file name associated with event + * @inode: optional inode associated with event - + * either @dir or @inode must be non-NULL. + * if both are non-NULL event may be reported to both. 
+ * @cookie: inotify rename cookie */ -int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, - const struct qstr *file_name, u32 cookie) +int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, + const struct qstr *file_name, struct inode *inode, u32 cookie) { - const struct path *path = fsnotify_data_path(data, data_is); + const struct path *path = fsnotify_data_path(data, data_type); struct fsnotify_iter_info iter_info = {}; - struct super_block *sb = to_tell->i_sb; + struct super_block *sb; struct mount *mnt = NULL; - __u32 mnt_or_sb_mask = sb->s_fsnotify_mask; + struct inode *child = NULL; int ret = 0; - __u32 test_mask = (mask & ALL_FSNOTIFY_EVENTS); + __u32 test_mask, marks_mask; - if (path) { + if (path) mnt = real_mount(path->mnt); - mnt_or_sb_mask |= mnt->mnt_fsnotify_mask; + + if (!inode) { + /* Dirent event - report on TYPE_INODE to dir */ + inode = dir; + } else if (mask & FS_EVENT_ON_CHILD) { + /* + * Event on child - report on TYPE_INODE to dir if it is + * watching children and on TYPE_CHILD to child. + */ + child = inode; + inode = dir; } - /* An event "on child" is not intended for a mount/sb mark */ - if (mask & FS_EVENT_ON_CHILD) - mnt_or_sb_mask = 0; + sb = inode->i_sb; /* * Optimization: srcu_read_lock() has a memory barrier which can @@ -334,28 +455,45 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, * SRCU because we have no references to any objects and do not * need SRCU to keep them "alive". */ - if (!to_tell->i_fsnotify_marks && !sb->s_fsnotify_marks && - (!mnt || !mnt->mnt_fsnotify_marks)) + if (!sb->s_fsnotify_marks && + (!mnt || !mnt->mnt_fsnotify_marks) && + (!inode || !inode->i_fsnotify_marks) && + (!child || !child->i_fsnotify_marks)) return 0; + + marks_mask = sb->s_fsnotify_mask; + if (mnt) + marks_mask |= mnt->mnt_fsnotify_mask; + if (inode) + marks_mask |= inode->i_fsnotify_mask; + if (child) + marks_mask |= child->i_fsnotify_mask; + + /* * if this is a modify event we may need to clear the ignored masks - * otherwise return if neither the inode nor the vfsmount/sb care about - * this type of event. + * otherwise return if none of the marks care about this type of event. */ - if (!(mask & FS_MODIFY) && - !(test_mask & (to_tell->i_fsnotify_mask | mnt_or_sb_mask))) + test_mask = (mask & ALL_FSNOTIFY_EVENTS); + if (!(mask & FS_MODIFY) && !(test_mask & marks_mask)) return 0; iter_info.srcu_idx = srcu_read_lock(&fsnotify_mark_srcu); - iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = - fsnotify_first_mark(&to_tell->i_fsnotify_marks); iter_info.marks[FSNOTIFY_OBJ_TYPE_SB] = fsnotify_first_mark(&sb->s_fsnotify_marks); if (mnt) { iter_info.marks[FSNOTIFY_OBJ_TYPE_VFSMOUNT] = fsnotify_first_mark(&mnt->mnt_fsnotify_marks); } + if (inode) { + iter_info.marks[FSNOTIFY_OBJ_TYPE_INODE] = + fsnotify_first_mark(&inode->i_fsnotify_marks); + } + if (child) { + iter_info.marks[FSNOTIFY_OBJ_TYPE_CHILD] = + fsnotify_first_mark(&child->i_fsnotify_marks); + } /* * We need to merge inode/vfsmount/sb mark lists so that e.g. inode mark @@ -363,8 +501,8 @@ int fsnotify(struct inode *to_tell, __u32 mask, const void *data, int data_is, * That's why this traversal is so complicated... 
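With the new argument list, callers that only have an inode pass it as both the data object and the reporting target. The fsnotify_inode() helper used for FS_UNMOUNT earlier in this file presumably wraps exactly that; a plausible shape of the helper (assumption, the header change is not visible in this diff):

/* Assumed shape of fsnotify_inode(): an inode event without dir/name info. */
static inline void fsnotify_inode(struct inode *inode, __u32 mask)
{
	if (S_ISDIR(inode->i_mode))
		mask |= FS_ISDIR;

	fsnotify(mask, inode, FSNOTIFY_EVENT_INODE, NULL, NULL, inode, 0);
}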
*/ while (fsnotify_iter_select_report_types(&iter_info)) { - ret = send_to_group(to_tell, mask, data, data_is, cookie, - file_name, &iter_info); + ret = send_to_group(mask, data, data_type, dir, file_name, + cookie, &iter_info); if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) goto out; @@ -383,7 +521,7 @@ static __init int fsnotify_init(void) { int ret; - BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 26); + BUILD_BUG_ON(HWEIGHT32(ALL_FSNOTIFY_BITS) != 25); ret = init_srcu_struct(&fsnotify_mark_srcu); if (ret) diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h index 3f246f7b8a92..4327d0e9c364 100644 --- a/fs/notify/inotify/inotify.h +++ b/fs/notify/inotify/inotify.h @@ -24,9 +24,9 @@ static inline struct inotify_event_info *INOTIFY_E(struct fsnotify_event *fse) extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group); -extern int inotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, +extern int inotify_handle_event(struct fsnotify_group *group, u32 mask, + const void *data, int data_type, + struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info); diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index 2ebc89047153..a65cf8c9f600 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -39,7 +39,7 @@ static bool event_compare(struct fsnotify_event *old_fsn, if (old->mask & FS_IN_IGNORED) return false; if ((old->mask == new->mask) && - (old_fsn->objectid == new_fsn->objectid) && + (old->wd == new->wd) && (old->name_len == new->name_len) && (!old->name_len || !strcmp(old->name, new->name))) return true; @@ -55,14 +55,11 @@ static int inotify_merge(struct list_head *list, return event_compare(last_event, event); } -int inotify_handle_event(struct fsnotify_group *group, - struct inode *inode, - u32 mask, const void *data, int data_type, - const struct qstr *file_name, u32 cookie, - struct fsnotify_iter_info *iter_info) +static int inotify_one_event(struct fsnotify_group *group, u32 mask, + struct fsnotify_mark *inode_mark, + const struct path *path, + const struct qstr *file_name, u32 cookie) { - const struct path *path = fsnotify_data_path(data, data_type); - struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); struct inotify_inode_mark *i_mark; struct inotify_event_info *event; struct fsnotify_event *fsn_event; @@ -70,9 +67,6 @@ int inotify_handle_event(struct fsnotify_group *group, int len = 0; int alloc_len = sizeof(struct inotify_event_info); - if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info))) - return 0; - if ((inode_mark->mask & FS_EXCL_UNLINK) && path && d_unlinked(path->dentry)) return 0; @@ -82,7 +76,7 @@ int inotify_handle_event(struct fsnotify_group *group, alloc_len += len + 1; } - pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, + pr_debug("%s: group=%p mark=%p mask=%x\n", __func__, group, inode_mark, mask); i_mark = container_of(inode_mark, struct inotify_inode_mark, @@ -116,7 +110,7 @@ int inotify_handle_event(struct fsnotify_group *group, mask &= ~IN_ISDIR; fsn_event = &event->fse; - fsnotify_init_event(fsn_event, (unsigned long)inode); + fsnotify_init_event(fsn_event, 0); event->mask = mask; event->wd = i_mark->wd; event->sync_cookie = cookie; @@ -136,6 +130,37 @@ int inotify_handle_event(struct fsnotify_group *group, return 0; } +int inotify_handle_event(struct fsnotify_group *group, u32 
mask, + const void *data, int data_type, struct inode *dir, + const struct qstr *file_name, u32 cookie, + struct fsnotify_iter_info *iter_info) +{ + const struct path *path = fsnotify_data_path(data, data_type); + struct fsnotify_mark *inode_mark = fsnotify_iter_inode_mark(iter_info); + struct fsnotify_mark *child_mark = fsnotify_iter_child_mark(iter_info); + int ret = 0; + + if (WARN_ON(fsnotify_iter_vfsmount_mark(iter_info))) + return 0; + + /* + * Some events cannot be sent on both parent and child marks + * (e.g. IN_CREATE). Those events are always sent on inode_mark. + * For events that are possible on both parent and child (e.g. IN_OPEN), + * event is sent on inode_mark with name if the parent is watching and + * is sent on child_mark without name if child is watching. + * If both parent and child are watching, report the event with child's + * name here and report another event without child's name below. + */ + if (inode_mark) + ret = inotify_one_event(group, mask, inode_mark, path, + file_name, cookie); + if (ret || !child_mark) + return ret; + + return inotify_one_event(group, mask, child_mark, path, NULL, 0); +} + static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group) { inotify_ignored_and_remove_idr(fsn_mark, group); diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index f88bbcc9efeb..186722ba3894 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -75,15 +75,17 @@ struct ctl_table inotify_table[] = { }; #endif /* CONFIG_SYSCTL */ -static inline __u32 inotify_arg_to_mask(u32 arg) +static inline __u32 inotify_arg_to_mask(struct inode *inode, u32 arg) { __u32 mask; /* - * everything should accept their own ignored, cares about children, - * and should receive events when the inode is unmounted + * Everything should accept their own ignored and should receive events + * when the inode is unmounted. All directories care about children. 
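One consequence of the inotify_arg_to_mask() change that follows: FS_EVENT_ON_CHILD is now set only on directory watches, since only a directory can receive events on behalf of its children. A watch on a regular file keeps the same user-visible behaviour but carries a smaller mark mask. Worked example for an IN_OPEN watch (illustrative):

/* Resulting inotify mark masks for an IN_OPEN watch after this change:
 *   directory:    FS_IN_IGNORED | FS_UNMOUNT | FS_EVENT_ON_CHILD | FS_OPEN
 *   regular file: FS_IN_IGNORED | FS_UNMOUNT | FS_OPEN
 */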
*/ - mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT); + mask = (FS_IN_IGNORED | FS_UNMOUNT); + if (S_ISDIR(inode->i_mode)) + mask |= FS_EVENT_ON_CHILD; /* mask off the flags used to open the fd */ mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK)); @@ -490,8 +492,8 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark, fsn_mark); /* Queue ignore event for the watch */ - inotify_handle_event(group, NULL, FS_IN_IGNORED, NULL, - FSNOTIFY_EVENT_NONE, NULL, 0, &iter_info); + inotify_handle_event(group, FS_IN_IGNORED, NULL, FSNOTIFY_EVENT_NONE, + NULL, NULL, 0, &iter_info); i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); /* remove this mark from the idr */ @@ -512,7 +514,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group, int create = (arg & IN_MASK_CREATE); int ret; - mask = inotify_arg_to_mask(arg); + mask = inotify_arg_to_mask(inode, arg); fsn_mark = fsnotify_find_mark(&inode->i_fsnotify_marks, group); if (!fsn_mark) @@ -565,7 +567,7 @@ static int inotify_new_watch(struct fsnotify_group *group, struct idr *idr = &group->inotify_data.idr; spinlock_t *idr_lock = &group->inotify_data.idr_lock; - mask = inotify_arg_to_mask(arg); + mask = inotify_arg_to_mask(inode, arg); tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL); if (unlikely(!tmp_i_mark)) diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c index 3c4811469ae8..cd96083a12c8 100644 --- a/fs/ntfs/dir.c +++ b/fs/ntfs/dir.c @@ -8,6 +8,7 @@ #include <linux/buffer_head.h> #include <linux/slab.h> +#include <linux/blkdev.h> #include "dir.h" #include "aops.h" @@ -1503,7 +1504,7 @@ static int ntfs_dir_fsync(struct file *filp, loff_t start, loff_t end, na.type = AT_BITMAP; na.name = I30; na.name_len = 4; - bmp_vi = ilookup5(vi->i_sb, vi->i_ino, (test_t)ntfs_test_inode, &na); + bmp_vi = ilookup5(vi->i_sb, vi->i_ino, ntfs_test_inode, &na); if (bmp_vi) { write_inode_now(bmp_vi, !datasync); iput(bmp_vi); diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c index d4359a1df3d5..9bb9f0952b18 100644 --- a/fs/ntfs/inode.c +++ b/fs/ntfs/inode.c @@ -30,10 +30,10 @@ /** * ntfs_test_inode - compare two (possibly fake) inodes for equality * @vi: vfs inode which to test - * @na: ntfs attribute which is being tested with + * @data: data which is being tested with * * Compare the ntfs attribute embedded in the ntfs specific part of the vfs - * inode @vi for equality with the ntfs attribute @na. + * inode @vi for equality with the ntfs attribute @data. * * If searching for the normal file/directory inode, set @na->type to AT_UNUSED. * @na->name and @na->name_len are then ignored. @@ -43,8 +43,9 @@ * NOTE: This function runs with the inode_hash_lock spin lock held so it is not * allowed to sleep. */ -int ntfs_test_inode(struct inode *vi, ntfs_attr *na) +int ntfs_test_inode(struct inode *vi, void *data) { + ntfs_attr *na = (ntfs_attr *)data; ntfs_inode *ni; if (vi->i_ino != na->mft_no) @@ -72,9 +73,9 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na) /** * ntfs_init_locked_inode - initialize an inode * @vi: vfs inode to initialize - * @na: ntfs attribute which to initialize @vi to + * @data: data which to initialize @vi to * - * Initialize the vfs inode @vi with the values from the ntfs attribute @na in + * Initialize the vfs inode @vi with the values from the ntfs attribute @data in * order to enable ntfs_test_inode() to do its work. * * If initializing the normal file/directory inode, set @na->type to AT_UNUSED. 
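The ntfs changes above and below drop the (test_t)/(set_t) casts: ilookup5(), ilookup5_nowait() and iget5_locked() take callbacks of type int (*)(struct inode *, void *), and casting a function pointer to a different prototype is undefined behaviour (and trips control-flow integrity checks). The idiomatic pattern, which the hunks switch to, is to accept void * and cast inside the callback. An illustrative sketch with a hypothetical key type:

/* Generic iget5_locked() callback pattern (demo_key is hypothetical). */
struct demo_key { unsigned long ino; };

static int demo_test(struct inode *vi, void *data)
{
	struct demo_key *key = data;

	return vi->i_ino == key->ino;
}

static int demo_set(struct inode *vi, void *data)
{
	struct demo_key *key = data;

	vi->i_ino = key->ino;
	return 0;
}

/* usage:
 *	struct demo_key key = { .ino = mft_no };
 *	vi = iget5_locked(sb, key.ino, demo_test, demo_set, &key);
 */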
@@ -87,8 +88,9 @@ int ntfs_test_inode(struct inode *vi, ntfs_attr *na) * NOTE: This function runs with the inode->i_lock spin lock held so it is not * allowed to sleep. (Hence the GFP_ATOMIC allocation.) */ -static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na) +static int ntfs_init_locked_inode(struct inode *vi, void *data) { + ntfs_attr *na = (ntfs_attr *)data; ntfs_inode *ni = NTFS_I(vi); vi->i_ino = na->mft_no; @@ -131,7 +133,6 @@ static int ntfs_init_locked_inode(struct inode *vi, ntfs_attr *na) return 0; } -typedef int (*set_t)(struct inode *, void *); static int ntfs_read_locked_inode(struct inode *vi); static int ntfs_read_locked_attr_inode(struct inode *base_vi, struct inode *vi); static int ntfs_read_locked_index_inode(struct inode *base_vi, @@ -164,8 +165,8 @@ struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no) na.name = NULL; na.name_len = 0; - vi = iget5_locked(sb, mft_no, (test_t)ntfs_test_inode, - (set_t)ntfs_init_locked_inode, &na); + vi = iget5_locked(sb, mft_no, ntfs_test_inode, + ntfs_init_locked_inode, &na); if (unlikely(!vi)) return ERR_PTR(-ENOMEM); @@ -225,8 +226,8 @@ struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type, na.name = name; na.name_len = name_len; - vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode, - (set_t)ntfs_init_locked_inode, &na); + vi = iget5_locked(base_vi->i_sb, na.mft_no, ntfs_test_inode, + ntfs_init_locked_inode, &na); if (unlikely(!vi)) return ERR_PTR(-ENOMEM); @@ -280,8 +281,8 @@ struct inode *ntfs_index_iget(struct inode *base_vi, ntfschar *name, na.name = name; na.name_len = name_len; - vi = iget5_locked(base_vi->i_sb, na.mft_no, (test_t)ntfs_test_inode, - (set_t)ntfs_init_locked_inode, &na); + vi = iget5_locked(base_vi->i_sb, na.mft_no, ntfs_test_inode, + ntfs_init_locked_inode, &na); if (unlikely(!vi)) return ERR_PTR(-ENOMEM); diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h index 98e670fbdd31..363e4e820673 100644 --- a/fs/ntfs/inode.h +++ b/fs/ntfs/inode.h @@ -253,9 +253,7 @@ typedef struct { ATTR_TYPE type; } ntfs_attr; -typedef int (*test_t)(struct inode *, void *); - -extern int ntfs_test_inode(struct inode *vi, ntfs_attr *na); +extern int ntfs_test_inode(struct inode *vi, void *data); extern struct inode *ntfs_iget(struct super_block *sb, unsigned long mft_no); extern struct inode *ntfs_attr_iget(struct inode *base_vi, ATTR_TYPE type, diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index fbb9f1bc623d..0d62cd5bb7f8 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c @@ -958,7 +958,7 @@ bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, * dirty code path of the inode dirty code path when writing * $MFT occurs. */ - vi = ilookup5_nowait(sb, mft_no, (test_t)ntfs_test_inode, &na); + vi = ilookup5_nowait(sb, mft_no, ntfs_test_inode, &na); } if (vi) { ntfs_debug("Base inode 0x%lx is in icache.", mft_no); @@ -1019,7 +1019,7 @@ bool ntfs_may_write_mft_record(ntfs_volume *vol, const unsigned long mft_no, vi = igrab(mft_vi); BUG_ON(vi != mft_vi); } else - vi = ilookup5_nowait(sb, na.mft_no, (test_t)ntfs_test_inode, + vi = ilookup5_nowait(sb, na.mft_no, ntfs_test_inode, &na); if (!vi) { /* diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig index aca16624b370..5d11380d8724 100644 --- a/fs/ocfs2/Kconfig +++ b/fs/ocfs2/Kconfig @@ -16,9 +16,9 @@ config OCFS2_FS You'll want to install the ocfs2-tools package in order to at least get "mount.ocfs2". 
- Project web page: http://oss.oracle.com/projects/ocfs2 - Tools web page: http://oss.oracle.com/projects/ocfs2-tools - OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/ + Project web page: https://oss.oracle.com/projects/ocfs2 + Tools web page: https://oss.oracle.com/projects/ocfs2-tools + OCFS2 mailing lists: https://oss.oracle.com/projects/ocfs2/mailman/ For more information on OCFS2, see the file <file:Documentation/filesystems/ocfs2.rst>. diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c index bb981ec76456..7b07f5df3a29 100644 --- a/fs/ocfs2/acl.c +++ b/fs/ocfs2/acl.c @@ -256,6 +256,8 @@ static int ocfs2_set_acl(handle_t *handle, ret = ocfs2_xattr_set(inode, name_index, "", value, size, 0); kfree(value); + if (!ret) + set_cached_acl(inode, type, acl); return ret; } diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 2f834add165b..4c1b90442d6f 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -4707,7 +4707,7 @@ int ocfs2_insert_extent(handle_t *handle, struct ocfs2_alloc_context *meta_ac) { int status; - int uninitialized_var(free_records); + int free_records; struct buffer_head *last_eb_bh = NULL; struct ocfs2_insert_type insert = {0, }; struct ocfs2_extent_rec rec; @@ -7051,7 +7051,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, int need_free = 0; u32 bit_off, num; handle_t *handle; - u64 uninitialized_var(block); + u64 block; struct ocfs2_inode_info *oi = OCFS2_I(inode); struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c index eaf042feaf5e..6e07ddb0e3c0 100644 --- a/fs/ocfs2/blockcheck.c +++ b/fs/ocfs2/blockcheck.c @@ -124,7 +124,7 @@ u32 ocfs2_hamming_encode(u32 parity, void *data, unsigned int d, unsigned int nr * parity bits that are part of the bit number * representation. Huh? * - * <wikipedia href="http://en.wikipedia.org/wiki/Hamming_code"> + * <wikipedia href="https://en.wikipedia.org/wiki/Hamming_code"> * In other words, the parity bit at position 2^k * checks bits in positions having bit k set in * their binary representation. 
Conversely, for diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 1ef24574f481..cea739be77c4 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -67,7 +67,7 @@ static void o2quo_fence_self(void) default: WARN_ON(o2nm_single_cluster->cl_fence_method >= O2NM_FENCE_METHODS); - /* fall through */ + fallthrough; case O2NM_FENCE_RESET: printk(KERN_ERR "*** ocfs2 is very sorry to be fencing this " "system by restarting ***\n"); diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index 5761060d2ba8..bdfba9db558a 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c @@ -848,9 +848,9 @@ static int ocfs2_dx_dir_lookup(struct inode *inode, u64 *ret_phys_blkno) { int ret = 0; - unsigned int cend, uninitialized_var(clen); - u32 uninitialized_var(cpos); - u64 uninitialized_var(blkno); + unsigned int cend, clen; + u32 cpos; + u64 blkno; u32 name_hash = hinfo->major_hash; ret = ocfs2_dx_dir_lookup_rec(inode, el, name_hash, &cpos, &blkno, @@ -894,7 +894,7 @@ static int ocfs2_dx_dir_search(const char *name, int namelen, struct ocfs2_dir_lookup_result *res) { int ret, i, found; - u64 uninitialized_var(phys); + u64 phys; struct buffer_head *dx_leaf_bh = NULL; struct ocfs2_dx_leaf *dx_leaf; struct ocfs2_dx_entry *dx_entry = NULL; @@ -4393,9 +4393,9 @@ out: int ocfs2_dx_dir_truncate(struct inode *dir, struct buffer_head *di_bh) { int ret; - unsigned int uninitialized_var(clen); - u32 major_hash = UINT_MAX, p_cpos, uninitialized_var(cpos); - u64 uninitialized_var(blkno); + unsigned int clen; + u32 major_hash = UINT_MAX, p_cpos, cpos; + u64 blkno; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); struct buffer_head *dx_root_bh = NULL; struct ocfs2_dx_root_block *dx_root; diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index ea868c6f9800..583820ec63e2 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -221,47 +221,17 @@ static __poll_t dlmfs_file_poll(struct file *file, poll_table *wait) return event; } -static ssize_t dlmfs_file_read(struct file *filp, +static ssize_t dlmfs_file_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - int bytes_left; - ssize_t got; - char *lvb_buf; - struct inode *inode = file_inode(filp); - - mlog(0, "inode %lu, count = %zu, *ppos = %llu\n", - inode->i_ino, count, *ppos); + char lvb[DLM_LVB_LEN]; - if (*ppos >= i_size_read(inode)) + if (!user_dlm_read_lvb(file_inode(file), lvb)) return 0; - /* don't read past the lvb */ - if (count > i_size_read(inode) - *ppos) - count = i_size_read(inode) - *ppos; - - if (!count) - return 0; - - lvb_buf = kmalloc(count, GFP_NOFS); - if (!lvb_buf) - return -ENOMEM; - - got = user_dlm_read_lvb(inode, lvb_buf, count); - if (got) { - BUG_ON(got != count); - bytes_left = copy_to_user(buf, lvb_buf, count); - count -= bytes_left; - } else - count = 0; - - kfree(lvb_buf); - - *ppos = *ppos + count; - - mlog(0, "read %zu bytes\n", count); - return count; + return simple_read_from_buffer(buf, count, ppos, lvb, sizeof(lvb)); } static ssize_t dlmfs_file_write(struct file *filp, @@ -269,34 +239,28 @@ static ssize_t dlmfs_file_write(struct file *filp, size_t count, loff_t *ppos) { + char lvb_buf[DLM_LVB_LEN]; int bytes_left; - char *lvb_buf; struct inode *inode = file_inode(filp); mlog(0, "inode %lu, count = %zu, *ppos = %llu\n", inode->i_ino, count, *ppos); - if (*ppos >= i_size_read(inode)) + if (*ppos >= DLM_LVB_LEN) return -ENOSPC; /* don't write past the lvb */ - if (count > i_size_read(inode) - *ppos) - count = i_size_read(inode) - *ppos; + if (count > DLM_LVB_LEN - *ppos) + 
count = DLM_LVB_LEN - *ppos; if (!count) return 0; - lvb_buf = kmalloc(count, GFP_NOFS); - if (!lvb_buf) - return -ENOMEM; - bytes_left = copy_from_user(lvb_buf, buf, count); count -= bytes_left; if (count) user_dlm_write_lvb(inode, lvb_buf, count); - kfree(lvb_buf); - *ppos = *ppos + count; mlog(0, "wrote %zu bytes\n", count); return count; diff --git a/fs/ocfs2/dlmfs/userdlm.c b/fs/ocfs2/dlmfs/userdlm.c index 3df5be25bfb1..339f098d9592 100644 --- a/fs/ocfs2/dlmfs/userdlm.c +++ b/fs/ocfs2/dlmfs/userdlm.c @@ -547,24 +547,20 @@ void user_dlm_write_lvb(struct inode *inode, spin_unlock(&lockres->l_lock); } -ssize_t user_dlm_read_lvb(struct inode *inode, - char *val, - unsigned int len) +bool user_dlm_read_lvb(struct inode *inode, char *val) { struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres; char *lvb; - ssize_t ret = len; - - BUG_ON(len > DLM_LVB_LEN); + bool ret = true; spin_lock(&lockres->l_lock); BUG_ON(lockres->l_level < DLM_LOCK_PR); if (ocfs2_dlm_lvb_valid(&lockres->l_lksb)) { lvb = ocfs2_dlm_lvb(&lockres->l_lksb); - memcpy(val, lvb, len); + memcpy(val, lvb, DLM_LVB_LEN); } else - ret = 0; + ret = false; spin_unlock(&lockres->l_lock); return ret; diff --git a/fs/ocfs2/dlmfs/userdlm.h b/fs/ocfs2/dlmfs/userdlm.h index 4bef7cdef859..0558ae768200 100644 --- a/fs/ocfs2/dlmfs/userdlm.h +++ b/fs/ocfs2/dlmfs/userdlm.h @@ -66,9 +66,7 @@ void user_dlm_cluster_unlock(struct user_lock_res *lockres, void user_dlm_write_lvb(struct inode *inode, const char *val, unsigned int len); -ssize_t user_dlm_read_lvb(struct inode *inode, - char *val, - unsigned int len); +bool user_dlm_read_lvb(struct inode *inode, char *val); struct ocfs2_cluster_connection *user_dlm_register(const struct qstr *name); void user_dlm_unregister(struct ocfs2_cluster_connection *conn); void user_dlm_set_locking_protocol(void); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 751bc4dc7466..8e3a369086db 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2871,9 +2871,15 @@ int ocfs2_nfs_sync_lock(struct ocfs2_super *osb, int ex) status = ocfs2_cluster_lock(osb, lockres, ex ? 
LKM_EXMODE : LKM_PRMODE, 0, 0); - if (status < 0) + if (status < 0) { mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status); + if (ex) + up_write(&osb->nfs_sync_rwlock); + else + up_read(&osb->nfs_sync_rwlock); + } + return status; } diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index a94852af5510..7b93e9c766f6 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -403,7 +403,7 @@ static int ocfs2_get_clusters_nocache(struct inode *inode, { int i, ret, tree_height, len; struct ocfs2_dinode *di; - struct ocfs2_extent_block *uninitialized_var(eb); + struct ocfs2_extent_block *eb; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec; struct buffer_head *eb_bh = NULL; @@ -599,7 +599,7 @@ int ocfs2_get_clusters(struct inode *inode, u32 v_cluster, unsigned int *extent_flags) { int ret; - unsigned int uninitialized_var(hole_len), flags = 0; + unsigned int hole_len, flags = 0; struct buffer_head *di_bh = NULL; struct ocfs2_extent_rec rec; diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 5381020aaa9a..c46bf7f581a1 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -2498,7 +2498,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, struct buffer_head *new_di_bh = NULL; struct ocfs2_alloc_context *inode_ac = NULL; struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; - u64 uninitialized_var(di_blkno), suballoc_loc; + u64 di_blkno, suballoc_loc; u16 suballoc_bit; status = ocfs2_inode_lock(dir, &parent_di_bh, 1); diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h index 2dd71d626196..7993d527edae 100644 --- a/fs/ocfs2/ocfs2.h +++ b/fs/ocfs2/ocfs2.h @@ -327,8 +327,8 @@ struct ocfs2_super spinlock_t osb_lock; u32 s_next_generation; unsigned long osb_flags; - s16 s_inode_steal_slot; - s16 s_meta_steal_slot; + u16 s_inode_steal_slot; + u16 s_meta_steal_slot; atomic_t s_num_inodes_stolen; atomic_t s_num_meta_stolen; diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index cfb77f70c888..3b397fa9c9e8 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -1063,7 +1063,7 @@ static int ocfs2_get_refcount_rec(struct ocfs2_caching_info *ci, struct buffer_head **ret_bh) { int ret = 0, i, found; - u32 low_cpos, uninitialized_var(cpos_end); + u32 low_cpos, cpos_end; struct ocfs2_extent_list *el; struct ocfs2_extent_rec *rec = NULL; struct ocfs2_extent_block *eb = NULL; diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c index 45745cc3408a..8c8cf7f4eb34 100644 --- a/fs/ocfs2/suballoc.c +++ b/fs/ocfs2/suballoc.c @@ -879,9 +879,9 @@ static void __ocfs2_set_steal_slot(struct ocfs2_super *osb, int slot, int type) { spin_lock(&osb->osb_lock); if (type == INODE_ALLOC_SYSTEM_INODE) - osb->s_inode_steal_slot = slot; + osb->s_inode_steal_slot = (u16)slot; else if (type == EXTENT_ALLOC_SYSTEM_INODE) - osb->s_meta_steal_slot = slot; + osb->s_meta_steal_slot = (u16)slot; spin_unlock(&osb->osb_lock); } diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h index f0a5d30a175d..50b36250beb6 100644 --- a/fs/ocfs2/suballoc.h +++ b/fs/ocfs2/suballoc.h @@ -40,7 +40,7 @@ struct ocfs2_alloc_context { u64 ac_last_group; u64 ac_max_block; /* Highest block number to allocate. 
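The dlmglue fix above keeps the nfs_sync lock/unlock pairing balanced: when the cluster lock cannot be taken, the rwsem is dropped again instead of being leaked. A condensed sketch of the pairing (assumption: osb->nfs_sync_rwlock is taken just before ocfs2_cluster_lock() earlier in the same function, which is what the new error path implies):

	if (ex)
		down_write(&osb->nfs_sync_rwlock);
	else
		down_read(&osb->nfs_sync_rwlock);

	status = ocfs2_cluster_lock(osb, lockres,
				    ex ? LKM_EXMODE : LKM_PRMODE, 0, 0);
	if (status < 0) {
		mlog(ML_ERROR, "lock on nfs sync lock failed %d\n", status);
		/* new: drop the rwsem again instead of leaking it */
		if (ex)
			up_write(&osb->nfs_sync_rwlock);
		else
			up_read(&osb->nfs_sync_rwlock);
	}
	return status;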
0 is - is the same as ~0 - unlimited */ + the same as ~0 - unlimited */ int ac_find_loc_only; /* hack for reflink operation ordering */ struct ocfs2_suballoc_result *ac_find_loc_priv; /* */ diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 71ea9ce71a6b..1d91dd1e8711 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -78,7 +78,7 @@ struct mount_options unsigned long commit_interval; unsigned long mount_opt; unsigned int atime_quantum; - signed short slot; + unsigned short slot; int localalloc_opt; unsigned int resv_level; int dir_resv_level; @@ -1349,7 +1349,7 @@ static int ocfs2_parse_options(struct super_block *sb, goto bail; } if (option) - mopt->slot = (s16)option; + mopt->slot = (u16)option; break; case Opt_commit: if (match_int(&args[0], &option)) { diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 90c830e3758e..9ccd19d8f7b1 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -1211,7 +1211,7 @@ static int ocfs2_xattr_block_get(struct inode *inode, struct ocfs2_xattr_value_root *xv; size_t size; int ret = -ENODATA, name_offset, name_len, i; - int uninitialized_var(block_off); + int block_off; xs->bucket = ocfs2_xattr_bucket_new(inode); if (!xs->bucket) { diff --git a/fs/omfs/file.c b/fs/omfs/file.c index d7b5f09d298c..2c7b70ee1388 100644 --- a/fs/omfs/file.c +++ b/fs/omfs/file.c @@ -220,7 +220,7 @@ static int omfs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh; sector_t next, offset; int ret; - u64 uninitialized_var(new_block); + u64 new_block; u32 max_extents; int extent_count; struct omfs_extent *oe; diff --git a/fs/open.c b/fs/open.c index 6cd48a61cda3..9af548fb841b 100644 --- a/fs/open.c +++ b/fs/open.c @@ -394,7 +394,7 @@ static const struct cred *access_override_creds(void) return old_cred; } -long do_faccessat(int dfd, const char __user *filename, int mode, int flags) +static long do_faccessat(int dfd, const char __user *filename, int mode, int flags) { struct path path; struct inode *inode; @@ -482,7 +482,7 @@ SYSCALL_DEFINE2(access, const char __user *, filename, int, mode) return do_faccessat(AT_FDCWD, filename, mode, 0); } -int ksys_chdir(const char __user *filename) +SYSCALL_DEFINE1(chdir, const char __user *, filename) { struct path path; int error; @@ -508,11 +508,6 @@ out: return error; } -SYSCALL_DEFINE1(chdir, const char __user *, filename) -{ - return ksys_chdir(filename); -} - SYSCALL_DEFINE1(fchdir, unsigned int, fd) { struct fd f = fdget_raw(fd); @@ -535,7 +530,7 @@ out: return error; } -int ksys_chroot(const char __user *filename) +SYSCALL_DEFINE1(chroot, const char __user *, filename) { struct path path; int error; @@ -568,12 +563,7 @@ out: return error; } -SYSCALL_DEFINE1(chroot, const char __user *, filename) -{ - return ksys_chroot(filename); -} - -static int chmod_common(const struct path *path, umode_t mode) +int chmod_common(const struct path *path, umode_t mode) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; @@ -602,25 +592,25 @@ out_unlock: return error; } -int ksys_fchmod(unsigned int fd, umode_t mode) +int vfs_fchmod(struct file *file, umode_t mode) +{ + audit_file(file); + return chmod_common(&file->f_path, mode); +} + +SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) { struct fd f = fdget(fd); int err = -EBADF; if (f.file) { - audit_file(f.file); - err = chmod_common(&f.file->f_path, mode); + err = vfs_fchmod(f.file, mode); fdput(f); } return err; } -SYSCALL_DEFINE2(fchmod, unsigned int, fd, umode_t, mode) -{ - return ksys_fchmod(fd, mode); -} - -int 
do_fchmodat(int dfd, const char __user *filename, umode_t mode) +static int do_fchmodat(int dfd, const char __user *filename, umode_t mode) { struct path path; int error; @@ -649,7 +639,7 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, umode_t, mode) return do_fchmodat(AT_FDCWD, filename, mode); } -static int chown_common(const struct path *path, uid_t user, gid_t group) +int chown_common(const struct path *path, uid_t user, gid_t group) { struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; @@ -740,23 +730,28 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group AT_SYMLINK_NOFOLLOW); } +int vfs_fchown(struct file *file, uid_t user, gid_t group) +{ + int error; + + error = mnt_want_write_file(file); + if (error) + return error; + audit_file(file); + error = chown_common(&file->f_path, user, group); + mnt_drop_write_file(file); + return error; +} + int ksys_fchown(unsigned int fd, uid_t user, gid_t group) { struct fd f = fdget(fd); int error = -EBADF; - if (!f.file) - goto out; - - error = mnt_want_write_file(f.file); - if (error) - goto out_fput; - audit_file(f.file); - error = chown_common(&f.file->f_path, user, group); - mnt_drop_write_file(f.file); -out_fput: - fdput(f); -out: + if (f.file) { + error = vfs_fchown(f.file, user, group); + fdput(f); + } return error; } @@ -784,12 +779,6 @@ static int do_dentry_open(struct file *f, return 0; } - /* Any file opened for execve()/uselib() has to be a regular file. */ - if (unlikely(f->f_flags & FMODE_EXEC && !S_ISREG(inode->i_mode))) { - error = -EACCES; - goto cleanup_file; - } - if (f->f_mode & FMODE_WRITE && !special_file(inode->i_mode)) { error = get_write_access(inode); if (unlikely(error)) @@ -1198,7 +1187,9 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode) SYSCALL_DEFINE3(open, const char __user *, filename, int, flags, umode_t, mode) { - return ksys_open(filename, flags, mode); + if (force_o_largefile()) + flags |= O_LARGEFILE; + return do_sys_open(AT_FDCWD, filename, flags, mode); } SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, flags, @@ -1260,9 +1251,12 @@ COMPAT_SYSCALL_DEFINE4(openat, int, dfd, const char __user *, filename, int, fla */ SYSCALL_DEFINE2(creat, const char __user *, pathname, umode_t, mode) { - return ksys_open(pathname, O_CREAT | O_WRONLY | O_TRUNC, mode); -} + int flags = O_CREAT | O_WRONLY | O_TRUNC; + if (force_o_largefile()) + flags |= O_LARGEFILE; + return do_sys_open(AT_FDCWD, pathname, flags, mode); +} #endif /* @@ -1310,6 +1304,23 @@ SYSCALL_DEFINE1(close, unsigned int, fd) return retval; } +/** + * close_range() - Close all file descriptors in a given range. + * + * @fd: starting file descriptor to close + * @max_fd: last file descriptor to close + * @flags: reserved for future extensions + * + * This closes a range of file descriptors. All file descriptors + * from @fd up to and including @max_fd are closed. + * Currently, errors to close a given file descriptor are ignored. + */ +SYSCALL_DEFINE3(close_range, unsigned int, fd, unsigned int, max_fd, + unsigned int, flags) +{ + return __close_range(fd, max_fd, flags); +} + /* * This routine simulates a hangup on the tty, to arrange that users * are given clean terminals at login time. 
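The new close_range() syscall documented above closes every descriptor from @fd up to and including @max_fd. A hedged userspace sketch using the raw syscall number, since a libc wrapper may not exist yet (assumes __NR_close_range is provided by the installed kernel headers):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static long close_range_syscall(unsigned int fd, unsigned int max_fd,
				unsigned int flags)
{
	return syscall(__NR_close_range, fd, max_fd, flags);
}

int main(void)
{
	/* Close everything from fd 3 upwards, e.g. before exec'ing a helper. */
	if (close_range_syscall(3, ~0U, 0) < 0)
		perror("close_range");
	return 0;
}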
diff --git a/fs/orangefs/acl.c b/fs/orangefs/acl.c index eced272a3c57..a25e6c890975 100644 --- a/fs/orangefs/acl.c +++ b/fs/orangefs/acl.c @@ -122,6 +122,8 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) struct iattr iattr; int rc; + memset(&iattr, 0, sizeof iattr); + if (type == ACL_TYPE_ACCESS && acl) { /* * posix_acl_update_mode checks to see if the permissions @@ -138,18 +140,17 @@ int orangefs_set_acl(struct inode *inode, struct posix_acl *acl, int type) return error; } - if (acl) { - rc = __orangefs_set_acl(inode, acl, type); - } else { + if (inode->i_mode != iattr.ia_mode) iattr.ia_valid = ATTR_MODE; - rc = __orangefs_setattr(inode, &iattr); - } - return rc; - - } else { - return -EINVAL; } + + rc = __orangefs_set_acl(inode, acl, type); + + if (!rc && (iattr.ia_valid == ATTR_MODE)) + rc = __orangefs_setattr(inode, &iattr); + + return rc; } int orangefs_init_acl(struct inode *inode, struct inode *dir) diff --git a/fs/orangefs/orangefs-mod.c b/fs/orangefs/orangefs-mod.c index 289b648ae196..74a3d6337ef4 100644 --- a/fs/orangefs/orangefs-mod.c +++ b/fs/orangefs/orangefs-mod.c @@ -149,7 +149,6 @@ static int __init orangefs_init(void) pr_info("%s: module version %s loaded\n", __func__, ORANGEFS_VERSION); - ret = 0; goto out; } diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 5e0cde85bd6b..d07fb92b7253 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -787,7 +787,7 @@ static int ovl_copy_up_meta_inode_data(struct ovl_copy_up_ctx *c) struct path upperpath, datapath; int err; char *capability = NULL; - ssize_t uninitialized_var(cap_size); + ssize_t cap_size; ovl_path_upper(c->dentry, &upperpath); if (WARN_ON(upperpath.dentry == NULL)) diff --git a/fs/proc/array.c b/fs/proc/array.c index 55ecbeb3a721..65ec2029fa80 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c @@ -341,6 +341,8 @@ static inline void task_seccomp(struct seq_file *m, struct task_struct *p) seq_put_decimal_ull(m, "NoNewPrivs:\t", task_no_new_privs(p)); #ifdef CONFIG_SECCOMP seq_put_decimal_ull(m, "\nSeccomp:\t", p->seccomp.mode); + seq_put_decimal_ull(m, "\nSeccomp_filters:\t", + atomic_read(&p->seccomp.filter_count)); #endif seq_puts(m, "\nSpeculation_Store_Bypass:\t"); switch (arch_prctl_spec_ctrl_get(p, PR_SPEC_STORE_BYPASS)) { diff --git a/fs/proc/base.c b/fs/proc/base.c index d86c0afc8a85..617db4e0faa0 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -551,8 +551,17 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, { unsigned long totalpages = totalram_pages() + total_swap_pages; unsigned long points = 0; + long badness; + + badness = oom_badness(task, totalpages); + /* + * Special case OOM_SCORE_ADJ_MIN for all others scale the + * badness value into [0, 2000] range which we have been + * exporting for a long time so userspace might depend on it. + */ + if (badness != LONG_MIN) + points = (1000 + badness * 1000 / (long)totalpages) * 2 / 3; - points = oom_badness(task, totalpages) * 1000 / totalpages; seq_printf(m, "%lu\n", points); return 0; @@ -2189,16 +2198,16 @@ struct map_files_info { }; /* - * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the - * symlinks may be used to bypass permissions on ancestor directories in the - * path to the file in question. + * Only allow CAP_SYS_ADMIN and CAP_CHECKPOINT_RESTORE to follow the links, due + * to concerns about how the symlinks may be used to bypass permissions on + * ancestor directories in the path to the file in question. 
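On the /proc/<pid>/oom_score change above: the value is now derived by rescaling oom_badness() into the historical [0, 2000] range rather than dividing raw badness by totalpages. A worked example with illustrative numbers (assumption: oom_badness() spans roughly [-totalpages, 2 * totalpages], with LONG_MIN reserved for "never kill"):

/* points = (1000 + badness * 1000 / totalpages) * 2 / 3
 *
 *   badness = -totalpages      -> (1000 - 1000) * 2 / 3 =    0
 *   badness =  totalpages / 2  -> (1000 +  500) * 2 / 3 = 1000
 *   badness =  2 * totalpages  -> (1000 + 2000) * 2 / 3 = 2000
 */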
*/ static const char * proc_map_files_get_link(struct dentry *dentry, struct inode *inode, struct delayed_call *done) { - if (!capable(CAP_SYS_ADMIN)) + if (!checkpoint_restore_ns_capable(&init_user_ns)) return ERR_PTR(-EPERM); return proc_pid_get_link(dentry, inode, done); diff --git a/fs/proc/devices.c b/fs/proc/devices.c index 37d38697eaf8..837971e74109 100644 --- a/fs/proc/devices.c +++ b/fs/proc/devices.c @@ -3,6 +3,7 @@ #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> +#include <linux/blkdev.h> static int devinfo_show(struct seq_file *f, void *v) { diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index e9a6841fc25b..887a5532e449 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -41,7 +41,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) si_meminfo(&i); si_swapinfo(&i); - committed = percpu_counter_read_positive(&vm_committed_as); + committed = vm_memory_committed(); cached = global_node_page_state(NR_FILE_PAGES) - total_swapcache_pages() - i.bufferram; @@ -52,8 +52,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v) pages[lru] = global_node_page_state(NR_LRU_BASE + lru); available = si_mem_available(); - sreclaimable = global_node_page_state(NR_SLAB_RECLAIMABLE); - sunreclaim = global_node_page_state(NR_SLAB_UNRECLAIMABLE); + sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); + sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); show_val_kb(m, "MemTotal: ", i.totalram); show_val_kb(m, "MemFree: ", i.freeram); @@ -101,10 +101,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "SReclaimable: ", sreclaimable); show_val_kb(m, "SUnreclaim: ", sunreclaim); seq_printf(m, "KernelStack: %8lu kB\n", - global_zone_page_state(NR_KERNEL_STACK_KB)); + global_node_page_state(NR_KERNEL_STACK_KB)); #ifdef CONFIG_SHADOW_CALL_STACK seq_printf(m, "ShadowCallStack:%8lu kB\n", - global_zone_page_state(NR_KERNEL_SCS_KB)); + global_node_page_state(NR_KERNEL_SCS_KB)); #endif show_val_kb(m, "PageTables: ", global_zone_page_state(NR_PAGETABLE)); diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c index dba63b2429f0..ed8a6306990c 100644 --- a/fs/proc/proc_net.c +++ b/fs/proc/proc_net.c @@ -98,7 +98,7 @@ static const struct proc_ops proc_net_seq_ops = { .proc_release = seq_release_net, }; -int bpf_iter_init_seq_net(void *priv_data) +int bpf_iter_init_seq_net(void *priv_data, struct bpf_iter_aux_info *aux) { #ifdef CONFIG_NET_NS struct seq_net_private *p = priv_data; diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dbda4499a859..5066b0251ed8 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -786,7 +786,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss, SEQ_PUT_DEC(" kB\nLazyFree: ", mss->lazyfree); SEQ_PUT_DEC(" kB\nAnonHugePages: ", mss->anonymous_thp); SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp); - SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp); + SEQ_PUT_DEC(" kB\nFilePmdMapped: ", mss->file_thp); SEQ_PUT_DEC(" kB\nShared_Hugetlb: ", mss->shared_hugetlb); seq_put_decimal_ull_width(m, " kB\nPrivate_Hugetlb: ", mss->private_hugetlb >> 10, 7); @@ -816,7 +816,7 @@ static int show_smap(struct seq_file *m, void *v) __show_smap(m, &mss, false); - seq_printf(m, "THPeligible: %d\n", + seq_printf(m, "THPeligible: %d\n", transparent_hugepage_enabled(vma)); if (arch_pkeys_enabled()) diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c index a9e297eefdff..36714df37d5d 100644 --- a/fs/pstore/platform.c +++ b/fs/pstore/platform.c @@ 
-269,6 +269,9 @@ static int pstore_compress(const void *in, void *out, { int ret; + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION)) + return -EINVAL; + ret = crypto_comp_compress(tfm, in, inlen, out, &outlen); if (ret) { pr_err("crypto_comp_compress failed, ret = %d!\n", ret); @@ -668,7 +671,7 @@ static void decompress_record(struct pstore_record *record) int unzipped_len; char *unzipped, *workspace; - if (!record->compressed) + if (!IS_ENABLED(CONFIG_PSTORE_COMPRESSION) || !record->compressed) return; /* Only PSTORE_TYPE_DMESG support compression. */ diff --git a/fs/pstore/zone.c b/fs/pstore/zone.c index 819428dfa32f..3ce89216670c 100644 --- a/fs/pstore/zone.c +++ b/fs/pstore/zone.c @@ -1081,7 +1081,6 @@ next_zone: readop = psz_ftrace_read; break; case PSTORE_TYPE_CONSOLE: - fallthrough; case PSTORE_TYPE_PMSG: readop = psz_record_read; break; diff --git a/fs/quota/Kconfig b/fs/quota/Kconfig index 7218314ca13f..d1ceb76adb71 100644 --- a/fs/quota/Kconfig +++ b/fs/quota/Kconfig @@ -15,7 +15,7 @@ config QUOTA Ext3, ext4 and reiserfs also support journaled quotas for which you don't need to run quotacheck(8) after an unclean shutdown. For further details, read the Quota mini-HOWTO, available from - <http://www.tldp.org/docs.html#howto>, or the documentation provided + <https://www.tldp.org/docs.html#howto>, or the documentation provided with the quota tools. Probably the quota support is only useful for multi user systems. If unsure, say N. diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 7b4bac91146b..bb02989d92b6 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -78,6 +78,7 @@ #include <linux/namei.h> #include <linux/capability.h> #include <linux/quotaops.h> +#include <linux/blkdev.h> #include "../internal.h" /* ugh */ #include <linux/uaccess.h> diff --git a/fs/quota/quota.c b/fs/quota/quota.c index 5444d3c4d93f..47f9e151988b 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c @@ -38,7 +38,7 @@ static int check_quotactl_permission(struct super_block *sb, int type, int cmd, if ((type == USRQUOTA && uid_eq(current_euid(), make_kuid(current_user_ns(), id))) || (type == GRPQUOTA && in_egroup_p(make_kgid(current_user_ns(), id)))) break; - /*FALLTHROUGH*/ + fallthrough; default: if (!capable(CAP_SYS_ADMIN)) return -EPERM; diff --git a/fs/read_write.c b/fs/read_write.c index 4fb797822567..5db58b8c78d0 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -301,7 +301,7 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int whence) } EXPORT_SYMBOL(vfs_llseek); -off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence) +static off_t ksys_lseek(unsigned int fd, off_t offset, unsigned int whence) { off_t retval; struct fd f = fdget_pos(fd); diff --git a/fs/readdir.c b/fs/readdir.c index a49f07c11cfb..19434b3c982c 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -348,8 +348,8 @@ efault: return -EFAULT; } -int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, - unsigned int count) +SYSCALL_DEFINE3(getdents64, unsigned int, fd, + struct linux_dirent64 __user *, dirent, unsigned int, count) { struct fd f; struct getdents_callback64 buf = { @@ -380,13 +380,6 @@ int ksys_getdents64(unsigned int fd, struct linux_dirent64 __user *dirent, return error; } - -SYSCALL_DEFINE3(getdents64, unsigned int, fd, - struct linux_dirent64 __user *, dirent, unsigned int, count) -{ - return ksys_getdents64(fd, dirent, count); -} - #ifdef CONFIG_COMPAT struct compat_old_linux_dirent { compat_ulong_t d_ino; diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index 
5b50689d8539..79ee2b436685 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -289,7 +289,7 @@ void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid, /* direntry header of "." */ put_deh_offset(dot, DOT_OFFSET); - /* these two are from make_le_item_head, and are are LE */ + /* these two are from make_le_item_head, and are LE */ dot->deh_dir_id = dirid; dot->deh_objectid = objid; dot->deh_state = 0; /* Endian safe if 0 */ @@ -299,7 +299,7 @@ void make_empty_dir_item_v1(char *body, __le32 dirid, __le32 objid, /* direntry header of ".." */ put_deh_offset(dotdot, DOT_DOT_OFFSET); /* key of ".." for the root directory */ - /* these two are from the inode, and are are LE */ + /* these two are from the inode, and are LE */ dotdot->deh_dir_id = par_dirid; dotdot->deh_objectid = par_objid; dotdot->deh_state = 0; /* Endian safe if 0 */ @@ -323,7 +323,7 @@ void make_empty_dir_item(char *body, __le32 dirid, __le32 objid, /* direntry header of "." */ put_deh_offset(dot, DOT_OFFSET); - /* these two are from make_le_item_head, and are are LE */ + /* these two are from make_le_item_head, and are LE */ dot->deh_dir_id = dirid; dot->deh_objectid = objid; dot->deh_state = 0; /* Endian safe if 0 */ @@ -333,7 +333,7 @@ void make_empty_dir_item(char *body, __le32 dirid, __le32 objid, /* direntry header of ".." */ put_deh_offset(dotdot, DOT_DOT_OFFSET); /* key of ".." for the root directory */ - /* these two are from the inode, and are are LE */ + /* these two are from the inode, and are LE */ dotdot->deh_dir_id = par_dirid; dotdot->deh_objectid = par_objid; dotdot->deh_state = 0; /* Endian safe if 0 */ diff --git a/fs/reiserfs/fix_node.c b/fs/reiserfs/fix_node.c index 117092224111..fefe87e1c099 100644 --- a/fs/reiserfs/fix_node.c +++ b/fs/reiserfs/fix_node.c @@ -611,9 +611,9 @@ static int get_num_ver(int mode, struct tree_balance *tb, int h, * blk_num number of blocks that S[h] will be splitted into; * s012 number of items that fall into splitted nodes. * lbytes number of bytes which flow to the left neighbor from the - * item that is not not shifted entirely + * item that is not shifted entirely * rbytes number of bytes which flow to the right neighbor from the - * item that is not not shifted entirely + * item that is not shifted entirely * s1bytes number of bytes which flow to the first new node when * S[0] splits (this number is contained in s012 array) */ diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index 5c766330e493..e98f99338f8f 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -32,7 +32,7 @@ * to disk for all backgrounded commits that have been * around too long. * -- Note, if you call this as an immediate flush from - * from within kupdate, it will ignore the immediate flag + * within kupdate, it will ignore the immediate flag */ #include <linux/time.h> diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index ff336513c254..155b82870333 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -15,6 +15,7 @@ #include "reiserfs.h" #include <linux/init.h> #include <linux/proc_fs.h> +#include <linux/blkdev.h> /* * LOCKING: diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h index 726580114d55..f69871516167 100644 --- a/fs/reiserfs/reiserfs.h +++ b/fs/reiserfs/reiserfs.h @@ -1109,7 +1109,7 @@ int is_reiserfs_jr(struct reiserfs_super_block *rs); * ReiserFS leaves the first 64k unused, so that partition labels have * enough space. 
If someone wants to write a fancy bootloader that * needs more than 64k, let us know, and this will be increased in size. - * This number must be larger than than the largest block size on any + * This number must be larger than the largest block size on any * platform, or code will break. -Hans */ #define REISERFS_DISK_OFFSET_IN_BYTES (64 * 1024) diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c index 05f666794561..ccd40df6eb45 100644 --- a/fs/reiserfs/xattr_acl.c +++ b/fs/reiserfs/xattr_acl.c @@ -373,7 +373,7 @@ int reiserfs_cache_default_acl(struct inode *inode) /* Other xattrs can be created during inode creation. We don't * want to claim too many blocks, so we check to see if we - * we need to create the tree to the xattrs, and then we + * need to create the tree to the xattrs, and then we * just want two files. */ nblocks = reiserfs_xattr_jcreate_nblocks(inode); nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb); diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c index 6b2b4362089e..b57b3ffcbc32 100644 --- a/fs/romfs/storage.c +++ b/fs/romfs/storage.c @@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos, size_t limit; limit = romfs_maxsize(sb); - if (pos >= limit) + if (pos >= limit || buflen > limit - pos) return -EIO; - if (buflen > limit - pos) - buflen = limit - pos; #ifdef CONFIG_ROMFS_ON_MTD if (sb->s_mtd) diff --git a/fs/seq_file.c b/fs/seq_file.c index 4e6239f33c06..31219c1db17d 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -295,7 +295,7 @@ loff_t seq_lseek(struct file *file, loff_t offset, int whence) switch (whence) { case SEEK_CUR: offset += file->f_pos; - /* fall through */ + fallthrough; case SEEK_SET: if (offset < 0) break; diff --git a/fs/signalfd.c b/fs/signalfd.c index 44b6845b071c..456046e15873 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c @@ -176,7 +176,7 @@ static ssize_t signalfd_dequeue(struct signalfd_ctx *ctx, kernel_siginfo_t *info if (!nonblock) break; ret = -EAGAIN; - /* fall through */ + fallthrough; default: spin_unlock_irq(¤t->sighand->siglock); return ret; @@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, flags); } @@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask, { sigset_t mask; - if (sizemask != sizeof(sigset_t) || - copy_from_user(&mask, user_mask, sizeof(mask))) + if (sizemask != sizeof(sigset_t)) return -EINVAL; + if (copy_from_user(&mask, user_mask, sizeof(mask))) + return -EFAULT; return do_signalfd4(ufd, &mask, 0); } diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 76bb1c846845..8a19773b5a0b 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c @@ -87,7 +87,11 @@ static int squashfs_bio_read(struct super_block *sb, u64 index, int length, int error, i; struct bio *bio; - bio = bio_alloc(GFP_NOIO, page_count); + if (page_count <= BIO_MAX_PAGES) + bio = bio_alloc(GFP_NOIO, page_count); + else + bio = bio_kmalloc(GFP_NOIO, page_count); + if (!bio) return -ENOMEM; diff --git a/fs/ubifs/commit.c b/fs/ubifs/commit.c index ad292c5a43a9..b5cdac9b0368 100644 --- a/fs/ubifs/commit.c +++ b/fs/ubifs/commit.c @@ -552,11 +552,11 @@ out: */ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot) { - int lnum, offs, len, err = 0, 
uninitialized_var(last_level), child_cnt; + int lnum, offs, len, err = 0, last_level, child_cnt; int first = 1, iip; struct ubifs_debug_info *d = c->dbg; - union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key; - unsigned long long uninitialized_var(last_sqnum); + union ubifs_key lower_key, upper_key, l_key, u_key; + unsigned long long last_sqnum; struct ubifs_idx_node *idx; struct list_head list; struct idx_node *i; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index ef85ec167a84..9d042942d8b2 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -1260,7 +1260,7 @@ static int do_rename(struct inode *old_dir, struct dentry *old_dentry, struct ubifs_budget_req ino_req = { .dirtied_ino = 1, .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) }; struct timespec64 time; - unsigned int uninitialized_var(saved_nlink); + unsigned int saved_nlink; struct fscrypt_name old_nm, new_nm; /* diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c index 49fe062ce45e..b77d1637bbbc 100644 --- a/fs/ubifs/file.c +++ b/fs/ubifs/file.c @@ -222,7 +222,7 @@ static int write_begin_slow(struct address_space *mapping, struct ubifs_info *c = inode->i_sb->s_fs_info; pgoff_t index = pos >> PAGE_SHIFT; struct ubifs_budget_req req = { .new_page = 1 }; - int uninitialized_var(err), appending = !!(pos + len > inode->i_size); + int err, appending = !!(pos + len > inode->i_size); struct page *page; dbg_gen("ino %lu, pos %llu, len %u, i_size %lld", @@ -426,7 +426,7 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping, struct ubifs_info *c = inode->i_sb->s_fs_info; struct ubifs_inode *ui = ubifs_inode(inode); pgoff_t index = pos >> PAGE_SHIFT; - int uninitialized_var(err), appending = !!(pos + len > inode->i_size); + int err, appending = !!(pos + len > inode->i_size); int skipped_read = 0; struct page *page; diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c index e5ec1afe1c66..4a5b06f8d812 100644 --- a/fs/ubifs/journal.c +++ b/fs/ubifs/journal.c @@ -539,7 +539,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, const struct fscrypt_name *nm, const struct inode *inode, int deletion, int xent) { - int err, dlen, ilen, len, lnum, ino_offs, dent_offs; + int err, dlen, ilen, len, lnum, ino_offs, dent_offs, orphan_added = 0; int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir); int last_reference = !!(deletion && inode->i_nlink == 0); struct ubifs_inode *ui = ubifs_inode(inode); @@ -630,6 +630,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir, goto out_finish; } ui->del_cmtno = c->cmt_no; + orphan_added = 1; } err = write_head(c, BASEHD, dent, len, &lnum, &dent_offs, sync); @@ -702,7 +703,7 @@ out_release: kfree(dent); out_ro: ubifs_ro_mode(c, err); - if (last_reference) + if (orphan_added) ubifs_delete_orphan(c, inode->i_ino); finish_reservation(c); return err; @@ -1218,11 +1219,11 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode *old_dir, void *p; union ubifs_key key; struct ubifs_dent_node *dent, *dent2; - int err, dlen1, dlen2, ilen, lnum, offs, len; + int err, dlen1, dlen2, ilen, lnum, offs, len, orphan_added = 0; int aligned_dlen1, aligned_dlen2, plen = UBIFS_INO_NODE_SZ; int last_reference = !!(new_inode && new_inode->i_nlink == 0); int move = (old_dir != new_dir); - struct ubifs_inode *uninitialized_var(new_ui); + struct ubifs_inode *new_ui; u8 hash_old_dir[UBIFS_HASH_ARR_SZ]; u8 hash_new_dir[UBIFS_HASH_ARR_SZ]; u8 hash_new_inode[UBIFS_HASH_ARR_SZ]; @@ -1334,6 +1335,7 @@ int ubifs_jnl_rename(struct ubifs_info *c, const struct inode 
*old_dir, goto out_finish; } new_ui->del_cmtno = c->cmt_no; + orphan_added = 1; } err = write_head(c, BASEHD, dent, len, &lnum, &offs, sync); @@ -1415,7 +1417,7 @@ out_release: release_head(c, BASEHD); out_ro: ubifs_ro_mode(c, err); - if (last_reference) + if (orphan_added) ubifs_delete_orphan(c, new_inode->i_ino); out_finish: finish_reservation(c); @@ -1507,7 +1509,7 @@ int ubifs_jnl_truncate(struct ubifs_info *c, const struct inode *inode, union ubifs_key key, to_key; struct ubifs_ino_node *ino; struct ubifs_trun_node *trun; - struct ubifs_data_node *uninitialized_var(dn); + struct ubifs_data_node *dn; int err, dlen, len, lnum, offs, bit, sz, sync = IS_SYNC(inode); struct ubifs_inode *ui = ubifs_inode(inode); ino_t inum = inode->i_ino; diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c index 22bfda158f7f..6d6cd85c2b4c 100644 --- a/fs/ubifs/lprops.c +++ b/fs/ubifs/lprops.c @@ -269,7 +269,7 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, break; /* No more room on heap so make it un-categorized */ cat = LPROPS_UNCAT; - /* Fall through */ + fallthrough; case LPROPS_UNCAT: list_add(&lprops->list, &c->uncat_list); break; @@ -313,7 +313,7 @@ static void ubifs_remove_from_cat(struct ubifs_info *c, case LPROPS_FREEABLE: c->freeable_cnt -= 1; ubifs_assert(c, c->freeable_cnt >= 0); - /* Fall through */ + fallthrough; case LPROPS_UNCAT: case LPROPS_EMPTY: case LPROPS_FRDI_IDX: diff --git a/fs/ubifs/lpt.c b/fs/ubifs/lpt.c index e21abf250951..6e0a153b7194 100644 --- a/fs/ubifs/lpt.c +++ b/fs/ubifs/lpt.c @@ -275,7 +275,7 @@ uint32_t ubifs_unpack_bits(const struct ubifs_info *c, uint8_t **addr, int *pos, const int k = 32 - nrbits; uint8_t *p = *addr; int b = *pos; - uint32_t uninitialized_var(val); + uint32_t val; const int bytes = (nrbits + b + 7) >> 3; ubifs_assert(c, nrbits > 0); diff --git a/fs/ubifs/misc.h b/fs/ubifs/misc.h index c97a4d537d83..615878e884ba 100644 --- a/fs/ubifs/misc.h +++ b/fs/ubifs/misc.h @@ -121,7 +121,7 @@ static inline const char *ubifs_compr_name(struct ubifs_info *c, int compr_type) * ubifs_wbuf_sync - synchronize write-buffer. * @wbuf: write-buffer to synchronize * - * This is the same as as 'ubifs_wbuf_sync_nolock()' but it does not assume + * This is the same as 'ubifs_wbuf_sync_nolock()' but it does not assume * that the write-buffer is already locked. 
*/ static inline int ubifs_wbuf_sync(struct ubifs_wbuf *wbuf) diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c index 4b4b65b48c57..c0d3e4008d23 100644 --- a/fs/ubifs/sb.c +++ b/fs/ubifs/sb.c @@ -174,7 +174,8 @@ static int create_default_filesystem(struct ubifs_info *c) tmp64 = (long long)max_buds * c->leb_size; if (big_lpt) sup_flags |= UBIFS_FLG_BIGLPT; - sup_flags |= UBIFS_FLG_DOUBLE_HASH; + if (ubifs_default_version > 4) + sup_flags |= UBIFS_FLG_DOUBLE_HASH; if (ubifs_authenticated(c)) { sup_flags |= UBIFS_FLG_AUTHENTICATION; @@ -200,7 +201,7 @@ static int create_default_filesystem(struct ubifs_info *c) sup->jhead_cnt = cpu_to_le32(DEFAULT_JHEADS_CNT); sup->fanout = cpu_to_le32(DEFAULT_FANOUT); sup->lsave_cnt = cpu_to_le32(c->lsave_cnt); - sup->fmt_version = cpu_to_le32(UBIFS_FORMAT_VERSION); + sup->fmt_version = cpu_to_le32(ubifs_default_version); sup->time_gran = cpu_to_le32(DEFAULT_TIME_GRAN); if (c->mount_opts.override_compr) sup->default_compr = cpu_to_le16(c->mount_opts.compr_type); diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 7fc2f3f07c16..a2420c900275 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -26,6 +26,24 @@ #include <linux/writeback.h> #include "ubifs.h" +static int ubifs_default_version_set(const char *val, const struct kernel_param *kp) +{ + int n = 0, ret; + + ret = kstrtoint(val, 10, &n); + if (ret != 0 || n < 4 || n > UBIFS_FORMAT_VERSION) + return -EINVAL; + return param_set_int(val, kp); +} + +static const struct kernel_param_ops ubifs_default_version_ops = { + .set = ubifs_default_version_set, + .get = param_get_int, +}; + +int ubifs_default_version = UBIFS_FORMAT_VERSION; +module_param_cb(default_version, &ubifs_default_version_ops, &ubifs_default_version, 0600); + /* * Maximum amount of memory we may 'kmalloc()' without worrying that we are * allocating too much. 
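[Editorial aside, not part of the patch above: the fs/ubifs/super.c hunk introduces a writable module parameter, default_version, whose custom .set hook rejects values outside 4..UBIFS_FORMAT_VERSION before param_set_int() stores them, and fs/ubifs/sb.c then uses the chosen version when creating a default filesystem. The following is a minimal out-of-tree sketch of the same bounded-parameter pattern; the module and symbol names (demo_level, demo_level_set, demo_level_ops) are hypothetical, and the 4..5 range is hard-coded here purely for illustration where the real code bounds by UBIFS_FORMAT_VERSION.]

/*
 * Illustrative out-of-tree module sketch: a module parameter whose setter
 * validates the value before it is stored, mirroring the pattern the ubifs
 * hunk adds for "default_version". Invalid values make module load or a
 * write to /sys/module/<name>/parameters/level fail with -EINVAL instead of
 * silently taking effect.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>

static int demo_level = 5;	/* accepted range: 4..5 in this sketch */

static int demo_level_set(const char *val, const struct kernel_param *kp)
{
	int n = 0, ret;

	ret = kstrtoint(val, 10, &n);
	if (ret || n < 4 || n > 5)
		return -EINVAL;
	return param_set_int(val, kp);	/* store the validated value */
}

static const struct kernel_param_ops demo_level_ops = {
	.set = demo_level_set,
	.get = param_get_int,
};

module_param_cb(level, &demo_level_ops, &demo_level, 0600);
MODULE_PARM_DESC(level, "demo bounded parameter (4..5)");
MODULE_LICENSE("GPL");

[Usage, assuming the module is built as demo.ko: "insmod demo.ko level=4" loads normally, while "echo 9 > /sys/module/demo/parameters/level" returns EINVAL because the setter runs before the value is committed.]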
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c index e8e7b0e9532e..f609f6cdde70 100644 --- a/fs/ubifs/tnc.c +++ b/fs/ubifs/tnc.c @@ -892,7 +892,7 @@ static int fallible_resolve_collision(struct ubifs_info *c, int adding) { struct ubifs_znode *o_znode = NULL, *znode = *zn; - int uninitialized_var(o_n), err, cmp, unsure = 0, nn = *n; + int o_n, err, cmp, unsure = 0, nn = *n; cmp = fallible_matches_name(c, &znode->zbranch[nn], nm); if (unlikely(cmp < 0)) @@ -1514,8 +1514,8 @@ out: */ int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) { - int n, err = 0, lnum = -1, uninitialized_var(offs); - int uninitialized_var(len); + int n, err = 0, lnum = -1, offs; + int len; unsigned int block = key_block(c, &bu->key); struct ubifs_znode *znode; diff --git a/fs/ubifs/tnc_misc.c b/fs/ubifs/tnc_misc.c index 49cb34c3f324..ccaf94ea5be3 100644 --- a/fs/ubifs/tnc_misc.c +++ b/fs/ubifs/tnc_misc.c @@ -126,8 +126,8 @@ int ubifs_search_zbranch(const struct ubifs_info *c, const struct ubifs_znode *znode, const union ubifs_key *key, int *n) { - int beg = 0, end = znode->child_cnt, uninitialized_var(mid); - int uninitialized_var(cmp); + int beg = 0, end = znode->child_cnt, mid; + int cmp; const struct ubifs_zbranch *zbr = &znode->zbranch[0]; ubifs_assert(c, end > beg); diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h index bff682309fbe..4ffd832e3b93 100644 --- a/fs/ubifs/ubifs.h +++ b/fs/ubifs/ubifs.h @@ -1504,6 +1504,7 @@ extern const struct file_operations ubifs_dir_operations; extern const struct inode_operations ubifs_dir_inode_operations; extern const struct inode_operations ubifs_symlink_inode_operations; extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT]; +extern int ubifs_default_version; /* auth.c */ static inline int ubifs_authenticated(const struct ubifs_info *c) diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c index 02f03fadb75b..8e597db4d971 100644 --- a/fs/udf/balloc.c +++ b/fs/udf/balloc.c @@ -564,7 +564,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb, udf_pblk_t newblock = 0; uint32_t adsize; uint32_t elen, goal_elen = 0; - struct kernel_lb_addr eloc, uninitialized_var(goal_eloc); + struct kernel_lb_addr eloc, goal_eloc; struct extent_position epos, goal_epos; int8_t etype; struct udf_inode_info *iinfo = UDF_I(table); diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h index 736ebc5dc441..185c3e247648 100644 --- a/fs/udf/ecma_167.h +++ b/fs/udf/ecma_167.h @@ -2,7 +2,7 @@ * ecma_167.h * * This file is based on ECMA-167 3rd edition (June 1997) - * http://www.ecma.ch + * https://www.ecma.ch * * Copyright (c) 2001-2002 Ben Fennema * Copyright (c) 2017-2019 Pali Rohár <pali@kernel.org> diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h index d5fbfab3ddb6..22bc4fb2feb9 100644 --- a/fs/udf/osta_udf.h +++ b/fs/udf/osta_udf.h @@ -226,7 +226,7 @@ struct sparingTable { #define ICBTAG_FILE_TYPE_MIRROR 0xFB #define ICBTAG_FILE_TYPE_BITMAP 0xFC -/* struct struct long_ad ICB - ADImpUse (UDF 2.60 2.2.4.3) */ +/* struct long_ad ICB - ADImpUse (UDF 2.60 2.2.4.3) */ struct allocDescImpUse { __le16 flags; uint8_t impUse[4]; diff --git a/fs/udf/super.c b/fs/udf/super.c index f747bf72edbe..1c42f544096d 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -11,8 +11,8 @@ * This code is based on version 2.00 of the UDF specification, * and revision 3 of the ECMA 167 standard [equivalent to ISO 13346]. 
* http://www.osta.org/ - * http://www.ecma.ch/ - * http://www.iso.org/ + * https://www.ecma.ch/ + * https://www.iso.org/ * * COPYRIGHT * This file is distributed under the terms of the GNU General Public diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c index 6023c97c6da2..25ff91c7e94a 100644 --- a/fs/udf/symlink.c +++ b/fs/udf/symlink.c @@ -52,7 +52,7 @@ static int udf_pc_to_char(struct super_block *sb, unsigned char *from, elen += pc->lengthComponentIdent; break; } - /* Fall through */ + fallthrough; case 2: if (tolen == 0) return -ENAMETOOLONG; diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 1da0be667409..e3b69fb280e8 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -101,7 +101,7 @@ static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 gene struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; struct inode *inode; - if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg) + if (ino < UFS_ROOTINO || ino > (u64)uspi->s_ncg * uspi->s_ipg) return ERR_PTR(-ESTALE); inode = ufs_iget(sb, ino); diff --git a/fs/ufs/util.h b/fs/ufs/util.h index e1f1b2e868a7..4931bec1a01c 100644 --- a/fs/ufs/util.h +++ b/fs/ufs/util.h @@ -42,7 +42,7 @@ ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, case UFS_ST_SUNOS: if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state); - /* Fall Through - to UFS_ST_SUN */ + fallthrough; /* to UFS_ST_SUN */ case UFS_ST_SUN: return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state); case UFS_ST_SUNx86: @@ -63,7 +63,7 @@ ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1, usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value); break; } - /* Fall Through - to UFS_ST_SUN */ + fallthrough; /* to UFS_ST_SUN */ case UFS_ST_SUN: usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value); break; @@ -197,7 +197,7 @@ ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode) case UFS_UID_EFT: if (inode->ui_u1.oldids.ui_suid == 0xFFFF) return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid); - /* Fall through */ + fallthrough; default: return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid); } @@ -215,7 +215,7 @@ ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value) inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value); if (value > 0xFFFF) value = 0xFFFF; - /* Fall through */ + fallthrough; default: inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value); break; @@ -231,7 +231,7 @@ ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode) case UFS_UID_EFT: if (inode->ui_u1.oldids.ui_sgid == 0xFFFF) return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid); - /* Fall through */ + fallthrough; default: return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid); } @@ -249,7 +249,7 @@ ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value) inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value); if (value > 0xFFFF) value = 0xFFFF; - /* Fall through */ + fallthrough; default: inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value); break; diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 52de29000c7e..0e4a3837da52 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -61,7 +61,7 @@ struct userfaultfd_ctx { /* waitqueue head for events */ wait_queue_head_t event_wqh; /* a refile sequence protected by fault_pending_wqh lock */ - struct seqcount refile_seq; + seqcount_spinlock_t refile_seq; /* pseudo fd refcounting */ refcount_t refcount; /* userfaultfd syscall flags */ @@ -339,7 +339,6 @@ out: return ret; } -/* Should pair with 
userfaultfd_signal_pending() */ static inline long userfaultfd_get_blocking_state(unsigned int flags) { if (flags & FAULT_FLAG_INTERRUPTIBLE) @@ -351,18 +350,6 @@ static inline long userfaultfd_get_blocking_state(unsigned int flags) return TASK_UNINTERRUPTIBLE; } -/* Should pair with userfaultfd_get_blocking_state() */ -static inline bool userfaultfd_signal_pending(unsigned int flags) -{ - if (flags & FAULT_FLAG_INTERRUPTIBLE) - return signal_pending(current); - - if (flags & FAULT_FLAG_KILLABLE) - return fatal_signal_pending(current); - - return false; -} - /* * The locking rules involved in returning VM_FAULT_RETRY depending on * FAULT_FLAG_ALLOW_RETRY, FAULT_FLAG_RETRY_NOWAIT and @@ -516,33 +503,9 @@ vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason) vmf->flags, reason); mmap_read_unlock(mm); - if (likely(must_wait && !READ_ONCE(ctx->released) && - !userfaultfd_signal_pending(vmf->flags))) { + if (likely(must_wait && !READ_ONCE(ctx->released))) { wake_up_poll(&ctx->fd_wqh, EPOLLIN); schedule(); - ret |= VM_FAULT_MAJOR; - - /* - * False wakeups can orginate even from rwsem before - * up_read() however userfaults will wait either for a - * targeted wakeup on the specific uwq waitqueue from - * wake_userfault() or for signals or for uffd - * release. - */ - while (!READ_ONCE(uwq.waken)) { - /* - * This needs the full smp_store_mb() - * guarantee as the state write must be - * visible to other CPUs before reading - * uwq.waken from other CPUs. - */ - set_current_state(blocking_state); - if (READ_ONCE(uwq.waken) || - READ_ONCE(ctx->released) || - userfaultfd_signal_pending(vmf->flags)) - break; - schedule(); - } } __set_current_state(TASK_RUNNING); @@ -1998,7 +1961,7 @@ static void init_once_userfaultfd_ctx(void *mem) init_waitqueue_head(&ctx->fault_wqh); init_waitqueue_head(&ctx->event_wqh); init_waitqueue_head(&ctx->fd_wqh); - seqcount_init(&ctx->refile_seq); + seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); } SYSCALL_DEFINE1(userfaultfd, int, flags) diff --git a/fs/utimes.c b/fs/utimes.c index b7b927502d6e..fd3cc4226224 100644 --- a/fs/utimes.c +++ b/fs/utimes.c @@ -16,21 +16,26 @@ static bool nsec_valid(long nsec) return nsec >= 0 && nsec <= 999999999; } -static int utimes_common(const struct path *path, struct timespec64 *times) +int vfs_utimes(const struct path *path, struct timespec64 *times) { int error; struct iattr newattrs; struct inode *inode = path->dentry->d_inode; struct inode *delegated_inode = NULL; + if (times) { + if (!nsec_valid(times[0].tv_nsec) || + !nsec_valid(times[1].tv_nsec)) + return -EINVAL; + if (times[0].tv_nsec == UTIME_NOW && + times[1].tv_nsec == UTIME_NOW) + times = NULL; + } + error = mnt_want_write(path->mnt); if (error) goto out; - if (times && times[0].tv_nsec == UTIME_NOW && - times[1].tv_nsec == UTIME_NOW) - times = NULL; - newattrs.ia_valid = ATTR_CTIME | ATTR_MTIME | ATTR_ATIME; if (times) { if (times[0].tv_nsec == UTIME_OMIT) @@ -70,6 +75,51 @@ out: return error; } +static int do_utimes_path(int dfd, const char __user *filename, + struct timespec64 *times, int flags) +{ + struct path path; + int lookup_flags = 0, error; + + if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) + return -EINVAL; + + if (!(flags & AT_SYMLINK_NOFOLLOW)) + lookup_flags |= LOOKUP_FOLLOW; + if (flags & AT_EMPTY_PATH) + lookup_flags |= LOOKUP_EMPTY; + +retry: + error = user_path_at(dfd, filename, lookup_flags, &path); + if (error) + return error; + + error = vfs_utimes(&path, times); + path_put(&path); + if (retry_estale(error, 
lookup_flags)) { + lookup_flags |= LOOKUP_REVAL; + goto retry; + } + + return error; +} + +static int do_utimes_fd(int fd, struct timespec64 *times, int flags) +{ + struct fd f; + int error; + + if (flags) + return -EINVAL; + + f = fdget(fd); + if (!f.file) + return -EBADF; + error = vfs_utimes(&f.file->f_path, times); + fdput(f); + return error; +} + /* * do_utimes - change times on filename or file descriptor * @dfd: open file descriptor, -1 or AT_FDCWD @@ -88,52 +138,9 @@ out: long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, int flags) { - int error = -EINVAL; - - if (times && (!nsec_valid(times[0].tv_nsec) || - !nsec_valid(times[1].tv_nsec))) { - goto out; - } - - if (flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) - goto out; - - if (filename == NULL && dfd != AT_FDCWD) { - struct fd f; - - if (flags) - goto out; - - f = fdget(dfd); - error = -EBADF; - if (!f.file) - goto out; - - error = utimes_common(&f.file->f_path, times); - fdput(f); - } else { - struct path path; - int lookup_flags = 0; - - if (!(flags & AT_SYMLINK_NOFOLLOW)) - lookup_flags |= LOOKUP_FOLLOW; - if (flags & AT_EMPTY_PATH) - lookup_flags |= LOOKUP_EMPTY; -retry: - error = user_path_at(dfd, filename, lookup_flags, &path); - if (error) - goto out; - - error = utimes_common(&path, times); - path_put(&path); - if (retry_estale(error, lookup_flags)) { - lookup_flags |= LOOKUP_REVAL; - goto retry; - } - } - -out: - return error; + if (filename == NULL && dfd != AT_FDCWD) + return do_utimes_fd(dfd, times, flags); + return do_utimes_path(dfd, filename, times, flags); } SYSCALL_DEFINE4(utimensat, int, dfd, const char __user *, filename, diff --git a/fs/vboxsf/utils.c b/fs/vboxsf/utils.c index 96bd160da48b..018057546067 100644 --- a/fs/vboxsf/utils.c +++ b/fs/vboxsf/utils.c @@ -226,7 +226,7 @@ int vboxsf_getattr(const struct path *path, struct kstat *kstat, break; case AT_STATX_FORCE_SYNC: sf_i->force_restat = 1; - /* fall-through */ + fallthrough; default: err = vboxsf_inode_revalidate(dentry); } diff --git a/fs/verity/open.c b/fs/verity/open.c index d007db0c9304..bfe0280c14e4 100644 --- a/fs/verity/open.c +++ b/fs/verity/open.c @@ -221,11 +221,20 @@ out: void fsverity_set_info(struct inode *inode, struct fsverity_info *vi) { /* - * Multiple processes may race to set ->i_verity_info, so use cmpxchg. - * This pairs with the READ_ONCE() in fsverity_get_info(). + * Multiple tasks may race to set ->i_verity_info, so use + * cmpxchg_release(). This pairs with the smp_load_acquire() in + * fsverity_get_info(). I.e., here we publish ->i_verity_info with a + * RELEASE barrier so that other tasks can ACQUIRE it. */ - if (cmpxchg(&inode->i_verity_info, NULL, vi) != NULL) + if (cmpxchg_release(&inode->i_verity_info, NULL, vi) != NULL) { + /* Lost the race, so free the fsverity_info we allocated. */ fsverity_free_info(vi); + /* + * Afterwards, the caller may access ->i_verity_info directly, + * so make sure to ACQUIRE the winning fsverity_info. + */ + (void)fsverity_get_info(inode); + } } void fsverity_free_info(struct fsverity_info *vi) diff --git a/fs/xattr.c b/fs/xattr.c index 91608d9bfc6a..386b45676d7e 100644 --- a/fs/xattr.c +++ b/fs/xattr.c @@ -134,6 +134,33 @@ xattr_permission(struct inode *inode, const char *name, int mask) return inode_permission(inode, mask); } +/* + * Look for any handler that deals with the specified namespace. 
+ */ +int +xattr_supported_namespace(struct inode *inode, const char *prefix) +{ + const struct xattr_handler **handlers = inode->i_sb->s_xattr; + const struct xattr_handler *handler; + size_t preflen; + + if (!(inode->i_opflags & IOP_XATTR)) { + if (unlikely(is_bad_inode(inode))) + return -EIO; + return -EOPNOTSUPP; + } + + preflen = strlen(prefix); + + for_each_xattr_handler(handlers, handler) { + if (!strncmp(xattr_prefix(handler), prefix, preflen)) + return 0; + } + + return -EOPNOTSUPP; +} +EXPORT_SYMBOL(xattr_supported_namespace); + int __vfs_setxattr(struct dentry *dentry, struct inode *inode, const char *name, const void *value, size_t size, int flags) @@ -204,10 +231,22 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, return error; } - +/** + * __vfs_setxattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - xattr name to set + * @value - value to set @name to + * @size - size of @value + * @flags - flags to pass into filesystem operations + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. + */ int -vfs_setxattr(struct dentry *dentry, const char *name, const void *value, - size_t size, int flags) +__vfs_setxattr_locked(struct dentry *dentry, const char *name, + const void *value, size_t size, int flags, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -216,15 +255,40 @@ vfs_setxattr(struct dentry *dentry, const char *name, const void *value, if (error) return error; - inode_lock(inode); error = security_inode_setxattr(dentry, name, value, size, flags); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_setxattr_noperm(dentry, name, value, size, flags); out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_setxattr_locked); + +int +vfs_setxattr(struct dentry *dentry, const char *name, const void *value, + size_t size, int flags) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_setxattr_locked(dentry, name, value, size, flags, + &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } return error; } EXPORT_SYMBOL_GPL(vfs_setxattr); @@ -378,8 +442,18 @@ __vfs_removexattr(struct dentry *dentry, const char *name) } EXPORT_SYMBOL(__vfs_removexattr); +/** + * __vfs_removexattr_locked: set an extended attribute while holding the inode + * lock + * + * @dentry - object to perform setxattr on + * @name - name of xattr to remove + * @delegated_inode - on return, will contain an inode pointer that + * a delegation was broken on, NULL if none. 
+ */ int -vfs_removexattr(struct dentry *dentry, const char *name) +__vfs_removexattr_locked(struct dentry *dentry, const char *name, + struct inode **delegated_inode) { struct inode *inode = dentry->d_inode; int error; @@ -388,11 +462,14 @@ vfs_removexattr(struct dentry *dentry, const char *name) if (error) return error; - inode_lock(inode); error = security_inode_removexattr(dentry, name); if (error) goto out; + error = try_break_deleg(inode, delegated_inode); + if (error) + goto out; + error = __vfs_removexattr(dentry, name); if (!error) { @@ -401,12 +478,32 @@ vfs_removexattr(struct dentry *dentry, const char *name) } out: + return error; +} +EXPORT_SYMBOL_GPL(__vfs_removexattr_locked); + +int +vfs_removexattr(struct dentry *dentry, const char *name) +{ + struct inode *inode = dentry->d_inode; + struct inode *delegated_inode = NULL; + int error; + +retry_deleg: + inode_lock(inode); + error = __vfs_removexattr_locked(dentry, name, &delegated_inode); inode_unlock(inode); + + if (delegated_inode) { + error = break_deleg_wait(&delegated_inode); + if (!error) + goto retry_deleg; + } + return error; } EXPORT_SYMBOL_GPL(vfs_removexattr); - /* * Extended attribute SET operations */ diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c index f1366475c389..e841ed781a25 100644 --- a/fs/xfs/kmem.c +++ b/fs/xfs/kmem.c @@ -115,24 +115,3 @@ kmem_realloc(const void *old, size_t newsize, xfs_km_flags_t flags) congestion_wait(BLK_RW_ASYNC, HZ/50); } while (1); } - -void * -kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags) -{ - int retries = 0; - gfp_t lflags = kmem_flags_convert(flags); - void *ptr; - - trace_kmem_zone_alloc(kmem_cache_size(zone), flags, _RET_IP_); - do { - ptr = kmem_cache_alloc(zone, lflags); - if (ptr || (flags & KM_MAYFAIL)) - return ptr; - if (!(++retries % 100)) - xfs_err(NULL, - "%s(%u) possible memory allocation deadlock in %s (mode:0x%x)", - current->comm, current->pid, - __func__, lflags); - congestion_wait(BLK_RW_ASYNC, HZ/50); - } while (1); -} diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h index 34cbcfde9228..8e8555817e6d 100644 --- a/fs/xfs/kmem.h +++ b/fs/xfs/kmem.h @@ -85,14 +85,6 @@ kmem_zalloc_large(size_t size, xfs_km_flags_t flags) #define kmem_zone kmem_cache #define kmem_zone_t struct kmem_cache -extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t); - -static inline void * -kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags) -{ - return kmem_zone_alloc(zone, flags | KM_ZERO); -} - static inline struct page * kmem_to_page(void *addr) { diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c index 9d84007a5c65..8cf73fe4338e 100644 --- a/fs/xfs/libxfs/xfs_ag.c +++ b/fs/xfs/libxfs/xfs_ag.c @@ -563,7 +563,8 @@ xfs_ag_get_geometry( error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agf_bp); if (error) goto out_agi; - pag = xfs_perag_get(mp, agno); + + pag = agi_bp->b_pag; /* Fill out form. */ memset(ageo, 0, sizeof(*ageo)); @@ -583,7 +584,6 @@ xfs_ag_get_geometry( xfs_ag_geom_health(pag, ageo); /* Release resources. 
*/ - xfs_perag_put(pag); xfs_buf_relse(agf_bp); out_agi: xfs_buf_relse(agi_bp); diff --git a/fs/xfs/libxfs/xfs_ag_resv.h b/fs/xfs/libxfs/xfs_ag_resv.h index f3fd0ee9a7f7..8a8eb4bc48bb 100644 --- a/fs/xfs/libxfs/xfs_ag_resv.h +++ b/fs/xfs/libxfs/xfs_ag_resv.h @@ -37,16 +37,4 @@ xfs_ag_resv_rmapbt_alloc( xfs_perag_put(pag); } -static inline void -xfs_ag_resv_rmapbt_free( - struct xfs_mount *mp, - xfs_agnumber_t agno) -{ - struct xfs_perag *pag; - - pag = xfs_perag_get(mp, agno); - xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1); - xfs_perag_put(pag); -} - #endif /* __XFS_AG_RESV_H__ */ diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 203e74fa64aa..852b536551b5 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c @@ -710,13 +710,12 @@ xfs_alloc_read_agfl( STATIC int xfs_alloc_update_counters( struct xfs_trans *tp, - struct xfs_perag *pag, struct xfs_buf *agbp, long len) { struct xfs_agf *agf = agbp->b_addr; - pag->pagf_freeblks += len; + agbp->b_pag->pagf_freeblks += len; be32_add_cpu(&agf->agf_freeblks, len); xfs_trans_agblocks_delta(tp, len); @@ -1175,8 +1174,7 @@ xfs_alloc_ag_vextent( } if (!args->wasfromfl) { - error = xfs_alloc_update_counters(args->tp, args->pag, - args->agbp, + error = xfs_alloc_update_counters(args->tp, args->agbp, -((long)(args->len))); if (error) return error; @@ -1887,7 +1885,6 @@ xfs_free_ag_extent( enum xfs_ag_resv_type type) { struct xfs_mount *mp; - struct xfs_perag *pag; struct xfs_btree_cur *bno_cur; struct xfs_btree_cur *cnt_cur; xfs_agblock_t gtbno; /* start of right neighbor */ @@ -2167,10 +2164,8 @@ xfs_free_ag_extent( /* * Update the freespace totals in the ag and superblock. */ - pag = xfs_perag_get(mp, agno); - error = xfs_alloc_update_counters(tp, pag, agbp, len); - xfs_ag_resv_free_extent(pag, type, tp, len); - xfs_perag_put(pag); + error = xfs_alloc_update_counters(tp, agbp, len); + xfs_ag_resv_free_extent(agbp->b_pag, type, tp, len); if (error) goto error0; @@ -2467,7 +2462,8 @@ xfs_defer_agfl_block( ASSERT(xfs_bmap_free_item_zone != NULL); ASSERT(oinfo != NULL); - new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0); + new = kmem_cache_alloc(xfs_bmap_free_item_zone, + GFP_KERNEL | __GFP_NOFAIL); new->xefi_startblock = XFS_AGB_TO_FSB(mp, agno, agbno); new->xefi_blockcount = 1; new->xefi_oinfo = *oinfo; @@ -2689,7 +2685,7 @@ xfs_alloc_get_freelist( if (be32_to_cpu(agf->agf_flfirst) == xfs_agfl_size(mp)) agf->agf_flfirst = 0; - pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); + pag = agbp->b_pag; ASSERT(!pag->pagf_agflreset); be32_add_cpu(&agf->agf_flcount, -1); xfs_trans_agflist_delta(tp, -1); @@ -2701,7 +2697,6 @@ xfs_alloc_get_freelist( pag->pagf_btreeblks++; logflags |= XFS_AGF_BTREEBLKS; } - xfs_perag_put(pag); xfs_alloc_log_agf(tp, agbp, logflags); *bnop = bno; @@ -2797,7 +2792,7 @@ xfs_alloc_put_freelist( if (be32_to_cpu(agf->agf_fllast) == xfs_agfl_size(mp)) agf->agf_fllast = 0; - pag = xfs_perag_get(mp, be32_to_cpu(agf->agf_seqno)); + pag = agbp->b_pag; ASSERT(!pag->pagf_agflreset); be32_add_cpu(&agf->agf_flcount, 1); xfs_trans_agflist_delta(tp, 1); @@ -2809,7 +2804,6 @@ xfs_alloc_put_freelist( pag->pagf_btreeblks--; logflags |= XFS_AGF_BTREEBLKS; } - xfs_perag_put(pag); xfs_alloc_log_agf(tp, agbp, logflags); @@ -3006,7 +3000,7 @@ xfs_alloc_read_agf( ASSERT(!(*bpp)->b_error); agf = (*bpp)->b_addr; - pag = xfs_perag_get(mp, agno); + pag = (*bpp)->b_pag; if (!pag->pagf_init) { pag->pagf_freeblks = be32_to_cpu(agf->agf_freeblks); pag->pagf_btreeblks = be32_to_cpu(agf->agf_btreeblks); @@ 
-3034,7 +3028,6 @@ xfs_alloc_read_agf( be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNTi])); } #endif - xfs_perag_put(pag); return 0; } diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c index 60c453cb3ee3..8e01231b308e 100644 --- a/fs/xfs/libxfs/xfs_alloc_btree.c +++ b/fs/xfs/libxfs/xfs_alloc_btree.c @@ -38,16 +38,14 @@ xfs_allocbt_set_root( { struct xfs_buf *agbp = cur->bc_ag.agbp; struct xfs_agf *agf = agbp->b_addr; - xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); int btnum = cur->bc_btnum; - struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno); + struct xfs_perag *pag = agbp->b_pag; ASSERT(ptr->s != 0); agf->agf_roots[btnum] = ptr->s; be32_add_cpu(&agf->agf_levels[btnum], inc); pag->pagf_levels[btnum] += inc; - xfs_perag_put(pag); xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); } @@ -115,7 +113,6 @@ xfs_allocbt_update_lastrec( int reason) { struct xfs_agf *agf = cur->bc_ag.agbp->b_addr; - xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); struct xfs_perag *pag; __be32 len; int numrecs; @@ -160,9 +157,8 @@ xfs_allocbt_update_lastrec( } agf->agf_longest = len; - pag = xfs_perag_get(cur->bc_mp, seqno); + pag = cur->bc_ag.agbp->b_pag; pag->pagf_longest = be32_to_cpu(len); - xfs_perag_put(pag); xfs_alloc_log_agf(cur->bc_tp, cur->bc_ag.agbp, XFS_AGF_LONGEST); } @@ -484,7 +480,7 @@ xfs_allocbt_init_common( ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT); - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); + cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL); cur->bc_tp = tp; cur->bc_mp = mp; diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c index 3b1bd6e112f8..2e055c079f39 100644 --- a/fs/xfs/libxfs/xfs_attr.c +++ b/fs/xfs/libxfs/xfs_attr.c @@ -46,6 +46,7 @@ STATIC int xfs_attr_shortform_addname(xfs_da_args_t *args); STATIC int xfs_attr_leaf_get(xfs_da_args_t *args); STATIC int xfs_attr_leaf_addname(xfs_da_args_t *args); STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args); +STATIC int xfs_attr_leaf_hasname(struct xfs_da_args *args, struct xfs_buf **bp); /* * Internal routines when attribute list is more than one block. @@ -53,6 +54,8 @@ STATIC int xfs_attr_leaf_removename(xfs_da_args_t *args); STATIC int xfs_attr_node_get(xfs_da_args_t *args); STATIC int xfs_attr_node_addname(xfs_da_args_t *args); STATIC int xfs_attr_node_removename(xfs_da_args_t *args); +STATIC int xfs_attr_node_hasname(xfs_da_args_t *args, + struct xfs_da_state **state); STATIC int xfs_attr_fillstate(xfs_da_state_t *state); STATIC int xfs_attr_refillstate(xfs_da_state_t *state); @@ -175,8 +178,13 @@ xfs_attr_try_sf_addname( struct xfs_da_args *args) { - struct xfs_mount *mp = dp->i_mount; - int error, error2; + int error; + + /* + * Build initial attribute list (if required). + */ + if (dp->i_afp->if_format == XFS_DINODE_FMT_EXTENTS) + xfs_attr_shortform_create(args); error = xfs_attr_shortform_addname(args); if (error == -ENOSPC) @@ -189,12 +197,70 @@ xfs_attr_try_sf_addname( if (!error && !(args->op_flags & XFS_DA_OP_NOTIME)) xfs_trans_ichgtime(args->trans, dp, XFS_ICHGTIME_CHG); - if (mp->m_flags & XFS_MOUNT_WSYNC) + if (dp->i_mount->m_flags & XFS_MOUNT_WSYNC) xfs_trans_set_sync(args->trans); - error2 = xfs_trans_commit(args->trans); - args->trans = NULL; - return error ? error : error2; + return error; +} + +/* + * Check to see if the attr should be upgraded from non-existent or shortform to + * single-leaf-block attribute list. 
+ */ +static inline bool +xfs_attr_is_shortform( + struct xfs_inode *ip) +{ + return ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL || + (ip->i_afp->if_format == XFS_DINODE_FMT_EXTENTS && + ip->i_afp->if_nextents == 0); +} + +/* + * Attempts to set an attr in shortform, or converts short form to leaf form if + * there is not enough room. If the attr is set, the transaction is committed + * and set to NULL. + */ +STATIC int +xfs_attr_set_shortform( + struct xfs_da_args *args, + struct xfs_buf **leaf_bp) +{ + struct xfs_inode *dp = args->dp; + int error, error2 = 0; + + /* + * Try to add the attr to the attribute list in the inode. + */ + error = xfs_attr_try_sf_addname(dp, args); + if (error != -ENOSPC) { + error2 = xfs_trans_commit(args->trans); + args->trans = NULL; + return error ? error : error2; + } + /* + * It won't fit in the shortform, transform to a leaf block. GROT: + * another possible req'mt for a double-split btree op. + */ + error = xfs_attr_shortform_to_leaf(args, leaf_bp); + if (error) + return error; + + /* + * Prevent the leaf buffer from being unlocked so that a concurrent AIL + * push cannot grab the half-baked leaf buffer and run into problems + * with the write verifier. Once we're done rolling the transaction we + * can release the hold and add the attr to the leaf. + */ + xfs_trans_bhold(args->trans, *leaf_bp); + error = xfs_defer_finish(&args->trans); + xfs_trans_bhold_release(args->trans, *leaf_bp); + if (error) { + xfs_trans_brelse(args->trans, *leaf_bp); + return error; + } + + return 0; } /* @@ -206,61 +272,94 @@ xfs_attr_set_args( { struct xfs_inode *dp = args->dp; struct xfs_buf *leaf_bp = NULL; - int error; + int error = 0; /* - * If the attribute list is non-existent or a shortform list, - * upgrade it to a single-leaf-block attribute list. + * If the attribute list is already in leaf format, jump straight to + * leaf handling. Otherwise, try to add the attribute to the shortform + * list; if there's no room then convert the list to leaf format and try + * again. */ - if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL || - (dp->i_afp->if_format == XFS_DINODE_FMT_EXTENTS && - dp->i_afp->if_nextents == 0)) { + if (xfs_attr_is_shortform(dp)) { /* - * Build initial attribute list (if required). + * If the attr was successfully set in shortform, the + * transaction is committed and set to NULL. Otherwise, is it + * converted from shortform to leaf, and the transaction is + * retained. */ - if (dp->i_afp->if_format == XFS_DINODE_FMT_EXTENTS) - xfs_attr_shortform_create(args); + error = xfs_attr_set_shortform(args, &leaf_bp); + if (error || !args->trans) + return error; + } - /* - * Try to add the attr to the attribute list in the inode. - */ - error = xfs_attr_try_sf_addname(dp, args); + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { + error = xfs_attr_leaf_addname(args); if (error != -ENOSPC) return error; /* - * It won't fit in the shortform, transform to a leaf block. - * GROT: another possible req'mt for a double-split btree op. + * Promote the attribute list to the Btree format. */ - error = xfs_attr_shortform_to_leaf(args, &leaf_bp); + error = xfs_attr3_leaf_to_node(args); if (error) return error; /* - * Prevent the leaf buffer from being unlocked so that a - * concurrent AIL push cannot grab the half-baked leaf - * buffer and run into problems with the write verifier. - * Once we're done rolling the transaction we can release - * the hold and add the attr to the leaf. + * Finish any deferred work items and roll the transaction once + * more. 
The goal here is to call node_addname with the inode + * and transaction in the same state (inode locked and joined, + * transaction clean) no matter how we got to this step. */ - xfs_trans_bhold(args->trans, leaf_bp); error = xfs_defer_finish(&args->trans); - xfs_trans_bhold_release(args->trans, leaf_bp); - if (error) { - xfs_trans_brelse(args->trans, leaf_bp); + if (error) + return error; + + /* + * Commit the current trans (including the inode) and + * start a new one. + */ + error = xfs_trans_roll_inode(&args->trans, dp); + if (error) return error; - } } - if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) - error = xfs_attr_leaf_addname(args); - else - error = xfs_attr_node_addname(args); + error = xfs_attr_node_addname(args); return error; } /* + * Return EEXIST if attr is found, or ENOATTR if not + */ +int +xfs_has_attr( + struct xfs_da_args *args) +{ + struct xfs_inode *dp = args->dp; + struct xfs_buf *bp = NULL; + int error; + + if (!xfs_inode_hasattr(dp)) + return -ENOATTR; + + if (dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL) { + ASSERT(dp->i_afp->if_flags & XFS_IFINLINE); + return xfs_attr_sf_findname(args, NULL, NULL); + } + + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { + error = xfs_attr_leaf_hasname(args, &bp); + + if (bp) + xfs_trans_brelse(args->trans, bp); + + return error; + } + + return xfs_attr_node_hasname(args, NULL); +} + +/* * Remove the attribute specified in @args. */ int @@ -370,6 +469,15 @@ xfs_attr_set( args->total, 0, quota_flags); if (error) goto out_trans_cancel; + + error = xfs_has_attr(args); + if (error == -EEXIST && (args->attr_flags & XATTR_CREATE)) + goto out_trans_cancel; + if (error == -ENOATTR && (args->attr_flags & XATTR_REPLACE)) + goto out_trans_cancel; + if (error != -ENOATTR && error != -EEXIST) + goto out_trans_cancel; + error = xfs_attr_set_args(args); if (error) goto out_trans_cancel; @@ -377,6 +485,10 @@ xfs_attr_set( if (!args->trans) goto out_unlock; } else { + error = xfs_has_attr(args); + if (error != -EEXIST) + goto out_trans_cancel; + error = xfs_attr_remove_args(args); if (error) goto out_trans_cancel; @@ -459,36 +571,54 @@ xfs_attr_shortform_addname(xfs_da_args_t *args) * External routines when attribute list is one block *========================================================================*/ +/* Store info about a remote block */ +STATIC void +xfs_attr_save_rmt_blk( + struct xfs_da_args *args) +{ + args->blkno2 = args->blkno; + args->index2 = args->index; + args->rmtblkno2 = args->rmtblkno; + args->rmtblkcnt2 = args->rmtblkcnt; + args->rmtvaluelen2 = args->rmtvaluelen; +} + +/* Set stored info about a remote block */ +STATIC void +xfs_attr_restore_rmt_blk( + struct xfs_da_args *args) +{ + args->blkno = args->blkno2; + args->index = args->index2; + args->rmtblkno = args->rmtblkno2; + args->rmtblkcnt = args->rmtblkcnt2; + args->rmtvaluelen = args->rmtvaluelen2; +} + /* - * Add a name to the leaf attribute list structure + * Tries to add an attribute to an inode in leaf form * - * This leaf block cannot have a "remote" value, we only call this routine - * if bmap_one_block() says there is only one block (ie: no remote blks). + * This function is meant to execute as part of a delayed operation and leaves + * the transaction handling to the caller. On success the attribute is added + * and the inode and transaction are left dirty. If there is not enough space, + * the attr data is converted to node format and -ENOSPC is returned. Caller is + * responsible for handling the dirty inode and transaction or adding the attr + * in node format. 
*/ STATIC int -xfs_attr_leaf_addname( - struct xfs_da_args *args) +xfs_attr_leaf_try_add( + struct xfs_da_args *args, + struct xfs_buf *bp) { - struct xfs_inode *dp; - struct xfs_buf *bp; - int retval, error, forkoff; - - trace_xfs_attr_leaf_addname(args); - - /* - * Read the (only) block in the attribute list in. - */ - dp = args->dp; - args->blkno = 0; - error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp); - if (error) - return error; + int retval; /* * Look up the given attribute in the leaf block. Figure out if * the given flags produce an error or call for an atomic rename. */ - retval = xfs_attr3_leaf_lookup_int(bp, args); + retval = xfs_attr_leaf_hasname(args, &bp); + if (retval != -ENOATTR && retval != -EEXIST) + return retval; if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE)) goto out_brelse; if (retval == -EEXIST) { @@ -499,11 +629,7 @@ xfs_attr_leaf_addname( /* save the attribute state for later removal*/ args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */ - args->blkno2 = args->blkno; /* set 2nd entry info*/ - args->index2 = args->index; - args->rmtblkno2 = args->rmtblkno; - args->rmtblkcnt2 = args->rmtblkcnt; - args->rmtvaluelen2 = args->rmtvaluelen; + xfs_attr_save_rmt_blk(args); /* * clear the remote attr state now that it is saved so that the @@ -516,37 +642,35 @@ xfs_attr_leaf_addname( } /* - * Add the attribute to the leaf block, transitioning to a Btree - * if required. + * Add the attribute to the leaf block */ - retval = xfs_attr3_leaf_add(bp, args); - if (retval == -ENOSPC) { - /* - * Promote the attribute list to the Btree format, then - * Commit that transaction so that the node_addname() call - * can manage its own transactions. - */ - error = xfs_attr3_leaf_to_node(args); - if (error) - return error; - error = xfs_defer_finish(&args->trans); - if (error) - return error; + return xfs_attr3_leaf_add(bp, args); - /* - * Commit the current trans (including the inode) and start - * a new one. - */ - error = xfs_trans_roll_inode(&args->trans, dp); - if (error) - return error; +out_brelse: + xfs_trans_brelse(args->trans, bp); + return retval; +} - /* - * Fob the whole rest of the problem off on the Btree code. - */ - error = xfs_attr_node_addname(args); + +/* + * Add a name to the leaf attribute list structure + * + * This leaf block cannot have a "remote" value, we only call this routine + * if bmap_one_block() says there is only one block (ie: no remote blks). + */ +STATIC int +xfs_attr_leaf_addname( + struct xfs_da_args *args) +{ + int error, forkoff; + struct xfs_buf *bp = NULL; + struct xfs_inode *dp = args->dp; + + trace_xfs_attr_leaf_addname(args); + + error = xfs_attr_leaf_try_add(args, bp); + if (error) return error; - } /* * Commit the transaction that added the attr name so that @@ -568,75 +692,93 @@ xfs_attr_leaf_addname( return error; } - /* - * If this is an atomic rename operation, we must "flip" the - * incomplete flags on the "new" and "old" attribute/value pairs - * so that one disappears and one appears atomically. Then we - * must remove the "old" attribute/value pair. - */ - if (args->op_flags & XFS_DA_OP_RENAME) { + if (!(args->op_flags & XFS_DA_OP_RENAME)) { /* - * In a separate transaction, set the incomplete flag on the - * "old" attr and clear the incomplete flag on the "new" attr. + * Added a "remote" value, just clear the incomplete flag. 
*/ - error = xfs_attr3_leaf_flipflags(args); + if (args->rmtblkno > 0) + error = xfs_attr3_leaf_clearflag(args); + + return error; + } + + /* + * If this is an atomic rename operation, we must "flip" the incomplete + * flags on the "new" and "old" attribute/value pairs so that one + * disappears and one appears atomically. Then we must remove the "old" + * attribute/value pair. + * + * In a separate transaction, set the incomplete flag on the "old" attr + * and clear the incomplete flag on the "new" attr. + */ + + error = xfs_attr3_leaf_flipflags(args); + if (error) + return error; + /* + * Commit the flag value change and start the next trans in series. + */ + error = xfs_trans_roll_inode(&args->trans, args->dp); + if (error) + return error; + + /* + * Dismantle the "old" attribute/value pair by removing a "remote" value + * (if it exists). + */ + xfs_attr_restore_rmt_blk(args); + + if (args->rmtblkno) { + error = xfs_attr_rmtval_invalidate(args); if (error) return error; - /* - * Dismantle the "old" attribute/value pair by removing - * a "remote" value (if it exists). - */ - args->index = args->index2; - args->blkno = args->blkno2; - args->rmtblkno = args->rmtblkno2; - args->rmtblkcnt = args->rmtblkcnt2; - args->rmtvaluelen = args->rmtvaluelen2; - if (args->rmtblkno) { - error = xfs_attr_rmtval_remove(args); - if (error) - return error; - } - - /* - * Read in the block containing the "old" attr, then - * remove the "old" attr from that block (neat, huh!) - */ - error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, - &bp); + error = xfs_attr_rmtval_remove(args); if (error) return error; + } - xfs_attr3_leaf_remove(bp, args); + /* + * Read in the block containing the "old" attr, then remove the "old" + * attr from that block (neat, huh!) + */ + error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, + &bp); + if (error) + return error; - /* - * If the result is small enough, shrink it all into the inode. - */ - if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { - error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); - /* bp is gone due to xfs_da_shrink_inode */ - if (error) - return error; - error = xfs_defer_finish(&args->trans); - if (error) - return error; - } + xfs_attr3_leaf_remove(bp, args); - /* - * Commit the remove and start the next trans in series. - */ - error = xfs_trans_roll_inode(&args->trans, dp); + /* + * If the result is small enough, shrink it all into the inode. + */ + forkoff = xfs_attr_shortform_allfit(bp, dp); + if (forkoff) + error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); + /* bp is gone due to xfs_da_shrink_inode */ + + return error; +} + +/* + * Return EEXIST if attr is found, or ENOATTR if not + */ +STATIC int +xfs_attr_leaf_hasname( + struct xfs_da_args *args, + struct xfs_buf **bp) +{ + int error = 0; + + error = xfs_attr3_leaf_read(args->trans, args->dp, 0, bp); + if (error) + return error; + + error = xfs_attr3_leaf_lookup_int(*bp, args); + if (error != -ENOATTR && error != -EEXIST) + xfs_trans_brelse(args->trans, *bp); - } else if (args->rmtblkno > 0) { - /* - * Added a "remote" value, just clear the incomplete flag. - */ - error = xfs_attr3_leaf_clearflag(args); - } return error; -out_brelse: - xfs_trans_brelse(args->trans, bp); - return retval; } /* @@ -659,31 +801,25 @@ xfs_attr_leaf_removename( * Remove the attribute. 
*/ dp = args->dp; - args->blkno = 0; - error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp); - if (error) - return error; - error = xfs_attr3_leaf_lookup_int(bp, args); + error = xfs_attr_leaf_hasname(args, &bp); + if (error == -ENOATTR) { xfs_trans_brelse(args->trans, bp); return error; - } + } else if (error != -EEXIST) + return error; xfs_attr3_leaf_remove(bp, args); /* * If the result is small enough, shrink it all into the inode. */ - if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { - error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); + forkoff = xfs_attr_shortform_allfit(bp, dp); + if (forkoff) + return xfs_attr3_leaf_to_shortform(bp, args, forkoff); /* bp is gone due to xfs_da_shrink_inode */ - if (error) - return error; - error = xfs_defer_finish(&args->trans); - if (error) - return error; - } + return 0; } @@ -703,21 +839,53 @@ xfs_attr_leaf_get(xfs_da_args_t *args) trace_xfs_attr_leaf_get(args); - args->blkno = 0; - error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp); - if (error) - return error; + error = xfs_attr_leaf_hasname(args, &bp); - error = xfs_attr3_leaf_lookup_int(bp, args); - if (error != -EEXIST) { + if (error == -ENOATTR) { xfs_trans_brelse(args->trans, bp); return error; - } + } else if (error != -EEXIST) + return error; + + error = xfs_attr3_leaf_getvalue(bp, args); xfs_trans_brelse(args->trans, bp); return error; } +/* + * Return EEXIST if attr is found, or ENOATTR if not + * statep: If not null is set to point at the found state. Caller will + * be responsible for freeing the state in this case. + */ +STATIC int +xfs_attr_node_hasname( + struct xfs_da_args *args, + struct xfs_da_state **statep) +{ + struct xfs_da_state *state; + int retval, error; + + state = xfs_da_state_alloc(args); + if (statep != NULL) + *statep = NULL; + + /* + * Search to see if name exists, and get back a pointer to it. + */ + error = xfs_da3_node_lookup_int(state, &retval); + if (error) { + xfs_da_state_free(state); + return error; + } + + if (statep != NULL) + *statep = state; + else + xfs_da_state_free(state); + return retval; +} + /*======================================================================== * External routines when attribute list size > geo->blksize *========================================================================*/ @@ -739,7 +907,6 @@ xfs_attr_node_addname( struct xfs_da_state *state; struct xfs_da_state_blk *blk; struct xfs_inode *dp; - struct xfs_mount *mp; int retval, error; trace_xfs_attr_node_addname(args); @@ -748,19 +915,15 @@ xfs_attr_node_addname( * Fill in bucket of arguments/results/context to carry around. */ dp = args->dp; - mp = dp->i_mount; restart: - state = xfs_da_state_alloc(); - state->args = args; - state->mp = mp; - /* * Search to see if name already exists, and get back a pointer * to where it should go. 
*/ - error = xfs_da3_node_lookup_int(state, &retval); - if (error) + retval = xfs_attr_node_hasname(args, &state); + if (retval != -ENOATTR && retval != -EEXIST) goto out; + blk = &state->path.blk[ state->path.active-1 ]; ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); if (retval == -ENOATTR && (args->attr_flags & XATTR_REPLACE)) @@ -773,11 +936,7 @@ restart: /* save the attribute state for later removal*/ args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */ - args->blkno2 = args->blkno; /* set 2nd entry info*/ - args->index2 = args->index; - args->rmtblkno2 = args->rmtblkno; - args->rmtblkcnt2 = args->rmtblkcnt; - args->rmtvaluelen2 = args->rmtvaluelen; + xfs_attr_save_rmt_blk(args); /* * clear the remote attr state now that it is saved so that the @@ -863,82 +1022,75 @@ restart: return error; } - /* - * If this is an atomic rename operation, we must "flip" the - * incomplete flags on the "new" and "old" attribute/value pairs - * so that one disappears and one appears atomically. Then we - * must remove the "old" attribute/value pair. - */ - if (args->op_flags & XFS_DA_OP_RENAME) { + if (!(args->op_flags & XFS_DA_OP_RENAME)) { /* - * In a separate transaction, set the incomplete flag on the - * "old" attr and clear the incomplete flag on the "new" attr. + * Added a "remote" value, just clear the incomplete flag. */ - error = xfs_attr3_leaf_flipflags(args); - if (error) - goto out; + if (args->rmtblkno > 0) + error = xfs_attr3_leaf_clearflag(args); + retval = error; + goto out; + } - /* - * Dismantle the "old" attribute/value pair by removing - * a "remote" value (if it exists). - */ - args->index = args->index2; - args->blkno = args->blkno2; - args->rmtblkno = args->rmtblkno2; - args->rmtblkcnt = args->rmtblkcnt2; - args->rmtvaluelen = args->rmtvaluelen2; - if (args->rmtblkno) { - error = xfs_attr_rmtval_remove(args); - if (error) - return error; - } + /* + * If this is an atomic rename operation, we must "flip" the incomplete + * flags on the "new" and "old" attribute/value pairs so that one + * disappears and one appears atomically. Then we must remove the "old" + * attribute/value pair. + * + * In a separate transaction, set the incomplete flag on the "old" attr + * and clear the incomplete flag on the "new" attr. + */ + error = xfs_attr3_leaf_flipflags(args); + if (error) + goto out; + /* + * Commit the flag value change and start the next trans in series + */ + error = xfs_trans_roll_inode(&args->trans, args->dp); + if (error) + goto out; - /* - * Re-find the "old" attribute entry after any split ops. - * The INCOMPLETE flag means that we will find the "old" - * attr, not the "new" one. - */ - args->attr_filter |= XFS_ATTR_INCOMPLETE; - state = xfs_da_state_alloc(); - state->args = args; - state->mp = mp; - state->inleaf = 0; - error = xfs_da3_node_lookup_int(state, &retval); + /* + * Dismantle the "old" attribute/value pair by removing a "remote" value + * (if it exists). + */ + xfs_attr_restore_rmt_blk(args); + + if (args->rmtblkno) { + error = xfs_attr_rmtval_invalidate(args); if (error) - goto out; + return error; - /* - * Remove the name and update the hashvals in the tree. - */ - blk = &state->path.blk[ state->path.active-1 ]; - ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); - error = xfs_attr3_leaf_remove(blk->bp, args); - xfs_da3_fixhashpath(state, &state->path); + error = xfs_attr_rmtval_remove(args); + if (error) + return error; + } - /* - * Check to see if the tree needs to be collapsed. 
- */ - if (retval && (state->path.active > 1)) { - error = xfs_da3_join(state); - if (error) - goto out; - error = xfs_defer_finish(&args->trans); - if (error) - goto out; - } + /* + * Re-find the "old" attribute entry after any split ops. The INCOMPLETE + * flag means that we will find the "old" attr, not the "new" one. + */ + args->attr_filter |= XFS_ATTR_INCOMPLETE; + state = xfs_da_state_alloc(args); + state->inleaf = 0; + error = xfs_da3_node_lookup_int(state, &retval); + if (error) + goto out; - /* - * Commit and start the next trans in the chain. - */ - error = xfs_trans_roll_inode(&args->trans, dp); - if (error) - goto out; + /* + * Remove the name and update the hashvals in the tree. + */ + blk = &state->path.blk[state->path.active-1]; + ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); + error = xfs_attr3_leaf_remove(blk->bp, args); + xfs_da3_fixhashpath(state, &state->path); - } else if (args->rmtblkno > 0) { - /* - * Added a "remote" value, just clear the incomplete flag. - */ - error = xfs_attr3_leaf_clearflag(args); + /* + * Check to see if the tree needs to be collapsed. + */ + if (retval && (state->path.active > 1)) { + error = xfs_da3_join(state); if (error) goto out; } @@ -953,6 +1105,114 @@ out: } /* + * Shrink an attribute from leaf to shortform + */ +STATIC int +xfs_attr_node_shrink( + struct xfs_da_args *args, + struct xfs_da_state *state) +{ + struct xfs_inode *dp = args->dp; + int error, forkoff; + struct xfs_buf *bp; + + /* + * Have to get rid of the copy of this dabuf in the state. + */ + ASSERT(state->path.active == 1); + ASSERT(state->path.blk[0].bp); + state->path.blk[0].bp = NULL; + + error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp); + if (error) + return error; + + forkoff = xfs_attr_shortform_allfit(bp, dp); + if (forkoff) { + error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); + /* bp is gone due to xfs_da_shrink_inode */ + } else + xfs_trans_brelse(args->trans, bp); + + return error; +} + +/* + * Mark an attribute entry INCOMPLETE and save pointers to the relevant buffers + * for later deletion of the entry. + */ +STATIC int +xfs_attr_leaf_mark_incomplete( + struct xfs_da_args *args, + struct xfs_da_state *state) +{ + int error; + + /* + * Fill in disk block numbers in the state structure + * so that we can get the buffers back after we commit + * several transactions in the following calls. + */ + error = xfs_attr_fillstate(state); + if (error) + return error; + + /* + * Mark the attribute as INCOMPLETE + */ + return xfs_attr3_leaf_setflag(args); +} + +/* + * Initial setup for xfs_attr_node_removename. Make sure the attr is there and + * the blocks are valid. Attr keys with remote blocks will be marked + * incomplete. + */ +STATIC +int xfs_attr_node_removename_setup( + struct xfs_da_args *args, + struct xfs_da_state **state) +{ + int error; + + error = xfs_attr_node_hasname(args, state); + if (error != -EEXIST) + return error; + + ASSERT((*state)->path.blk[(*state)->path.active - 1].bp != NULL); + ASSERT((*state)->path.blk[(*state)->path.active - 1].magic == + XFS_ATTR_LEAF_MAGIC); + + if (args->rmtblkno > 0) { + error = xfs_attr_leaf_mark_incomplete(args, *state); + if (error) + return error; + + return xfs_attr_rmtval_invalidate(args); + } + + return 0; +} + +STATIC int +xfs_attr_node_remove_rmt( + struct xfs_da_args *args, + struct xfs_da_state *state) +{ + int error = 0; + + error = xfs_attr_rmtval_remove(args); + if (error) + return error; + + /* + * Refill the state structure with buffers, the prior calls released our + * buffers. 
+ */ + return xfs_attr_refillstate(state); +} + +/* * Remove a name from a B-tree attribute list. * * This will involve walking down the Btree, and may involve joining @@ -965,64 +1225,22 @@ xfs_attr_node_removename( { struct xfs_da_state *state; struct xfs_da_state_blk *blk; - struct xfs_inode *dp; - struct xfs_buf *bp; - int retval, error, forkoff; + int retval, error; + struct xfs_inode *dp = args->dp; trace_xfs_attr_node_removename(args); - /* - * Tie a string around our finger to remind us where we are. - */ - dp = args->dp; - state = xfs_da_state_alloc(); - state->args = args; - state->mp = dp->i_mount; - - /* - * Search to see if name exists, and get back a pointer to it. - */ - error = xfs_da3_node_lookup_int(state, &retval); - if (error || (retval != -EEXIST)) { - if (error == 0) - error = retval; + error = xfs_attr_node_removename_setup(args, &state); + if (error) goto out; - } /* * If there is an out-of-line value, de-allocate the blocks. * This is done before we remove the attribute so that we don't * overflow the maximum size of a transaction and/or hit a deadlock. */ - blk = &state->path.blk[ state->path.active-1 ]; - ASSERT(blk->bp != NULL); - ASSERT(blk->magic == XFS_ATTR_LEAF_MAGIC); if (args->rmtblkno > 0) { - /* - * Fill in disk block numbers in the state structure - * so that we can get the buffers back after we commit - * several transactions in the following calls. - */ - error = xfs_attr_fillstate(state); - if (error) - goto out; - - /* - * Mark the attribute as INCOMPLETE, then bunmapi() the - * remote value. - */ - error = xfs_attr3_leaf_setflag(args); - if (error) - goto out; - error = xfs_attr_rmtval_remove(args); - if (error) - goto out; - - /* - * Refill the state structure with buffers, the prior calls - * released our buffers. - */ - error = xfs_attr_refillstate(state); + error = xfs_attr_node_remove_rmt(args, state); if (error) goto out; } @@ -1056,33 +1274,12 @@ xfs_attr_node_removename( /* * If the result is small enough, push it all into the inode. */ - if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) { - /* - * Have to get rid of the copy of this dabuf in the state. - */ - ASSERT(state->path.active == 1); - ASSERT(state->path.blk[0].bp); - state->path.blk[0].bp = NULL; - - error = xfs_attr3_leaf_read(args->trans, args->dp, 0, &bp); - if (error) - goto out; - - if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) { - error = xfs_attr3_leaf_to_shortform(bp, args, forkoff); - /* bp is gone due to xfs_da_shrink_inode */ - if (error) - goto out; - error = xfs_defer_finish(&args->trans); - if (error) - goto out; - } else - xfs_trans_brelse(args->trans, bp); - } - error = 0; + if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) + error = xfs_attr_node_shrink(args, state); out: - xfs_da_state_free(state); + if (state) + xfs_da_state_free(state); return error; } @@ -1198,47 +1395,41 @@ xfs_attr_refillstate(xfs_da_state_t *state) * Returns 0 on successful retrieval, otherwise an error. */ STATIC int -xfs_attr_node_get(xfs_da_args_t *args) +xfs_attr_node_get( + struct xfs_da_args *args) { - xfs_da_state_t *state; - xfs_da_state_blk_t *blk; - int error, retval; - int i; + struct xfs_da_state *state; + struct xfs_da_state_blk *blk; + int i; + int error; trace_xfs_attr_node_get(args); - state = xfs_da_state_alloc(); - state->args = args; - state->mp = args->dp->i_mount; - /* * Search to see if name exists, and get back a pointer to it. 
*/ - error = xfs_da3_node_lookup_int(state, &retval); - if (error) { - retval = error; - goto out_release; - } - if (retval != -EEXIST) + error = xfs_attr_node_hasname(args, &state); + if (error != -EEXIST) goto out_release; /* * Get the value, local or "remote" */ blk = &state->path.blk[state->path.active - 1]; - retval = xfs_attr3_leaf_getvalue(blk->bp, args); + error = xfs_attr3_leaf_getvalue(blk->bp, args); /* * If not in a transaction, we have to release all the buffers. */ out_release: - for (i = 0; i < state->path.active; i++) { + for (i = 0; state != NULL && i < state->path.active; i++) { xfs_trans_brelse(args->trans, state->path.blk[i].bp); state->path.blk[i].bp = NULL; } - xfs_da_state_free(state); - return retval; + if (state) + xfs_da_state_free(state); + return error; } /* Returns true if the attribute entry name is valid. */ diff --git a/fs/xfs/libxfs/xfs_attr.h b/fs/xfs/libxfs/xfs_attr.h index db4717657ca1..3e97a935e712 100644 --- a/fs/xfs/libxfs/xfs_attr.h +++ b/fs/xfs/libxfs/xfs_attr.h @@ -89,6 +89,7 @@ int xfs_attr_get_ilocked(struct xfs_da_args *args); int xfs_attr_get(struct xfs_da_args *args); int xfs_attr_set(struct xfs_da_args *args); int xfs_attr_set_args(struct xfs_da_args *args); +int xfs_has_attr(struct xfs_da_args *args); int xfs_attr_remove_args(struct xfs_da_args *args); bool xfs_attr_namecheck(const void *name, size_t length); diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c index 2f7e89e4be3e..305d4bc07337 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.c +++ b/fs/xfs/libxfs/xfs_attr_leaf.c @@ -653,25 +653,72 @@ xfs_attr_shortform_create( ASSERT(ifp->if_flags & XFS_IFINLINE); } xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK); - hdr = (xfs_attr_sf_hdr_t *)ifp->if_u1.if_data; - hdr->count = 0; + hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data; + memset(hdr, 0, sizeof(*hdr)); hdr->totsize = cpu_to_be16(sizeof(*hdr)); xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA); } /* + * Return -EEXIST if attr is found, or -ENOATTR if not + * args: args containing attribute name and namelen + * sfep: If not null, pointer will be set to the last attr entry found on + -EEXIST. On -ENOATTR pointer is left at the last entry in the list + * basep: If not null, pointer is set to the byte offset of the entry in the + * list on -EEXIST. On -ENOATTR, pointer is left at the byte offset of + * the last entry in the list + */ +int +xfs_attr_sf_findname( + struct xfs_da_args *args, + struct xfs_attr_sf_entry **sfep, + unsigned int *basep) +{ + struct xfs_attr_shortform *sf; + struct xfs_attr_sf_entry *sfe; + unsigned int base = sizeof(struct xfs_attr_sf_hdr); + int size = 0; + int end; + int i; + + sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data; + sfe = &sf->list[0]; + end = sf->hdr.count; + for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), + base += size, i++) { + size = XFS_ATTR_SF_ENTSIZE(sfe); + if (!xfs_attr_match(args, sfe->namelen, sfe->nameval, + sfe->flags)) + continue; + break; + } + + if (sfep != NULL) + *sfep = sfe; + + if (basep != NULL) + *basep = base; + + if (i == end) + return -ENOATTR; + return -EEXIST; +} + +/* * Add a name/value pair to the shortform attribute list. * Overflow from the inode has already been checked for. 
*/ void -xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) +xfs_attr_shortform_add( + struct xfs_da_args *args, + int forkoff) { - xfs_attr_shortform_t *sf; - xfs_attr_sf_entry_t *sfe; - int i, offset, size; - xfs_mount_t *mp; - xfs_inode_t *dp; - struct xfs_ifork *ifp; + struct xfs_attr_shortform *sf; + struct xfs_attr_sf_entry *sfe; + int offset, size; + struct xfs_mount *mp; + struct xfs_inode *dp; + struct xfs_ifork *ifp; trace_xfs_attr_sf_add(args); @@ -682,11 +729,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff) ifp = dp->i_afp; ASSERT(ifp->if_flags & XFS_IFINLINE); sf = (xfs_attr_shortform_t *)ifp->if_u1.if_data; - sfe = &sf->list[0]; - for (i = 0; i < sf->hdr.count; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), i++) { - ASSERT(!xfs_attr_match(args, sfe->namelen, sfe->nameval, - sfe->flags)); - } + if (xfs_attr_sf_findname(args, &sfe, NULL) == -EEXIST) + ASSERT(0); offset = (char *)sfe - (char *)sf; size = XFS_ATTR_SF_ENTSIZE_BYNAME(args->namelen, args->valuelen); @@ -728,31 +772,27 @@ xfs_attr_fork_remove( * Remove an attribute from the shortform attribute list structure. */ int -xfs_attr_shortform_remove(xfs_da_args_t *args) +xfs_attr_shortform_remove( + struct xfs_da_args *args) { - xfs_attr_shortform_t *sf; - xfs_attr_sf_entry_t *sfe; - int base, size=0, end, totsize, i; - xfs_mount_t *mp; - xfs_inode_t *dp; + struct xfs_attr_shortform *sf; + struct xfs_attr_sf_entry *sfe; + int size = 0, end, totsize; + unsigned int base; + struct xfs_mount *mp; + struct xfs_inode *dp; + int error; trace_xfs_attr_sf_remove(args); dp = args->dp; mp = dp->i_mount; - base = sizeof(xfs_attr_sf_hdr_t); sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data; - sfe = &sf->list[0]; - end = sf->hdr.count; - for (i = 0; i < end; sfe = XFS_ATTR_SF_NEXTENTRY(sfe), - base += size, i++) { - size = XFS_ATTR_SF_ENTSIZE(sfe); - if (xfs_attr_match(args, sfe->namelen, sfe->nameval, - sfe->flags)) - break; - } - if (i == end) - return -ENOATTR; + + error = xfs_attr_sf_findname(args, &sfe, &base); + if (error != -EEXIST) + return error; + size = XFS_ATTR_SF_ENTSIZE(sfe); /* * Fix up the attribute fork data, covering the hole @@ -996,8 +1036,10 @@ xfs_attr_shortform_verify( * struct xfs_attr_sf_entry has a variable length. * Check the fixed-offset parts of the structure are * within the data buffer. + * xfs_attr_sf_entry is defined with a 1-byte variable + * array at the end, so we must subtract that off. */ - if (((char *)sfep + sizeof(*sfep)) >= endp) + if (((char *)sfep + sizeof(*sfep) - 1) >= endp) return __this_address; /* Don't allow names with known bad length. */ @@ -2742,10 +2784,7 @@ xfs_attr3_leaf_clearflag( XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); } - /* - * Commit the flag value change and start the next trans in series. - */ - return xfs_trans_roll_inode(&args->trans, args->dp); + return 0; } /* @@ -2793,10 +2832,7 @@ xfs_attr3_leaf_setflag( XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt))); } - /* - * Commit the flag value change and start the next trans in series. - */ - return xfs_trans_roll_inode(&args->trans, args->dp); + return 0; } /* @@ -2911,10 +2947,5 @@ xfs_attr3_leaf_flipflags( XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt))); } - /* - * Commit the flag value change and start the next trans in series. 
- */ - error = xfs_trans_roll_inode(&args->trans, args->dp); - - return error; + return 0; } diff --git a/fs/xfs/libxfs/xfs_attr_leaf.h b/fs/xfs/libxfs/xfs_attr_leaf.h index 5be6be309302..9b1c59f40a26 100644 --- a/fs/xfs/libxfs/xfs_attr_leaf.h +++ b/fs/xfs/libxfs/xfs_attr_leaf.h @@ -52,6 +52,9 @@ int xfs_attr_shortform_getvalue(struct xfs_da_args *args); int xfs_attr_shortform_to_leaf(struct xfs_da_args *args, struct xfs_buf **leaf_bp); int xfs_attr_shortform_remove(struct xfs_da_args *args); +int xfs_attr_sf_findname(struct xfs_da_args *args, + struct xfs_attr_sf_entry **sfep, + unsigned int *basep); int xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp); int xfs_attr_shortform_bytesfit(struct xfs_inode *dp, int bytes); xfs_failaddr_t xfs_attr_shortform_verify(struct xfs_inode *ip); diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c index 01ad7f353e08..3f80cede7406 100644 --- a/fs/xfs/libxfs/xfs_attr_remote.c +++ b/fs/xfs/libxfs/xfs_attr_remote.c @@ -440,32 +440,23 @@ xfs_attr_rmtval_get( } /* - * Write the value associated with an attribute into the out-of-line buffer - * that we have defined for it. + * Find a "hole" in the attribute address space large enough for us to drop the + * new attribute's value into */ -int -xfs_attr_rmtval_set( +STATIC int +xfs_attr_rmt_find_hole( struct xfs_da_args *args) { struct xfs_inode *dp = args->dp; struct xfs_mount *mp = dp->i_mount; - struct xfs_bmbt_irec map; - xfs_dablk_t lblkno; - xfs_fileoff_t lfileoff = 0; - uint8_t *src = args->value; - int blkcnt; - int valuelen; - int nmap; int error; - int offset = 0; - - trace_xfs_attr_rmtval_set(args); + int blkcnt; + xfs_fileoff_t lfileoff = 0; /* - * Find a "hole" in the attribute address space large enough for - * us to drop the new attribute's value into. Because CRC enable - * attributes have headers, we can't just do a straight byte to FSB - * conversion and have to take the header space into account. + * Because CRC enable attributes have headers, we can't just do a + * straight byte to FSB conversion and have to take the header space + * into account. */ blkcnt = xfs_attr3_rmt_blocks(mp, args->rmtvaluelen); error = xfs_bmap_first_unused(args->trans, args->dp, blkcnt, &lfileoff, @@ -473,48 +464,26 @@ xfs_attr_rmtval_set( if (error) return error; - args->rmtblkno = lblkno = (xfs_dablk_t)lfileoff; + args->rmtblkno = (xfs_dablk_t)lfileoff; args->rmtblkcnt = blkcnt; - /* - * Roll through the "value", allocating blocks on disk as required. - */ - while (blkcnt > 0) { - /* - * Allocate a single extent, up to the size of the value. - * - * Note that we have to consider this a data allocation as we - * write the remote attribute without logging the contents. - * Hence we must ensure that we aren't using blocks that are on - * the busy list so that we don't overwrite blocks which have - * recently been freed but their transactions are not yet - * committed to disk. If we overwrite the contents of a busy - * extent and then crash then the block may not contain the - * correct metadata after log recovery occurs. 
- */ - nmap = 1; - error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, - blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map, - &nmap); - if (error) - return error; - error = xfs_defer_finish(&args->trans); - if (error) - return error; - - ASSERT(nmap == 1); - ASSERT((map.br_startblock != DELAYSTARTBLOCK) && - (map.br_startblock != HOLESTARTBLOCK)); - lblkno += map.br_blockcount; - blkcnt -= map.br_blockcount; + return 0; +} - /* - * Start the next trans in the chain. - */ - error = xfs_trans_roll_inode(&args->trans, dp); - if (error) - return error; - } +STATIC int +xfs_attr_rmtval_set_value( + struct xfs_da_args *args) +{ + struct xfs_inode *dp = args->dp; + struct xfs_mount *mp = dp->i_mount; + struct xfs_bmbt_irec map; + xfs_dablk_t lblkno; + uint8_t *src = args->value; + int blkcnt; + int valuelen; + int nmap; + int error; + int offset = 0; /* * Roll through the "value", copying the attribute value to the @@ -595,19 +564,82 @@ xfs_attr_rmtval_stale( } /* + * Write the value associated with an attribute into the out-of-line buffer + * that we have defined for it. + */ +int +xfs_attr_rmtval_set( + struct xfs_da_args *args) +{ + struct xfs_inode *dp = args->dp; + struct xfs_bmbt_irec map; + xfs_dablk_t lblkno; + int blkcnt; + int nmap; + int error; + + trace_xfs_attr_rmtval_set(args); + + error = xfs_attr_rmt_find_hole(args); + if (error) + return error; + + blkcnt = args->rmtblkcnt; + lblkno = (xfs_dablk_t)args->rmtblkno; + /* + * Roll through the "value", allocating blocks on disk as required. + */ + while (blkcnt > 0) { + /* + * Allocate a single extent, up to the size of the value. + * + * Note that we have to consider this a data allocation as we + * write the remote attribute without logging the contents. + * Hence we must ensure that we aren't using blocks that are on + * the busy list so that we don't overwrite blocks which have + * recently been freed but their transactions are not yet + * committed to disk. If we overwrite the contents of a busy + * extent and then crash then the block may not contain the + * correct metadata after log recovery occurs. + */ + nmap = 1; + error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, + blkcnt, XFS_BMAPI_ATTRFORK, args->total, &map, + &nmap); + if (error) + return error; + error = xfs_defer_finish(&args->trans); + if (error) + return error; + + ASSERT(nmap == 1); + ASSERT((map.br_startblock != DELAYSTARTBLOCK) && + (map.br_startblock != HOLESTARTBLOCK)); + lblkno += map.br_blockcount; + blkcnt -= map.br_blockcount; + + /* + * Start the next trans in the chain. + */ + error = xfs_trans_roll_inode(&args->trans, dp); + if (error) + return error; + } + + return xfs_attr_rmtval_set_value(args); +} + +/* * Remove the value associated with an attribute by deleting the * out-of-line buffer that it is stored on. */ int -xfs_attr_rmtval_remove( +xfs_attr_rmtval_invalidate( struct xfs_da_args *args) { xfs_dablk_t lblkno; int blkcnt; int error; - int done; - - trace_xfs_attr_rmtval_remove(args); /* * Roll through the "value", invalidating the attribute value's blocks. @@ -635,21 +667,29 @@ xfs_attr_rmtval_remove( lblkno += map.br_blockcount; blkcnt -= map.br_blockcount; } + return 0; +} + +/* + * Remove the value associated with an attribute by deleting the + * out-of-line buffer that it is stored on. + */ +int +xfs_attr_rmtval_remove( + struct xfs_da_args *args) +{ + int error; + int retval; + + trace_xfs_attr_rmtval_remove(args); /* * Keep de-allocating extents until the remote-value region is gone. 
*/ - lblkno = args->rmtblkno; - blkcnt = args->rmtblkcnt; - done = 0; - while (!done) { - error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt, - XFS_BMAPI_ATTRFORK, 1, &done); - if (error) - return error; - error = xfs_defer_finish(&args->trans); - if (error) - return error; + do { + retval = __xfs_attr_rmtval_remove(args); + if (retval && retval != -EAGAIN) + return retval; /* * Close out trans and start the next one in the chain. @@ -657,6 +697,36 @@ xfs_attr_rmtval_remove( error = xfs_trans_roll_inode(&args->trans, args->dp); if (error) return error; - } + } while (retval == -EAGAIN); + return 0; } + +/* + * Remove the value associated with an attribute by deleting the out-of-line + * buffer that it is stored on. Returns EAGAIN for the caller to refresh the + * transaction and re-call the function + */ +int +__xfs_attr_rmtval_remove( + struct xfs_da_args *args) +{ + int error, done; + + /* + * Unmap value blocks for this attr. + */ + error = xfs_bunmapi(args->trans, args->dp, args->rmtblkno, + args->rmtblkcnt, XFS_BMAPI_ATTRFORK, 1, &done); + if (error) + return error; + + error = xfs_defer_finish(&args->trans); + if (error) + return error; + + if (!done) + return -EAGAIN; + + return error; +} diff --git a/fs/xfs/libxfs/xfs_attr_remote.h b/fs/xfs/libxfs/xfs_attr_remote.h index e1144f22b005..9eee615da156 100644 --- a/fs/xfs/libxfs/xfs_attr_remote.h +++ b/fs/xfs/libxfs/xfs_attr_remote.h @@ -13,5 +13,6 @@ int xfs_attr_rmtval_set(struct xfs_da_args *args); int xfs_attr_rmtval_remove(struct xfs_da_args *args); int xfs_attr_rmtval_stale(struct xfs_inode *ip, struct xfs_bmbt_irec *map, xfs_buf_flags_t incore_flags); - +int xfs_attr_rmtval_invalidate(struct xfs_da_args *args); +int __xfs_attr_rmtval_remove(struct xfs_da_args *args); #endif /* __XFS_ATTR_REMOTE_H__ */ diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 667cdd0dfdf4..1b0a01b06a05 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -553,7 +553,8 @@ __xfs_bmap_add_free( #endif ASSERT(xfs_bmap_free_item_zone != NULL); - new = kmem_zone_alloc(xfs_bmap_free_item_zone, 0); + new = kmem_cache_alloc(xfs_bmap_free_item_zone, + GFP_KERNEL | __GFP_NOFAIL); new->xefi_startblock = bno; new->xefi_blockcount = (xfs_extlen_t)len; if (oinfo) @@ -1098,7 +1099,10 @@ xfs_bmap_add_attrfork( if (error) goto trans_cancel; ASSERT(ip->i_afp == NULL); - ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, 0); + + ip->i_afp = kmem_cache_zalloc(xfs_ifork_zone, + GFP_KERNEL | __GFP_NOFAIL); + ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS; ip->i_afp->if_flags = XFS_IFEXTENTS; logflags = 0; @@ -6222,7 +6226,7 @@ xfs_bmap_validate_extent( isrt = XFS_IS_REALTIME_INODE(ip); endfsb = irec->br_startblock + irec->br_blockcount - 1; - if (isrt) { + if (isrt && whichfork == XFS_DATA_FORK) { if (!xfs_verify_rtbno(mp, irec->br_startblock)) return __this_address; if (!xfs_verify_rtbno(mp, endfsb)) diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index 6028a3c825ba..e1bd484e5548 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h @@ -158,17 +158,22 @@ static inline int xfs_bmapi_whichfork(int bmapi_flags) { BMAP_ATTRFORK, "ATTR" }, \ { BMAP_COWFORK, "COW" } +/* Return true if the extent is an allocated extent, written or not. 
*/ +static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec) +{ + return irec->br_startblock != HOLESTARTBLOCK && + irec->br_startblock != DELAYSTARTBLOCK && + !isnullstartblock(irec->br_startblock); +} /* * Return true if the extent is a real, allocated extent, or false if it is a * delayed allocation, and unwritten extent or a hole. */ -static inline bool xfs_bmap_is_real_extent(struct xfs_bmbt_irec *irec) +static inline bool xfs_bmap_is_written_extent(struct xfs_bmbt_irec *irec) { - return irec->br_state != XFS_EXT_UNWRITTEN && - irec->br_startblock != HOLESTARTBLOCK && - irec->br_startblock != DELAYSTARTBLOCK && - !isnullstartblock(irec->br_startblock); + return xfs_bmap_is_real_extent(irec) && + irec->br_state != XFS_EXT_UNWRITTEN; } /* diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c index d9c63f17d2de..ecec604e6e4d 100644 --- a/fs/xfs/libxfs/xfs_bmap_btree.c +++ b/fs/xfs/libxfs/xfs_bmap_btree.c @@ -552,7 +552,7 @@ xfs_bmbt_init_cursor( struct xfs_btree_cur *cur; ASSERT(whichfork != XFS_COW_FORK); - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); + cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL); cur->bc_tp = tp; cur->bc_mp = mp; diff --git a/fs/xfs/libxfs/xfs_btree_staging.h b/fs/xfs/libxfs/xfs_btree_staging.h index 643f0f9b2994..f0d2976050ae 100644 --- a/fs/xfs/libxfs/xfs_btree_staging.h +++ b/fs/xfs/libxfs/xfs_btree_staging.h @@ -18,7 +18,7 @@ struct xbtree_afakeroot { unsigned int af_blocks; }; -/* Cursor interactions with with fake roots for AG-rooted btrees. */ +/* Cursor interactions with fake roots for AG-rooted btrees. */ void xfs_btree_stage_afakeroot(struct xfs_btree_cur *cur, struct xbtree_afakeroot *afake); void xfs_btree_commit_afakeroot(struct xfs_btree_cur *cur, struct xfs_trans *tp, @@ -45,7 +45,7 @@ struct xbtree_ifakeroot { unsigned int if_extents; }; -/* Cursor interactions with with fake roots for inode-rooted btrees. */ +/* Cursor interactions with fake roots for inode-rooted btrees. */ void xfs_btree_stage_ifakeroot(struct xfs_btree_cur *cur, struct xbtree_ifakeroot *ifake, struct xfs_btree_ops **new_ops); @@ -90,7 +90,7 @@ struct xfs_btree_bload { /* * Number of free records to leave in each leaf block. If the caller - * sets this to -1, the slack value will be calculated to be be halfway + * sets this to -1, the slack value will be calculated to be halfway * between maxrecs and minrecs. This typically leaves the block 75% * full. Note that slack values are not enforced on inode root blocks. */ diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c index 897749c41f36..e46bc03365db 100644 --- a/fs/xfs/libxfs/xfs_da_btree.c +++ b/fs/xfs/libxfs/xfs_da_btree.c @@ -78,10 +78,16 @@ kmem_zone_t *xfs_da_state_zone; /* anchor for state struct zone */ * Allocate a dir-state structure. * We don't put them on the stack since they're large. 
*/ -xfs_da_state_t * -xfs_da_state_alloc(void) +struct xfs_da_state * +xfs_da_state_alloc( + struct xfs_da_args *args) { - return kmem_zone_zalloc(xfs_da_state_zone, KM_NOFS); + struct xfs_da_state *state; + + state = kmem_cache_zalloc(xfs_da_state_zone, GFP_NOFS | __GFP_NOFAIL); + state->args = args; + state->mp = args->dp->i_mount; + return state; } /* diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h index 6e25de6621e4..ad5dd324631a 100644 --- a/fs/xfs/libxfs/xfs_da_btree.h +++ b/fs/xfs/libxfs/xfs_da_btree.h @@ -219,7 +219,7 @@ enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args, const unsigned char *name, int len); -xfs_da_state_t *xfs_da_state_alloc(void); +struct xfs_da_state *xfs_da_state_alloc(struct xfs_da_args *args); void xfs_da_state_free(xfs_da_state_t *state); void xfs_da3_node_hdr_from_disk(struct xfs_mount *mp, diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index 6ac4aad98cd7..5d51265d29d6 100644 --- a/fs/xfs/libxfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c @@ -2015,9 +2015,7 @@ xfs_dir2_node_addname( /* * Allocate and initialize the state (btree cursor). */ - state = xfs_da_state_alloc(); - state->args = args; - state->mp = args->dp->i_mount; + state = xfs_da_state_alloc(args); /* * Look up the name. We're not supposed to find it, but * this gives us the insertion point. @@ -2086,9 +2084,8 @@ xfs_dir2_node_lookup( /* * Allocate and initialize the btree cursor. */ - state = xfs_da_state_alloc(); - state->args = args; - state->mp = args->dp->i_mount; + state = xfs_da_state_alloc(args); + /* * Fill in the path to the entry in the cursor. */ @@ -2139,9 +2136,7 @@ xfs_dir2_node_removename( /* * Allocate and initialize the btree cursor. */ - state = xfs_da_state_alloc(); - state->args = args; - state->mp = args->dp->i_mount; + state = xfs_da_state_alloc(args); /* Look up the entry we're deleting, set up the cursor. */ error = xfs_da3_node_lookup_int(state, &rval); @@ -2206,9 +2201,7 @@ xfs_dir2_node_replace( /* * Allocate and initialize the btree cursor. */ - state = xfs_da_state_alloc(); - state->args = args; - state->mp = args->dp->i_mount; + state = xfs_da_state_alloc(args); /* * We have to save new inode number and ftype since diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c index bedc1e752b60..5a2db00b9d5f 100644 --- a/fs/xfs/libxfs/xfs_dquot_buf.c +++ b/fs/xfs/libxfs/xfs_dquot_buf.c @@ -37,9 +37,10 @@ xfs_failaddr_t xfs_dquot_verify( struct xfs_mount *mp, struct xfs_disk_dquot *ddq, - xfs_dqid_t id, - uint type) /* used only during quotacheck */ + xfs_dqid_t id) /* used only during quotacheck */ { + __u8 ddq_type; + /* * We can encounter an uninitialized dquot buffer for 2 reasons: * 1. 
If we crash while deleting the quotainode(s), and those blks got @@ -60,11 +61,12 @@ xfs_dquot_verify( if (ddq->d_version != XFS_DQUOT_VERSION) return __this_address; - if (type && ddq->d_flags != type) + if (ddq->d_type & ~XFS_DQTYPE_ANY) return __this_address; - if (ddq->d_flags != XFS_DQ_USER && - ddq->d_flags != XFS_DQ_PROJ && - ddq->d_flags != XFS_DQ_GROUP) + ddq_type = ddq->d_type & XFS_DQTYPE_REC_MASK; + if (ddq_type != XFS_DQTYPE_USER && + ddq_type != XFS_DQTYPE_PROJ && + ddq_type != XFS_DQTYPE_GROUP) return __this_address; if (id != -1 && id != be32_to_cpu(ddq->d_id)) @@ -95,14 +97,13 @@ xfs_failaddr_t xfs_dqblk_verify( struct xfs_mount *mp, struct xfs_dqblk *dqb, - xfs_dqid_t id, - uint type) /* used only during quotacheck */ + xfs_dqid_t id) /* used only during quotacheck */ { if (xfs_sb_version_hascrc(&mp->m_sb) && !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid)) return __this_address; - return xfs_dquot_verify(mp, &dqb->dd_diskdq, id, type); + return xfs_dquot_verify(mp, &dqb->dd_diskdq, id); } /* @@ -113,7 +114,7 @@ xfs_dqblk_repair( struct xfs_mount *mp, struct xfs_dqblk *dqb, xfs_dqid_t id, - uint type) + xfs_dqtype_t type) { /* * Typically, a repair is only requested by quotacheck. @@ -123,7 +124,7 @@ xfs_dqblk_repair( dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION; - dqb->dd_diskdq.d_flags = type; + dqb->dd_diskdq.d_type = type; dqb->dd_diskdq.d_id = cpu_to_be32(id); if (xfs_sb_version_hascrc(&mp->m_sb)) { @@ -205,7 +206,7 @@ xfs_dquot_buf_verify( if (i == 0) id = be32_to_cpu(ddq->d_id); - fa = xfs_dqblk_verify(mp, &dqb[i], id + i, 0); + fa = xfs_dqblk_verify(mp, &dqb[i], id + i); if (fa) { if (!readahead) xfs_buf_verifier_error(bp, -EFSCORRUPTED, diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h index b42a52bfa1e9..31b7ece985bb 100644 --- a/fs/xfs/libxfs/xfs_format.h +++ b/fs/xfs/libxfs/xfs_format.h @@ -1149,16 +1149,26 @@ static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev) #define XFS_DQUOT_MAGIC 0x4451 /* 'DQ' */ #define XFS_DQUOT_VERSION (uint8_t)0x01 /* latest version number */ +#define XFS_DQTYPE_USER 0x01 /* user dquot record */ +#define XFS_DQTYPE_PROJ 0x02 /* project dquot record */ +#define XFS_DQTYPE_GROUP 0x04 /* group dquot record */ + +/* bitmask to determine if this is a user/group/project dquot */ +#define XFS_DQTYPE_REC_MASK (XFS_DQTYPE_USER | \ + XFS_DQTYPE_PROJ | \ + XFS_DQTYPE_GROUP) + +#define XFS_DQTYPE_ANY (XFS_DQTYPE_REC_MASK) + /* - * This is the main portion of the on-disk representation of quota - * information for a user. This is the q_core of the struct xfs_dquot that - * is kept in kernel memory. We pad this with some more expansion room - * to construct the on disk structure. + * This is the main portion of the on-disk representation of quota information + * for a user. We pad this with some more expansion room to construct the on + * disk structure. */ struct xfs_disk_dquot { __be16 d_magic; /* dquot magic = XFS_DQUOT_MAGIC */ __u8 d_version; /* dquot version */ - __u8 d_flags; /* XFS_DQ_USER/PROJ/GROUP */ + __u8 d_type; /* XFS_DQTYPE_USER/PROJ/GROUP */ __be32 d_id; /* user,project,group id */ __be64 d_blk_hardlimit;/* absolute limit on disk blks */ __be64 d_blk_softlimit;/* preferred limit on disk blks */ @@ -1199,6 +1209,22 @@ typedef struct xfs_dqblk { #define XFS_DQUOT_CRC_OFF offsetof(struct xfs_dqblk, dd_crc) /* + * This defines the unit of allocation of dquots. 
+ * + * Currently, it is just one file system block, and a 4K blk contains 30 + * (136 * 30 = 4080) dquots. It's probably not worth trying to make + * this more dynamic. + * + * However, if this number is changed, we have to make sure that we don't + * implicitly assume that we do allocations in chunks of a single filesystem + * block in the dquot/xqm code. + * + * This is part of the ondisk format because the structure size is not a power + * of two, which leaves slack at the end of the disk block. + */ +#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 + +/* * Remote symlink format and access functions. */ #define XFS_SYMLINK_MAGIC 0x58534c4d /* XSLM */ diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 7fcf62b324b0..a6b37db55169 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c @@ -688,7 +688,7 @@ xfs_ialloc_ag_alloc( args.minalignslop = igeo->cluster_align - 1; /* Allow space for the inode btree to split. */ - args.minleft = igeo->inobt_maxlevels - 1; + args.minleft = igeo->inobt_maxlevels; if ((error = xfs_alloc_vextent(&args))) return error; @@ -736,7 +736,7 @@ xfs_ialloc_ag_alloc( /* * Allow space for the inode btree to split. */ - args.minleft = igeo->inobt_maxlevels - 1; + args.minleft = igeo->inobt_maxlevels; if ((error = xfs_alloc_vextent(&args))) return error; } @@ -888,10 +888,9 @@ sparse_alloc: */ be32_add_cpu(&agi->agi_count, newlen); be32_add_cpu(&agi->agi_freecount, newlen); - pag = xfs_perag_get(args.mp, agno); + pag = agbp->b_pag; pag->pagi_freecount += newlen; pag->pagi_count += newlen; - xfs_perag_put(pag); agi->agi_newino = cpu_to_be32(newino); /* @@ -1134,7 +1133,7 @@ xfs_dialloc_ag_inobt( xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); - struct xfs_perag *pag; + struct xfs_perag *pag = agbp->b_pag; struct xfs_btree_cur *cur, *tcur; struct xfs_inobt_rec_incore rec, trec; xfs_ino_t ino; @@ -1143,8 +1142,6 @@ xfs_dialloc_ag_inobt( int i, j; int searchdistance = 10; - pag = xfs_perag_get(mp, agno); - ASSERT(pag->pagi_init); ASSERT(pag->pagi_inodeok); ASSERT(pag->pagi_freecount > 0); @@ -1384,14 +1381,12 @@ alloc_inode: xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); - xfs_perag_put(pag); *inop = ino; return 0; error1: xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR); error0: xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); - xfs_perag_put(pag); return error; } @@ -1587,7 +1582,6 @@ xfs_dialloc_ag( xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); xfs_agnumber_t pagno = XFS_INO_TO_AGNO(mp, parent); xfs_agino_t pagino = XFS_INO_TO_AGINO(mp, parent); - struct xfs_perag *pag; struct xfs_btree_cur *cur; /* finobt cursor */ struct xfs_btree_cur *icur; /* inobt cursor */ struct xfs_inobt_rec_incore rec; @@ -1599,8 +1593,6 @@ xfs_dialloc_ag( if (!xfs_sb_version_hasfinobt(&mp->m_sb)) return xfs_dialloc_ag_inobt(tp, agbp, parent, inop); - pag = xfs_perag_get(mp, agno); - /* * If pagino is 0 (this is the root inode allocation) use newino. * This must work because we've just allocated some. 
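A minimal standalone sketch of the dquot-cluster arithmetic quoted in the XFS_DQUOT_CLUSTER_SIZE_FSB comment above; this is illustrative only and not part of the patch. The 136-byte on-disk dquot record size and the 4K block size are taken from that comment rather than re-derived from the structure layout.

#include <stdio.h>

int main(void)
{
	const unsigned int blksize = 4096;	/* 4K filesystem block, per the comment */
	const unsigned int dqrec   = 136;	/* on-disk dquot record size, per the comment */
	unsigned int per_blk = blksize / dqrec;	/* integer division: 30 records fit */
	unsigned int used    = per_blk * dqrec;	/* 30 * 136 = 4080 bytes used */
	unsigned int slack   = blksize - used;	/* remaining slack at end of block */

	printf("%u dquots per block, %u bytes used, %u bytes slack\n",
	       per_blk, used, slack);
	return 0;
}

Running this prints 30 dquots per block, 4080 bytes used and 16 bytes of slack, which matches the "136 * 30 = 4080" figure in the comment and illustrates why the non-power-of-two record size leaves slack at the end of each disk block.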
@@ -1667,7 +1659,7 @@ xfs_dialloc_ag( */ be32_add_cpu(&agi->agi_freecount, -1); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); - pag->pagi_freecount--; + agbp->b_pag->pagi_freecount--; xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1); @@ -1680,7 +1672,6 @@ xfs_dialloc_ag( xfs_btree_del_cursor(icur, XFS_BTREE_NOERROR); xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); - xfs_perag_put(pag); *inop = ino; return 0; @@ -1688,7 +1679,6 @@ error_icur: xfs_btree_del_cursor(icur, XFS_BTREE_ERROR); error_cur: xfs_btree_del_cursor(cur, XFS_BTREE_ERROR); - xfs_perag_put(pag); return error; } @@ -1945,7 +1935,6 @@ xfs_difree_inobt( { struct xfs_agi *agi = agbp->b_addr; xfs_agnumber_t agno = be32_to_cpu(agi->agi_seqno); - struct xfs_perag *pag; struct xfs_btree_cur *cur; struct xfs_inobt_rec_incore rec; int ilen; @@ -2007,6 +1996,8 @@ xfs_difree_inobt( if (!(mp->m_flags & XFS_MOUNT_IKEEP) && rec.ir_free == XFS_INOBT_ALL_FREE && mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { + struct xfs_perag *pag = agbp->b_pag; + xic->deleted = true; xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); xic->alloc = xfs_inobt_irec_to_allocmask(&rec); @@ -2020,10 +2011,8 @@ xfs_difree_inobt( be32_add_cpu(&agi->agi_count, -ilen); be32_add_cpu(&agi->agi_freecount, -(ilen - 1)); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_COUNT | XFS_AGI_FREECOUNT); - pag = xfs_perag_get(mp, agno); pag->pagi_freecount -= ilen - 1; pag->pagi_count -= ilen; - xfs_perag_put(pag); xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, -ilen); xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -(ilen - 1)); @@ -2049,9 +2038,7 @@ xfs_difree_inobt( */ be32_add_cpu(&agi->agi_freecount, 1); xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT); - pag = xfs_perag_get(mp, agno); - pag->pagi_freecount++; - xfs_perag_put(pag); + agbp->b_pag->pagi_freecount++; xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, 1); } @@ -2661,7 +2648,7 @@ xfs_ialloc_read_agi( return error; agi = (*bpp)->b_addr; - pag = xfs_perag_get(mp, agno); + pag = (*bpp)->b_pag; if (!pag->pagi_init) { pag->pagi_freecount = be32_to_cpu(agi->agi_freecount); pag->pagi_count = be32_to_cpu(agi->agi_count); @@ -2674,7 +2661,6 @@ xfs_ialloc_read_agi( */ ASSERT(pag->pagi_freecount == be32_to_cpu(agi->agi_freecount) || XFS_FORCED_SHUTDOWN(mp)); - xfs_perag_put(pag); return 0; } diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c index b2c122ad8f0e..3c8aebc36e64 100644 --- a/fs/xfs/libxfs/xfs_ialloc_btree.c +++ b/fs/xfs/libxfs/xfs_ialloc_btree.c @@ -411,7 +411,7 @@ xfs_inobt_init_common( { struct xfs_btree_cur *cur; - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); + cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL); cur->bc_tp = tp; cur->bc_mp = mp; cur->bc_btnum = btnum; diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c index 6f84ea85fdd8..8d5dd08eab75 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.c +++ b/fs/xfs/libxfs/xfs_inode_buf.c @@ -21,30 +21,6 @@ #include <linux/iversion.h> /* - * Check that none of the inode's in the buffer have a next - * unlinked field of 0. 
- */ -#if defined(DEBUG) -void -xfs_inobp_check( - xfs_mount_t *mp, - xfs_buf_t *bp) -{ - int i; - xfs_dinode_t *dip; - - for (i = 0; i < M_IGEO(mp)->inodes_per_cluster; i++) { - dip = xfs_buf_offset(bp, i * mp->m_sb.sb_inodesize); - if (!dip->di_next_unlinked) { - xfs_alert(mp, - "Detected bogus zero next_unlinked field in inode %d buffer 0x%llx.", - i, (long long)bp->b_bn); - } - } -} -#endif - -/* * If we are doing readahead on an inode buffer, we might be in log recovery * reading an inode allocation buffer that hasn't yet been replayed, and hence * has not had the inode cores stamped into it. Hence for readahead, the buffer @@ -53,10 +29,10 @@ xfs_inobp_check( * If the readahead buffer is invalid, we need to mark it with an error and * clear the DONE status of the buffer so that a followup read will re-read it * from disk. We don't report the error otherwise to avoid warnings during log - * recovery and we don't get unnecssary panics on debug kernels. We use EIO here + * recovery and we don't get unnecessary panics on debug kernels. We use EIO here * because all we want to do is say readahead failed; there is no-one to report * the error to, so this will distinguish it from a non-ra verifier failure. - * Changes to this readahead error behavour also need to be reflected in + * Changes to this readahead error behaviour also need to be reflected in * xfs_dquot_buf_readahead_verify(). */ static void @@ -176,7 +152,8 @@ xfs_imap_to_bp( } *bpp = bp; - *dipp = xfs_buf_offset(bp, imap->im_boffset); + if (dipp) + *dipp = xfs_buf_offset(bp, imap->im_boffset); return 0; } @@ -203,7 +180,7 @@ xfs_inode_from_disk( /* * First get the permanent information that is needed to allocate an * inode. If the inode is unused, mode is zero and we shouldn't mess - * with the unitialized part of it. + * with the uninitialized part of it. */ to->di_flushiter = be16_to_cpu(from->di_flushiter); inode->i_generation = be32_to_cpu(from->di_gen); diff --git a/fs/xfs/libxfs/xfs_inode_buf.h b/fs/xfs/libxfs/xfs_inode_buf.h index 865ac493c72a..6b08b9d060c2 100644 --- a/fs/xfs/libxfs/xfs_inode_buf.h +++ b/fs/xfs/libxfs/xfs_inode_buf.h @@ -52,12 +52,6 @@ int xfs_inode_from_disk(struct xfs_inode *ip, struct xfs_dinode *from); void xfs_log_dinode_to_disk(struct xfs_log_dinode *from, struct xfs_dinode *to); -#if defined(DEBUG) -void xfs_inobp_check(struct xfs_mount *, struct xfs_buf *); -#else -#define xfs_inobp_check(mp, bp) -#endif /* DEBUG */ - xfs_failaddr_t xfs_dinode_verify(struct xfs_mount *mp, xfs_ino_t ino, struct xfs_dinode *dip); xfs_failaddr_t xfs_inode_validate_extsize(struct xfs_mount *mp, diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c index 28b366275ae0..0cf853d42d62 100644 --- a/fs/xfs/libxfs/xfs_inode_fork.c +++ b/fs/xfs/libxfs/xfs_inode_fork.c @@ -291,7 +291,7 @@ xfs_iformat_attr_fork( * Initialize the extent count early, as the per-format routines may * depend on it. 
*/ - ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_NOFS); + ip->i_afp = kmem_cache_zalloc(xfs_ifork_zone, GFP_NOFS | __GFP_NOFAIL); ip->i_afp->if_format = dip->di_aformat; if (unlikely(ip->i_afp->if_format == 0)) /* pre IRIX 6.2 file system */ ip->i_afp->if_format = XFS_DINODE_FMT_EXTENTS; @@ -673,8 +673,8 @@ xfs_ifork_init_cow( if (ip->i_cowfp) return; - ip->i_cowfp = kmem_zone_zalloc(xfs_ifork_zone, - KM_NOFS); + ip->i_cowfp = kmem_cache_zalloc(xfs_ifork_zone, + GFP_NOFS | __GFP_NOFAIL); ip->i_cowfp->if_flags = XFS_IFEXTENTS; ip->i_cowfp->if_format = XFS_DINODE_FMT_EXTENTS; } diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h index 56d9dd787e7b..076bdc7037ee 100644 --- a/fs/xfs/libxfs/xfs_quota_defs.h +++ b/fs/xfs/libxfs/xfs_quota_defs.h @@ -18,23 +18,22 @@ typedef uint64_t xfs_qcnt_t; typedef uint16_t xfs_qwarncnt_t; +typedef uint8_t xfs_dqtype_t; + +#define XFS_DQTYPE_STRINGS \ + { XFS_DQTYPE_USER, "USER" }, \ + { XFS_DQTYPE_PROJ, "PROJ" }, \ + { XFS_DQTYPE_GROUP, "GROUP" } + /* * flags for q_flags field in the dquot. */ -#define XFS_DQ_USER 0x0001 /* a user quota */ -#define XFS_DQ_PROJ 0x0002 /* project quota */ -#define XFS_DQ_GROUP 0x0004 /* a group quota */ -#define XFS_DQ_DIRTY 0x0008 /* dquot is dirty */ -#define XFS_DQ_FREEING 0x0010 /* dquot is being torn down */ - -#define XFS_DQ_ALLTYPES (XFS_DQ_USER|XFS_DQ_PROJ|XFS_DQ_GROUP) +#define XFS_DQFLAG_DIRTY (1 << 0) /* dquot is dirty */ +#define XFS_DQFLAG_FREEING (1 << 1) /* dquot is being torn down */ -#define XFS_DQ_FLAGS \ - { XFS_DQ_USER, "USER" }, \ - { XFS_DQ_PROJ, "PROJ" }, \ - { XFS_DQ_GROUP, "GROUP" }, \ - { XFS_DQ_DIRTY, "DIRTY" }, \ - { XFS_DQ_FREEING, "FREEING" } +#define XFS_DQFLAG_STRINGS \ + { XFS_DQFLAG_DIRTY, "DIRTY" }, \ + { XFS_DQFLAG_FREEING, "FREEING" } /* * We have the possibility of all three quota types being active at once, and @@ -137,11 +136,11 @@ typedef uint16_t xfs_qwarncnt_t; #define XFS_QMOPT_RESBLK_MASK (XFS_QMOPT_RES_REGBLKS | XFS_QMOPT_RES_RTBLKS) extern xfs_failaddr_t xfs_dquot_verify(struct xfs_mount *mp, - struct xfs_disk_dquot *ddq, xfs_dqid_t id, uint type); + struct xfs_disk_dquot *ddq, xfs_dqid_t id); extern xfs_failaddr_t xfs_dqblk_verify(struct xfs_mount *mp, - struct xfs_dqblk *dqb, xfs_dqid_t id, uint type); + struct xfs_dqblk *dqb, xfs_dqid_t id); extern int xfs_calc_dquots_per_chunk(unsigned int nbblks); extern void xfs_dqblk_repair(struct xfs_mount *mp, struct xfs_dqblk *dqb, - xfs_dqid_t id, uint type); + xfs_dqid_t id, xfs_dqtype_t type); #endif /* __XFS_QUOTA_H__ */ diff --git a/fs/xfs/libxfs/xfs_refcount_btree.c b/fs/xfs/libxfs/xfs_refcount_btree.c index 7fd6044a4f78..a6ac60ae9421 100644 --- a/fs/xfs/libxfs/xfs_refcount_btree.c +++ b/fs/xfs/libxfs/xfs_refcount_btree.c @@ -37,15 +37,13 @@ xfs_refcountbt_set_root( { struct xfs_buf *agbp = cur->bc_ag.agbp; struct xfs_agf *agf = agbp->b_addr; - xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); - struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno); + struct xfs_perag *pag = agbp->b_pag; ASSERT(ptr->s != 0); agf->agf_refcount_root = ptr->s; be32_add_cpu(&agf->agf_refcount_level, inc); pag->pagf_refcount_level += inc; - xfs_perag_put(pag); xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL); @@ -325,7 +323,7 @@ xfs_refcountbt_init_common( ASSERT(agno != NULLAGNUMBER); ASSERT(agno < mp->m_sb.sb_agcount); - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); + cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL); cur->bc_tp = tp; cur->bc_mp = mp; 
cur->bc_btnum = XFS_BTNUM_REFC; diff --git a/fs/xfs/libxfs/xfs_rmap_btree.c b/fs/xfs/libxfs/xfs_rmap_btree.c index b7c05314d07c..beb81c84a937 100644 --- a/fs/xfs/libxfs/xfs_rmap_btree.c +++ b/fs/xfs/libxfs/xfs_rmap_btree.c @@ -63,16 +63,14 @@ xfs_rmapbt_set_root( { struct xfs_buf *agbp = cur->bc_ag.agbp; struct xfs_agf *agf = agbp->b_addr; - xfs_agnumber_t seqno = be32_to_cpu(agf->agf_seqno); int btnum = cur->bc_btnum; - struct xfs_perag *pag = xfs_perag_get(cur->bc_mp, seqno); + struct xfs_perag *pag = agbp->b_pag; ASSERT(ptr->s != 0); agf->agf_roots[btnum] = ptr->s; be32_add_cpu(&agf->agf_levels[btnum], inc); pag->pagf_levels[btnum] += inc; - xfs_perag_put(pag); xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS); } @@ -123,6 +121,7 @@ xfs_rmapbt_free_block( { struct xfs_buf *agbp = cur->bc_ag.agbp; struct xfs_agf *agf = agbp->b_addr; + struct xfs_perag *pag; xfs_agblock_t bno; int error; @@ -139,8 +138,8 @@ xfs_rmapbt_free_block( XFS_EXTENT_BUSY_SKIP_DISCARD); xfs_trans_agbtree_delta(cur->bc_tp, -1); - xfs_ag_resv_rmapbt_free(cur->bc_mp, cur->bc_ag.agno); - + pag = cur->bc_ag.agbp->b_pag; + xfs_ag_resv_free_extent(pag, XFS_AG_RESV_RMAPBT, NULL, 1); return 0; } @@ -457,7 +456,7 @@ xfs_rmapbt_init_common( { struct xfs_btree_cur *cur; - cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_NOFS); + cur = kmem_cache_zalloc(xfs_btree_cur_zone, GFP_NOFS | __GFP_NOFAIL); cur->bc_tp = tp; cur->bc_mp = mp; /* Overlapping btree; 2 keys per pointer. */ diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c index 9498ced947be..1d9fa8a300f1 100644 --- a/fs/xfs/libxfs/xfs_rtbitmap.c +++ b/fs/xfs/libxfs/xfs_rtbitmap.c @@ -70,7 +70,7 @@ xfs_rtbuf_get( if (error) return error; - if (XFS_IS_CORRUPT(mp, nmap == 0 || !xfs_bmap_is_real_extent(&map))) + if (XFS_IS_CORRUPT(mp, nmap == 0 || !xfs_bmap_is_written_extent(&map))) return -EFSCORRUPTED; ASSERT(map.br_startblock != NULLFSBLOCK); diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c index 4df87546bd40..ae9aaf1f34bf 100644 --- a/fs/xfs/libxfs/xfs_sb.c +++ b/fs/xfs/libxfs/xfs_sb.c @@ -600,7 +600,7 @@ xfs_sb_quota_to_disk( * disk. If neither are active, we should NULL the inode. * * In all cases, the separate pquotino must remain 0 because it - * it beyond the "end" of the valid non-pquotino superblock. + * is beyond the "end" of the valid non-pquotino superblock. 
*/ if (from->sb_qflags & XFS_GQUOTA_ACCT) to->sb_gquotino = cpu_to_be64(from->sb_gquotino); diff --git a/fs/xfs/libxfs/xfs_shared.h b/fs/xfs/libxfs/xfs_shared.h index c45acbd3add9..708feb8eac76 100644 --- a/fs/xfs/libxfs/xfs_shared.h +++ b/fs/xfs/libxfs/xfs_shared.h @@ -65,6 +65,7 @@ void xfs_log_get_max_trans_res(struct xfs_mount *mp, #define XFS_TRANS_DQ_DIRTY 0x10 /* at least one dquot in trx dirty */ #define XFS_TRANS_RESERVE 0x20 /* OK to use reserved data blocks */ #define XFS_TRANS_NO_WRITECOUNT 0x40 /* do not elevate SB writecount */ +#define XFS_TRANS_RES_FDBLKS 0x80 /* reserve newly freed blocks */ /* * LOWMODE is used by the allocator to activate the lowspace algorithm - when * free space is running low the extent allocator may choose to allocate an diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c index b5dfb6654842..b7e222befb08 100644 --- a/fs/xfs/libxfs/xfs_trans_inode.c +++ b/fs/xfs/libxfs/xfs_trans_inode.c @@ -8,6 +8,8 @@ #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_mount.h" #include "xfs_inode.h" #include "xfs_trans.h" #include "xfs_trans_priv.h" @@ -36,6 +38,7 @@ xfs_trans_ijoin( ASSERT(iip->ili_lock_flags == 0); iip->ili_lock_flags = lock_flags; + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); /* * Get a log_item_desc to point at the new item. @@ -71,24 +74,35 @@ xfs_trans_ichgtime( } /* - * This is called to mark the fields indicated in fieldmask as needing - * to be logged when the transaction is committed. The inode must - * already be associated with the given transaction. + * This is called to mark the fields indicated in fieldmask as needing to be + * logged when the transaction is committed. The inode must already be + * associated with the given transaction. * - * The values for fieldmask are defined in xfs_inode_item.h. We always - * log all of the core inode if any of it has changed, and we always log - * all of the inline data/extents/b-tree root if any of them has changed. + * The values for fieldmask are defined in xfs_inode_item.h. We always log all + * of the core inode if any of it has changed, and we always log all of the + * inline data/extents/b-tree root if any of them has changed. + * + * Grab and pin the cluster buffer associated with this inode to avoid RMW + * cycles at inode writeback time. Avoid the need to add error handling to every + * xfs_trans_log_inode() call by shutting down on read error. This will cause + * transactions to fail and everything to error out, just like if we return a + * read error in a dirty transaction and cancel it. */ void xfs_trans_log_inode( - xfs_trans_t *tp, - xfs_inode_t *ip, - uint flags) + struct xfs_trans *tp, + struct xfs_inode *ip, + uint flags) { - struct inode *inode = VFS_I(ip); + struct xfs_inode_log_item *iip = ip->i_itemp; + struct inode *inode = VFS_I(ip); + uint iversion_flags = 0; - ASSERT(ip->i_itemp != NULL); + ASSERT(iip); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE)); + + tp->t_flags |= XFS_TRANS_DIRTY; /* * Don't bother with i_lock for the I_DIRTY_TIME check here, as races @@ -96,22 +110,13 @@ xfs_trans_log_inode( * to log the timestamps, or will clear already cleared fields in the * worst case. 
*/ - if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) { + if (inode->i_state & I_DIRTY_TIME) { spin_lock(&inode->i_lock); - inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED); + inode->i_state &= ~I_DIRTY_TIME; spin_unlock(&inode->i_lock); } /* - * Record the specific change for fdatasync optimisation. This - * allows fdatasync to skip log forces for inodes that are only - * timestamp dirty. We do this before the change count so that - * the core being logged in this case does not impact on fdatasync - * behaviour. - */ - ip->i_itemp->ili_fsync_fields |= flags; - - /* * First time we log the inode in a transaction, bump the inode change * counter if it is configured for this to occur. While we have the * inode locked exclusively for metadata modification, we can usually @@ -120,23 +125,64 @@ xfs_trans_log_inode( * set however, then go ahead and bump the i_version counter * unconditionally. */ - if (!test_and_set_bit(XFS_LI_DIRTY, &ip->i_itemp->ili_item.li_flags) && - IS_I_VERSION(VFS_I(ip))) { - if (inode_maybe_inc_iversion(VFS_I(ip), flags & XFS_ILOG_CORE)) - flags |= XFS_ILOG_CORE; + if (!test_and_set_bit(XFS_LI_DIRTY, &iip->ili_item.li_flags)) { + if (IS_I_VERSION(inode) && + inode_maybe_inc_iversion(inode, flags & XFS_ILOG_CORE)) + iversion_flags = XFS_ILOG_CORE; } - tp->t_flags |= XFS_TRANS_DIRTY; + /* + * Record the specific change for fdatasync optimisation. This allows + * fdatasync to skip log forces for inodes that are only timestamp + * dirty. + */ + spin_lock(&iip->ili_lock); + iip->ili_fsync_fields |= flags; + + if (!iip->ili_item.li_buf) { + struct xfs_buf *bp; + int error; + + /* + * We hold the ILOCK here, so this inode is not going to be + * flushed while we are here. Further, because there is no + * buffer attached to the item, we know that there is no IO in + * progress, so nothing will clear the ili_fields while we read + * in the buffer. Hence we can safely drop the spin lock and + * read the buffer knowing that the state will not change from + * here. + */ + spin_unlock(&iip->ili_lock); + error = xfs_imap_to_bp(ip->i_mount, tp, &ip->i_imap, NULL, + &bp, 0); + if (error) { + xfs_force_shutdown(ip->i_mount, SHUTDOWN_META_IO_ERROR); + return; + } + + /* + * We need an explicit buffer reference for the log item but + * don't want the buffer to remain attached to the transaction. + * Hold the buffer but release the transaction reference once + * we've attached the inode log item to the buffer log item + * list. + */ + xfs_buf_hold(bp); + spin_lock(&iip->ili_lock); + iip->ili_item.li_buf = bp; + bp->b_flags |= _XBF_INODES; + list_add_tail(&iip->ili_item.li_bio_list, &bp->b_li_list); + xfs_trans_brelse(tp, bp); + } /* - * Always OR in the bits from the ili_last_fields field. - * This is to coordinate with the xfs_iflush() and xfs_iflush_done() - * routines in the eventual clearing of the ili_fields bits. - * See the big comment in xfs_iflush() for an explanation of - * this coordination mechanism. + * Always OR in the bits from the ili_last_fields field. This is to + * coordinate with the xfs_iflush() and xfs_iflush_done() routines in + * the eventual clearing of the ili_fields bits. See the big comment in + * xfs_iflush() for an explanation of this coordination mechanism. 
*/ - flags |= ip->i_itemp->ili_last_fields; - ip->i_itemp->ili_fields |= flags; + iip->ili_fields |= (flags | iip->ili_last_fields | iversion_flags); + spin_unlock(&iip->ili_lock); } int diff --git a/fs/xfs/libxfs/xfs_trans_space.h b/fs/xfs/libxfs/xfs_trans_space.h index 88221c7a04cc..7ad3659c5d2a 100644 --- a/fs/xfs/libxfs/xfs_trans_space.h +++ b/fs/xfs/libxfs/xfs_trans_space.h @@ -57,8 +57,8 @@ XFS_DAREMOVE_SPACE_RES(mp, XFS_DATA_FORK) #define XFS_IALLOC_SPACE_RES(mp) \ (M_IGEO(mp)->ialloc_blks + \ - (xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1 * \ - (M_IGEO(mp)->inobt_maxlevels - 1))) + ((xfs_sb_version_hasfinobt(&mp->m_sb) ? 2 : 1) * \ + M_IGEO(mp)->inobt_maxlevels)) /* * Space reservation values for various transactions. diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c index 7badd6dfe544..955302e7cdde 100644 --- a/fs/xfs/scrub/bmap.c +++ b/fs/xfs/scrub/bmap.c @@ -45,9 +45,27 @@ xchk_setup_inode_bmap( */ if (S_ISREG(VFS_I(sc->ip)->i_mode) && sc->sm->sm_type == XFS_SCRUB_TYPE_BMBTD) { + struct address_space *mapping = VFS_I(sc->ip)->i_mapping; + inode_dio_wait(VFS_I(sc->ip)); - error = filemap_write_and_wait(VFS_I(sc->ip)->i_mapping); - if (error) + + /* + * Try to flush all incore state to disk before we examine the + * space mappings for the data fork. Leave accumulated errors + * in the mapping for the writer threads to consume. + * + * On ENOSPC or EIO writeback errors, we continue into the + * extent mapping checks because write failures do not + * necessarily imply anything about the correctness of the file + * metadata. The metadata and the file data could be on + * completely separate devices; a media failure might only + * affect a subset of the disk, etc. We can handle delalloc + * extents in the scrubber, so leaving them in memory is fine. + */ + error = filemap_fdatawrite(mapping); + if (!error) + error = filemap_fdatawait_keep_errors(mapping); + if (error && (error != -ENOSPC && error != -EIO)) goto out; } diff --git a/fs/xfs/scrub/dabtree.c b/fs/xfs/scrub/dabtree.c index 44b15015021f..e56786f0a13c 100644 --- a/fs/xfs/scrub/dabtree.c +++ b/fs/xfs/scrub/dabtree.c @@ -476,9 +476,7 @@ xchk_da_btree( ds.dargs.whichfork = whichfork; ds.dargs.trans = sc->tp; ds.dargs.op_flags = XFS_DA_OP_OKNOENT; - ds.state = xfs_da_state_alloc(); - ds.state->args = &ds.dargs; - ds.state->mp = mp; + ds.state = xfs_da_state_alloc(&ds.dargs); ds.sc = sc; ds.private = private; if (whichfork == XFS_ATTR_FORK) { diff --git a/fs/xfs/scrub/quota.c b/fs/xfs/scrub/quota.c index 905a34558361..e34ca20ae8e4 100644 --- a/fs/xfs/scrub/quota.c +++ b/fs/xfs/scrub/quota.c @@ -18,17 +18,17 @@ #include "scrub/common.h" /* Convert a scrub type code to a DQ flag, or return 0 if error. 
*/ -static inline uint +static inline xfs_dqtype_t xchk_quota_to_dqtype( struct xfs_scrub *sc) { switch (sc->sm->sm_type) { case XFS_SCRUB_TYPE_UQUOTA: - return XFS_DQ_USER; + return XFS_DQTYPE_USER; case XFS_SCRUB_TYPE_GQUOTA: - return XFS_DQ_GROUP; + return XFS_DQTYPE_GROUP; case XFS_SCRUB_TYPE_PQUOTA: - return XFS_DQ_PROJ; + return XFS_DQTYPE_PROJ; default: return 0; } @@ -40,7 +40,7 @@ xchk_setup_quota( struct xfs_scrub *sc, struct xfs_inode *ip) { - uint dqtype; + xfs_dqtype_t dqtype; int error; if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp)) @@ -73,26 +73,15 @@ struct xchk_quota_info { STATIC int xchk_quota_item( struct xfs_dquot *dq, - uint dqtype, + xfs_dqtype_t dqtype, void *priv) { struct xchk_quota_info *sqi = priv; struct xfs_scrub *sc = sqi->sc; struct xfs_mount *mp = sc->mp; - struct xfs_disk_dquot *d = &dq->q_core; struct xfs_quotainfo *qi = mp->m_quotainfo; xfs_fileoff_t offset; - unsigned long long bsoft; - unsigned long long isoft; - unsigned long long rsoft; - unsigned long long bhard; - unsigned long long ihard; - unsigned long long rhard; - unsigned long long bcount; - unsigned long long icount; - unsigned long long rcount; xfs_ino_t fs_icount; - xfs_dqid_t id = be32_to_cpu(d->d_id); int error = 0; if (xchk_should_terminate(sc, &error)) @@ -102,27 +91,11 @@ xchk_quota_item( * Except for the root dquot, the actual dquot we got must either have * the same or higher id as we saw before. */ - offset = id / qi->qi_dqperchunk; - if (id && id <= sqi->last_id) + offset = dq->q_id / qi->qi_dqperchunk; + if (dq->q_id && dq->q_id <= sqi->last_id) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); - sqi->last_id = id; - - /* Did we get the dquot type we wanted? */ - if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES)) - xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); - - if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0)) - xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); - - /* Check the limits. */ - bhard = be64_to_cpu(d->d_blk_hardlimit); - ihard = be64_to_cpu(d->d_ino_hardlimit); - rhard = be64_to_cpu(d->d_rtb_hardlimit); - - bsoft = be64_to_cpu(d->d_blk_softlimit); - isoft = be64_to_cpu(d->d_ino_softlimit); - rsoft = be64_to_cpu(d->d_rtb_softlimit); + sqi->last_id = dq->q_id; /* * Warn if the hard limits are larger than the fs. @@ -132,25 +105,22 @@ xchk_quota_item( * Complain about corruption if the soft limit is greater than * the hard limit. */ - if (bhard > mp->m_sb.sb_dblocks) + if (dq->q_blk.hardlimit > mp->m_sb.sb_dblocks) xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); - if (bsoft > bhard) + if (dq->q_blk.softlimit > dq->q_blk.hardlimit) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); - if (ihard > M_IGEO(mp)->maxicount) + if (dq->q_ino.hardlimit > M_IGEO(mp)->maxicount) xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); - if (isoft > ihard) + if (dq->q_ino.softlimit > dq->q_ino.hardlimit) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); - if (rhard > mp->m_sb.sb_rblocks) + if (dq->q_rtb.hardlimit > mp->m_sb.sb_rblocks) xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); - if (rsoft > rhard) + if (dq->q_rtb.softlimit > dq->q_rtb.hardlimit) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); /* Check the resource counts. */ - bcount = be64_to_cpu(d->d_bcount); - icount = be64_to_cpu(d->d_icount); - rcount = be64_to_cpu(d->d_rtbcount); fs_icount = percpu_counter_sum(&mp->m_icount); /* @@ -159,15 +129,15 @@ xchk_quota_item( * if there are no quota limits. 
*/ if (xfs_sb_version_hasreflink(&mp->m_sb)) { - if (mp->m_sb.sb_dblocks < bcount) + if (mp->m_sb.sb_dblocks < dq->q_blk.count) xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); } else { - if (mp->m_sb.sb_dblocks < bcount) + if (mp->m_sb.sb_dblocks < dq->q_blk.count) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); } - if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks) + if (dq->q_ino.count > fs_icount || dq->q_rtb.count > mp->m_sb.sb_rblocks) xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset); /* @@ -175,13 +145,22 @@ xchk_quota_item( * lower limit than the actual usage. However, we flag it for * admin review. */ - if (id != 0 && bhard != 0 && bcount > bhard) + if (dq->q_id == 0) + goto out; + + if (dq->q_blk.hardlimit != 0 && + dq->q_blk.count > dq->q_blk.hardlimit) xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); - if (id != 0 && ihard != 0 && icount > ihard) + + if (dq->q_ino.hardlimit != 0 && + dq->q_ino.count > dq->q_ino.hardlimit) xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); - if (id != 0 && rhard != 0 && rcount > rhard) + + if (dq->q_rtb.hardlimit != 0 && + dq->q_rtb.count > dq->q_rtb.hardlimit) xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset); +out: if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT) return -EFSCORRUPTED; @@ -235,7 +214,7 @@ xchk_quota( struct xchk_quota_info sqi; struct xfs_mount *mp = sc->mp; struct xfs_quotainfo *qi = mp->m_quotainfo; - uint dqtype; + xfs_dqtype_t dqtype; int error = 0; dqtype = xchk_quota_to_dqtype(sc); diff --git a/fs/xfs/scrub/repair.c b/fs/xfs/scrub/repair.c index db3cfd12803d..25e86c71e7b9 100644 --- a/fs/xfs/scrub/repair.c +++ b/fs/xfs/scrub/repair.c @@ -899,11 +899,11 @@ xrep_find_ag_btree_roots( void xrep_force_quotacheck( struct xfs_scrub *sc, - uint dqtype) + xfs_dqtype_t type) { uint flag; - flag = xfs_quota_chkd_flag(dqtype); + flag = xfs_quota_chkd_flag(type); if (!(flag & sc->mp->m_qflags)) return; @@ -939,11 +939,11 @@ xrep_ino_dqattach( "inode %llu repair encountered quota error %d, quotacheck forced.", (unsigned long long)sc->ip->i_ino, error); if (XFS_IS_UQUOTA_ON(sc->mp) && !sc->ip->i_udquot) - xrep_force_quotacheck(sc, XFS_DQ_USER); + xrep_force_quotacheck(sc, XFS_DQTYPE_USER); if (XFS_IS_GQUOTA_ON(sc->mp) && !sc->ip->i_gdquot) - xrep_force_quotacheck(sc, XFS_DQ_GROUP); + xrep_force_quotacheck(sc, XFS_DQTYPE_GROUP); if (XFS_IS_PQUOTA_ON(sc->mp) && !sc->ip->i_pdquot) - xrep_force_quotacheck(sc, XFS_DQ_PROJ); + xrep_force_quotacheck(sc, XFS_DQTYPE_PROJ); /* fall through */ case -ESRCH: error = 0; diff --git a/fs/xfs/scrub/repair.h b/fs/xfs/scrub/repair.h index 04a47d45605b..fe77de01abe0 100644 --- a/fs/xfs/scrub/repair.h +++ b/fs/xfs/scrub/repair.h @@ -6,6 +6,8 @@ #ifndef __XFS_SCRUB_REPAIR_H__ #define __XFS_SCRUB_REPAIR_H__ +#include "xfs_quota_defs.h" + static inline int xrep_notsupported(struct xfs_scrub *sc) { return -EOPNOTSUPP; @@ -49,7 +51,7 @@ struct xrep_find_ag_btree { int xrep_find_ag_btree_roots(struct xfs_scrub *sc, struct xfs_buf *agf_bp, struct xrep_find_ag_btree *btree_info, struct xfs_buf *agfl_bp); -void xrep_force_quotacheck(struct xfs_scrub *sc, uint dqtype); +void xrep_force_quotacheck(struct xfs_scrub *sc, xfs_dqtype_t type); int xrep_ino_dqattach(struct xfs_scrub *sc); /* Metadata repairers */ diff --git a/fs/xfs/scrub/rtbitmap.c b/fs/xfs/scrub/rtbitmap.c index c642bc206c41..76e4ffe0315b 100644 --- a/fs/xfs/scrub/rtbitmap.c +++ b/fs/xfs/scrub/rtbitmap.c @@ -13,6 +13,7 @@ #include "xfs_trans.h" #include "xfs_rtalloc.h" #include "xfs_inode.h" +#include "xfs_bmap.h" #include 
"scrub/scrub.h" #include "scrub/common.h" @@ -58,6 +59,41 @@ xchk_rtbitmap_rec( return 0; } +/* Make sure the entire rtbitmap file is mapped with written extents. */ +STATIC int +xchk_rtbitmap_check_extents( + struct xfs_scrub *sc) +{ + struct xfs_mount *mp = sc->mp; + struct xfs_bmbt_irec map; + xfs_rtblock_t off; + int nmap; + int error = 0; + + for (off = 0; off < mp->m_sb.sb_rbmblocks;) { + if (xchk_should_terminate(sc, &error) || + (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + break; + + /* Make sure we have a written extent. */ + nmap = 1; + error = xfs_bmapi_read(mp->m_rbmip, off, + mp->m_sb.sb_rbmblocks - off, &map, &nmap, + XFS_DATA_FORK); + if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, off, &error)) + break; + + if (nmap != 1 || !xfs_bmap_is_written_extent(&map)) { + xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, off); + break; + } + + off += map.br_blockcount; + } + + return error; +} + /* Scrub the realtime bitmap. */ int xchk_rtbitmap( @@ -65,11 +101,22 @@ xchk_rtbitmap( { int error; + /* Is the size of the rtbitmap correct? */ + if (sc->mp->m_rbmip->i_d.di_size != + XFS_FSB_TO_B(sc->mp, sc->mp->m_sb.sb_rbmblocks)) { + xchk_ino_set_corrupt(sc, sc->mp->m_rbmip->i_ino); + return 0; + } + /* Invoke the fork scrubber. */ error = xchk_metadata_inode_forks(sc); if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) return error; + error = xchk_rtbitmap_check_extents(sc); + if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)) + return error; + error = xfs_rtalloc_query_all(sc->tp, xchk_rtbitmap_rec, sc); if (!xchk_fblock_process_error(sc, XFS_DATA_FORK, 0, &error)) goto out; diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c index e380bd1a9bfc..50f922cad91a 100644 --- a/fs/xfs/xfs_attr_list.c +++ b/fs/xfs/xfs_attr_list.c @@ -44,7 +44,7 @@ xfs_attr_shortform_compare(const void *a, const void *b) /* * Copy out entries of shortform attribute lists for attr_list(). * Shortform attribute lists are not stored in hashval sorted order. - * If the output buffer is not large enough to hold them all, then we + * If the output buffer is not large enough to hold them all, then * we have to calculate each entries' hashvalue and sort them before * we can begin returning them to the user. */ diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c index 6736c5ab188f..ec3691372e7c 100644 --- a/fs/xfs/xfs_bmap_item.c +++ b/fs/xfs/xfs_bmap_item.c @@ -138,7 +138,7 @@ xfs_bui_init( { struct xfs_bui_log_item *buip; - buip = kmem_zone_zalloc(xfs_bui_zone, 0); + buip = kmem_cache_zalloc(xfs_bui_zone, GFP_KERNEL | __GFP_NOFAIL); xfs_log_item_init(mp, &buip->bui_item, XFS_LI_BUI, &xfs_bui_item_ops); buip->bui_format.bui_nextents = XFS_BUI_MAX_FAST_EXTENTS; @@ -215,7 +215,7 @@ xfs_trans_get_bud( { struct xfs_bud_log_item *budp; - budp = kmem_zone_zalloc(xfs_bud_zone, 0); + budp = kmem_cache_zalloc(xfs_bud_zone, GFP_KERNEL | __GFP_NOFAIL); xfs_log_item_init(tp->t_mountp, &budp->bud_item, XFS_LI_BUD, &xfs_bud_item_ops); budp->bud_buip = buip; diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index f37f5cc4b19f..5123f82f2477 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -126,7 +126,7 @@ xfs_bmap_rtalloc( * pick an extent that will space things out in the rt area. 
*/ if (ap->eof && ap->offset == 0) { - xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */ + xfs_rtblock_t rtx; /* realtime extent no */ error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx); if (error) @@ -1165,7 +1165,7 @@ xfs_insert_file_space( goto out_trans_cancel; do { - error = xfs_trans_roll_inode(&tp, ip); + error = xfs_defer_finish(&tp); if (error) goto out_trans_cancel; @@ -1567,6 +1567,7 @@ xfs_swap_extents( int lock_flags; uint64_t f; int resblks = 0; + unsigned int flags = 0; /* * Lock the inodes against other IO, page faults and truncate to @@ -1630,17 +1631,16 @@ xfs_swap_extents( resblks += XFS_SWAP_RMAP_SPACE_RES(mp, tipnext, w); /* - * Handle the corner case where either inode might straddle the - * btree format boundary. If so, the inode could bounce between - * btree <-> extent format on unmap -> remap cycles, freeing and - * allocating a bmapbt block each time. + * If either inode straddles a bmapbt block allocation boundary, + * the rmapbt algorithm triggers repeated allocs and frees as + * extents are remapped. This can exhaust the block reservation + * prematurely and cause shutdown. Return freed blocks to the + * transaction reservation to counter this behavior. */ - if (ipnext == (XFS_IFORK_MAXEXT(ip, w) + 1)) - resblks += XFS_IFORK_MAXEXT(ip, w); - if (tipnext == (XFS_IFORK_MAXEXT(tip, w) + 1)) - resblks += XFS_IFORK_MAXEXT(tip, w); + flags |= XFS_TRANS_RES_FDBLKS; } - error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp); + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, flags, + &tp); if (error) goto out_unlock; diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index 20b748f7e186..d4cdcb6fb2fe 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -14,6 +14,9 @@ #include "xfs_mount.h" #include "xfs_trace.h" #include "xfs_log.h" +#include "xfs_log_recover.h" +#include "xfs_trans.h" +#include "xfs_buf_item.h" #include "xfs_errortag.h" #include "xfs_error.h" @@ -211,9 +214,7 @@ _xfs_buf_alloc( int i; *bpp = NULL; - bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS); - if (unlikely(!bp)) - return -ENOMEM; + bp = kmem_cache_zalloc(xfs_buf_zone, GFP_NOFS | __GFP_NOFAIL); /* * We don't want certain flags to appear in b_flags unless they are @@ -655,7 +656,6 @@ found: */ if (bp->b_flags & XBF_STALE) { ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0); - ASSERT(bp->b_iodone == NULL); bp->b_flags &= _XBF_KMEM | _XBF_PAGES; bp->b_ops = NULL; } @@ -1191,10 +1191,13 @@ xfs_buf_ioend( if (!bp->b_error && bp->b_io_error) xfs_buf_ioerror(bp, bp->b_io_error); - /* Only validate buffers that were read without errors */ - if (read && !bp->b_error && bp->b_ops) { - ASSERT(!bp->b_iodone); - bp->b_ops->verify_read(bp); + if (read) { + if (!bp->b_error && bp->b_ops) + bp->b_ops->verify_read(bp); + if (!bp->b_error) + bp->b_flags |= XBF_DONE; + xfs_buf_ioend_finish(bp); + return; } if (!bp->b_error) { @@ -1202,12 +1205,25 @@ xfs_buf_ioend( bp->b_flags |= XBF_DONE; } - if (bp->b_iodone) - (*(bp->b_iodone))(bp); - else if (bp->b_flags & XBF_ASYNC) - xfs_buf_relse(bp); - else - complete(&bp->b_iowait); + /* + * If this is a log recovery buffer, we aren't doing transactional IO + * yet so we need to let it handle IO completions. 
+ */ + if (bp->b_flags & _XBF_LOGRECOVERY) { + xlog_recover_iodone(bp); + return; + } + + if (bp->b_flags & _XBF_INODES) { + xfs_buf_inode_iodone(bp); + return; + } + + if (bp->b_flags & _XBF_DQUOTS) { + xfs_buf_dquot_iodone(bp); + return; + } + xfs_buf_iodone(bp); } static void diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h index 050c53b739e2..755b652e695a 100644 --- a/fs/xfs/xfs_buf.h +++ b/fs/xfs/xfs_buf.h @@ -18,6 +18,7 @@ /* * Base types */ +struct xfs_buf; #define XFS_BUF_DADDR_NULL ((xfs_daddr_t) (-1LL)) @@ -30,15 +31,20 @@ #define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ #define XBF_WRITE_FAIL (1 << 7) /* async writes have failed on this buffer */ -/* flags used only as arguments to access routines */ -#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */ -#define XBF_UNMAPPED (1 << 17)/* do not map the buffer */ +/* buffer type flags for write callbacks */ +#define _XBF_INODES (1 << 16)/* inode buffer */ +#define _XBF_DQUOTS (1 << 17)/* dquot buffer */ +#define _XBF_LOGRECOVERY (1 << 18)/* log recovery buffer */ /* flags used only internally */ #define _XBF_PAGES (1 << 20)/* backed by refcounted pages */ #define _XBF_KMEM (1 << 21)/* backed by heap memory */ #define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ +/* flags used only as arguments to access routines */ +#define XBF_TRYLOCK (1 << 30)/* lock requested, but do not wait */ +#define XBF_UNMAPPED (1 << 31)/* do not map the buffer */ + typedef unsigned int xfs_buf_flags_t; #define XFS_BUF_FLAGS \ @@ -50,12 +56,15 @@ typedef unsigned int xfs_buf_flags_t; { XBF_DONE, "DONE" }, \ { XBF_STALE, "STALE" }, \ { XBF_WRITE_FAIL, "WRITE_FAIL" }, \ - { XBF_TRYLOCK, "TRYLOCK" }, /* should never be set */\ - { XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\ + { _XBF_INODES, "INODES" }, \ + { _XBF_DQUOTS, "DQUOTS" }, \ + { _XBF_LOGRECOVERY, "LOG_RECOVERY" }, \ { _XBF_PAGES, "PAGES" }, \ { _XBF_KMEM, "KMEM" }, \ - { _XBF_DELWRI_Q, "DELWRI_Q" } - + { _XBF_DELWRI_Q, "DELWRI_Q" }, \ + /* The following interface flags should never be set */ \ + { XBF_TRYLOCK, "TRYLOCK" }, \ + { XBF_UNMAPPED, "UNMAPPED" } /* * Internal state flags. 
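With the b_iodone function pointer gone, a writer that wants completion processing queues its log items on b_li_list and tags the buffer with one of the new type flags; xfs_buf_ioend() then routes to the matching handler. A minimal sketch of the submitter side for an inode cluster buffer, assuming the item has already been formatted into the buffer — the helper name is illustrative and not part of this series (the dquot flush hunk later in this diff shows the real equivalent for _XBF_DQUOTS):

static void
attach_inode_cluster_item(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	/* completion walks b_li_list, e.g. via xfs_iflush_done() */
	list_add_tail(&lip->li_bio_list, &bp->b_li_list);

	/* route xfs_buf_ioend() to xfs_buf_inode_iodone() */
	bp->b_flags |= _XBF_INODES;
}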
@@ -94,10 +103,6 @@ typedef struct xfs_buftarg { struct ratelimit_state bt_ioerror_rl; } xfs_buftarg_t; -struct xfs_buf; -typedef void (*xfs_buf_iodone_t)(struct xfs_buf *); - - #define XB_PAGES 2 struct xfs_buf_map { @@ -150,7 +155,6 @@ typedef struct xfs_buf { xfs_buftarg_t *b_target; /* buffer target (device) */ void *b_addr; /* virtual address of buffer */ struct work_struct b_ioend_work; - xfs_buf_iodone_t b_iodone; /* I/O completion function */ struct completion b_iowait; /* queue for I/O waiters */ struct xfs_buf_log_item *b_log_item; struct list_head b_li_list; /* Log items list head */ @@ -257,9 +261,23 @@ extern void xfs_buf_unlock(xfs_buf_t *); #define xfs_buf_islocked(bp) \ ((bp)->b_sema.count <= 0) +static inline void xfs_buf_relse(xfs_buf_t *bp) +{ + xfs_buf_unlock(bp); + xfs_buf_rele(bp); +} + /* Buffer Read and Write Routines */ extern int xfs_bwrite(struct xfs_buf *bp); extern void xfs_buf_ioend(struct xfs_buf *bp); +static inline void xfs_buf_ioend_finish(struct xfs_buf *bp) +{ + if (bp->b_flags & XBF_ASYNC) + xfs_buf_relse(bp); + else + complete(&bp->b_iowait); +} + extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error, xfs_failaddr_t failaddr); #define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address) @@ -324,12 +342,6 @@ static inline int xfs_buf_ispinned(struct xfs_buf *bp) return atomic_read(&bp->b_pin_count); } -static inline void xfs_buf_relse(xfs_buf_t *bp) -{ - xfs_buf_unlock(bp); - xfs_buf_rele(bp); -} - static inline int xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset) { diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index 9e75e8d6042e..408d1b572d3f 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c @@ -12,8 +12,13 @@ #include "xfs_bit.h" #include "xfs_mount.h" #include "xfs_trans.h" -#include "xfs_buf_item.h" #include "xfs_trans_priv.h" +#include "xfs_buf_item.h" +#include "xfs_inode.h" +#include "xfs_inode_item.h" +#include "xfs_quota.h" +#include "xfs_dquot_item.h" +#include "xfs_dquot.h" #include "xfs_trace.h" #include "xfs_log.h" @@ -25,7 +30,7 @@ static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip) return container_of(lip, struct xfs_buf_log_item, bli_item); } -STATIC void xfs_buf_do_callbacks(struct xfs_buf *bp); +static void xfs_buf_item_done(struct xfs_buf *bp); /* Is this log iovec plausibly large enough to contain the buffer log format? */ bool @@ -122,7 +127,7 @@ xfs_buf_item_size_segment( * stretch of non-contiguous chunks to be logged. Contiguous chunks are logged * in a single iovec. * - * Discontiguous buffers need a format structure per region that that is being + * Discontiguous buffers need a format structure per region that is being * logged. This makes the changes in the buffer appear to log recovery as though * they came from separate buffers, just like would occur if multiple buffers * were used instead of a single discontiguous buffer. This enables @@ -457,10 +462,9 @@ xfs_buf_item_unpin( * the AIL lock. 
*/ if (bip->bli_flags & XFS_BLI_STALE_INODE) { - xfs_buf_do_callbacks(bp); - bp->b_log_item = NULL; - list_del_init(&bp->b_li_list); - bp->b_iodone = NULL; + xfs_buf_item_done(bp); + xfs_iflush_done(bp); + ASSERT(list_empty(&bp->b_li_list)); } else { xfs_trans_ail_delete(lip, SHUTDOWN_LOG_IO_ERROR); xfs_buf_item_relse(bp); @@ -734,7 +738,7 @@ xfs_buf_item_init( return 0; } - bip = kmem_zone_zalloc(xfs_buf_item_zone, 0); + bip = kmem_cache_zalloc(xfs_buf_item_zone, GFP_KERNEL | __GFP_NOFAIL); xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops); bip->bli_buf = bp; @@ -936,11 +940,7 @@ xfs_buf_item_free( } /* - * This is called when the buf log item is no longer needed. It should - * free the buf log item associated with the given buffer and clear - * the buffer's pointer to the buf log item. If there are no more - * items in the list, clear the b_iodone field of the buffer (see - * xfs_buf_attach_iodone() below). + * xfs_buf_item_relse() is called when the buf log item is no longer needed. */ void xfs_buf_item_relse( @@ -952,137 +952,28 @@ xfs_buf_item_relse( ASSERT(!test_bit(XFS_LI_IN_AIL, &bip->bli_item.li_flags)); bp->b_log_item = NULL; - if (list_empty(&bp->b_li_list)) - bp->b_iodone = NULL; - xfs_buf_rele(bp); xfs_buf_item_free(bip); } - /* - * Add the given log item with its callback to the list of callbacks - * to be called when the buffer's I/O completes. If it is not set - * already, set the buffer's b_iodone() routine to be - * xfs_buf_iodone_callbacks() and link the log item into the list of - * items rooted at b_li_list. + * Decide if we're going to retry the write after a failure, and prepare + * the buffer for retrying the write. */ -void -xfs_buf_attach_iodone( - struct xfs_buf *bp, - void (*cb)(struct xfs_buf *, struct xfs_log_item *), - struct xfs_log_item *lip) -{ - ASSERT(xfs_buf_islocked(bp)); - - lip->li_cb = cb; - list_add_tail(&lip->li_bio_list, &bp->b_li_list); - - ASSERT(bp->b_iodone == NULL || - bp->b_iodone == xfs_buf_iodone_callbacks); - bp->b_iodone = xfs_buf_iodone_callbacks; -} - -/* - * We can have many callbacks on a buffer. Running the callbacks individually - * can cause a lot of contention on the AIL lock, so we allow for a single - * callback to be able to scan the remaining items in bp->b_li_list for other - * items of the same type and callback to be processed in the first call. - * - * As a result, the loop walking the callback list below will also modify the - * list. it removes the first item from the list and then runs the callback. - * The loop then restarts from the new first item int the list. This allows the - * callback to scan and modify the list attached to the buffer and we don't - * have to care about maintaining a next item pointer. - */ -STATIC void -xfs_buf_do_callbacks( - struct xfs_buf *bp) -{ - struct xfs_buf_log_item *blip = bp->b_log_item; - struct xfs_log_item *lip; - - /* If there is a buf_log_item attached, run its callback */ - if (blip) { - lip = &blip->bli_item; - lip->li_cb(bp, lip); - } - - while (!list_empty(&bp->b_li_list)) { - lip = list_first_entry(&bp->b_li_list, struct xfs_log_item, - li_bio_list); - - /* - * Remove the item from the list, so we don't have any - * confusion if the item is added to another buf. - * Don't touch the log item after calling its - * callback, because it could have freed itself. - */ - list_del_init(&lip->li_bio_list); - lip->li_cb(bp, lip); - } -} - -/* - * Invoke the error state callback for each log item affected by the failed I/O. 
- * - * If a metadata buffer write fails with a non-permanent error, the buffer is - * eventually resubmitted and so the completion callbacks are not run. The error - * state may need to be propagated to the log items attached to the buffer, - * however, so the next AIL push of the item knows hot to handle it correctly. - */ -STATIC void -xfs_buf_do_callbacks_fail( - struct xfs_buf *bp) -{ - struct xfs_log_item *lip; - struct xfs_ail *ailp; - - /* - * Buffer log item errors are handled directly by xfs_buf_item_push() - * and xfs_buf_iodone_callback_error, and they have no IO error - * callbacks. Check only for items in b_li_list. - */ - if (list_empty(&bp->b_li_list)) - return; - - lip = list_first_entry(&bp->b_li_list, struct xfs_log_item, - li_bio_list); - ailp = lip->li_ailp; - spin_lock(&ailp->ail_lock); - list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { - if (lip->li_ops->iop_error) - lip->li_ops->iop_error(lip, bp); - } - spin_unlock(&ailp->ail_lock); -} - static bool -xfs_buf_iodone_callback_error( +xfs_buf_ioerror_fail_without_retry( struct xfs_buf *bp) { - struct xfs_buf_log_item *bip = bp->b_log_item; - struct xfs_log_item *lip; - struct xfs_mount *mp; + struct xfs_mount *mp = bp->b_mount; static ulong lasttime; static xfs_buftarg_t *lasttarg; - struct xfs_error_cfg *cfg; - - /* - * The failed buffer might not have a buf_log_item attached or the - * log_item list might be empty. Get the mp from the available - * xfs_log_item - */ - lip = list_first_entry_or_null(&bp->b_li_list, struct xfs_log_item, - li_bio_list); - mp = lip ? lip->li_mountp : bip->bli_item.li_mountp; /* * If we've already decided to shutdown the filesystem because of * I/O errors, there's no point in giving this a retry. */ if (XFS_FORCED_SHUTDOWN(mp)) - goto out_stale; + return true; if (bp->b_target != lasttarg || time_after(jiffies, (lasttime + 5*HZ))) { @@ -1093,129 +984,240 @@ xfs_buf_iodone_callback_error( /* synchronous writes will have callers process the error */ if (!(bp->b_flags & XBF_ASYNC)) - goto out_stale; - - trace_xfs_buf_item_iodone_async(bp, _RET_IP_); - ASSERT(bp->b_iodone != NULL); - - cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error); + return true; + return false; +} - /* - * If the write was asynchronous then no one will be looking for the - * error. If this is the first failure of this type, clear the error - * state and write the buffer out again. This means we always retry an - * async write failure at least once, but we also need to set the buffer - * up to behave correctly now for repeated failures. - */ - if (!(bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) || - bp->b_last_error != bp->b_error) { - bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL); - bp->b_last_error = bp->b_error; - if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER && - !bp->b_first_retry_time) - bp->b_first_retry_time = jiffies; +static bool +xfs_buf_ioerror_retry( + struct xfs_buf *bp, + struct xfs_error_cfg *cfg) +{ + if ((bp->b_flags & (XBF_STALE | XBF_WRITE_FAIL)) && + bp->b_last_error == bp->b_error) + return false; - xfs_buf_ioerror(bp, 0); - xfs_buf_submit(bp); - return true; - } + bp->b_flags |= (XBF_WRITE | XBF_DONE | XBF_WRITE_FAIL); + bp->b_last_error = bp->b_error; + if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER && + !bp->b_first_retry_time) + bp->b_first_retry_time = jiffies; + return true; +} - /* - * Repeated failure on an async write. Take action according to the - * error configuration we have been set up to use. 
- */ +/* + * Account for this latest trip around the retry handler, and decide if + * we've failed enough times to constitute a permanent failure. + */ +static bool +xfs_buf_ioerror_permanent( + struct xfs_buf *bp, + struct xfs_error_cfg *cfg) +{ + struct xfs_mount *mp = bp->b_mount; if (cfg->max_retries != XFS_ERR_RETRY_FOREVER && ++bp->b_retries > cfg->max_retries) - goto permanent_error; + return true; if (cfg->retry_timeout != XFS_ERR_RETRY_FOREVER && time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time)) - goto permanent_error; + return true; /* At unmount we may treat errors differently */ if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount) - goto permanent_error; + return true; - /* - * Still a transient error, run IO completion failure callbacks and let - * the higher layers retry the buffer. - */ - xfs_buf_do_callbacks_fail(bp); - xfs_buf_ioerror(bp, 0); - xfs_buf_relse(bp); - return true; + return false; +} + +/* + * On a sync write or shutdown we just want to stale the buffer and let the + * caller handle the error in bp->b_error appropriately. + * + * If the write was asynchronous then no one will be looking for the error. If + * this is the first failure of this type, clear the error state and write the + * buffer out again. This means we always retry an async write failure at least + * once, but we also need to set the buffer up to behave correctly now for + * repeated failures. + * + * If we get repeated async write failures, then we take action according to the + * error configuration we have been set up to use. + * + * Multi-state return value: + * + * XBF_IOERROR_FINISH: clear IO error retry state and run callback completions + * XBF_IOERROR_DONE: resubmitted immediately, do not run any completions + * XBF_IOERROR_FAIL: transient error, run failure callback completions and then + * release the buffer + */ +enum { + XBF_IOERROR_FINISH, + XBF_IOERROR_DONE, + XBF_IOERROR_FAIL, +}; + +static int +xfs_buf_iodone_error( + struct xfs_buf *bp) +{ + struct xfs_mount *mp = bp->b_mount; + struct xfs_error_cfg *cfg; + + if (xfs_buf_ioerror_fail_without_retry(bp)) + goto out_stale; + + trace_xfs_buf_item_iodone_async(bp, _RET_IP_); + + cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error); + if (xfs_buf_ioerror_retry(bp, cfg)) { + xfs_buf_ioerror(bp, 0); + xfs_buf_submit(bp); + return XBF_IOERROR_DONE; + } /* * Permanent error - we need to trigger a shutdown if we haven't already * to indicate that inconsistency will result from this action. */ -permanent_error: - xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); + if (xfs_buf_ioerror_permanent(bp, cfg)) { + xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); + goto out_stale; + } + + /* Still considered a transient error. Caller will schedule retries. */ + return XBF_IOERROR_FAIL; + out_stale: xfs_buf_stale(bp); bp->b_flags |= XBF_DONE; trace_xfs_buf_error_relse(bp, _RET_IP_); - return false; + return XBF_IOERROR_FINISH; } -/* - * This is the iodone() function for buffers which have had callbacks attached - * to them by xfs_buf_attach_iodone(). We need to iterate the items on the - * callback list, mark the buffer as having no more callbacks and then push the - * buffer through IO completion processing. - */ -void -xfs_buf_iodone_callbacks( +static void +xfs_buf_item_done( struct xfs_buf *bp) { - /* - * If there is an error, process it. Some errors require us - * to run callbacks after failure processing is done so we - * detect that and take appropriate action. 
- */ - if (bp->b_error && xfs_buf_iodone_callback_error(bp)) + struct xfs_buf_log_item *bip = bp->b_log_item; + + if (!bip) return; /* - * Successful IO or permanent error. Either way, we can clear the - * retry state here in preparation for the next error that may occur. + * If we are forcibly shutting down, this may well be off the AIL + * already. That's because we simulate the log-committed callbacks to + * unpin these buffers. Or we may never have put this item on AIL + * because of the transaction was aborted forcibly. + * xfs_trans_ail_delete() takes care of these. + * + * Either way, AIL is useless if we're forcing a shutdown. */ + xfs_trans_ail_delete(&bip->bli_item, SHUTDOWN_CORRUPT_INCORE); + bp->b_log_item = NULL; + xfs_buf_item_free(bip); + xfs_buf_rele(bp); +} + +static inline void +xfs_buf_clear_ioerror_retry_state( + struct xfs_buf *bp) +{ bp->b_last_error = 0; bp->b_retries = 0; bp->b_first_retry_time = 0; +} - xfs_buf_do_callbacks(bp); - bp->b_log_item = NULL; - list_del_init(&bp->b_li_list); - bp->b_iodone = NULL; - xfs_buf_ioend(bp); +/* + * Inode buffer iodone callback function. + */ +void +xfs_buf_inode_iodone( + struct xfs_buf *bp) +{ + if (bp->b_error) { + struct xfs_log_item *lip; + int ret = xfs_buf_iodone_error(bp); + + if (ret == XBF_IOERROR_FINISH) + goto finish_iodone; + if (ret == XBF_IOERROR_DONE) + return; + ASSERT(ret == XBF_IOERROR_FAIL); + list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { + set_bit(XFS_LI_FAILED, &lip->li_flags); + } + xfs_buf_ioerror(bp, 0); + xfs_buf_relse(bp); + return; + } + +finish_iodone: + xfs_buf_clear_ioerror_retry_state(bp); + xfs_buf_item_done(bp); + xfs_iflush_done(bp); + xfs_buf_ioend_finish(bp); } /* - * This is the iodone() function for buffers which have been - * logged. It is called when they are eventually flushed out. - * It should remove the buf item from the AIL, and free the buf item. - * It is called by xfs_buf_iodone_callbacks() above which will take - * care of cleaning up the buffer itself. + * Dquot buffer iodone callback function. */ void -xfs_buf_iodone( - struct xfs_buf *bp, - struct xfs_log_item *lip) +xfs_buf_dquot_iodone( + struct xfs_buf *bp) { - ASSERT(BUF_ITEM(lip)->bli_buf == bp); + if (bp->b_error) { + struct xfs_log_item *lip; + int ret = xfs_buf_iodone_error(bp); + + if (ret == XBF_IOERROR_FINISH) + goto finish_iodone; + if (ret == XBF_IOERROR_DONE) + return; + ASSERT(ret == XBF_IOERROR_FAIL); + spin_lock(&bp->b_mount->m_ail->ail_lock); + list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { + xfs_set_li_failed(lip, bp); + } + spin_unlock(&bp->b_mount->m_ail->ail_lock); + xfs_buf_ioerror(bp, 0); + xfs_buf_relse(bp); + return; + } - xfs_buf_rele(bp); +finish_iodone: + xfs_buf_clear_ioerror_retry_state(bp); + /* a newly allocated dquot buffer might have a log item attached */ + xfs_buf_item_done(bp); + xfs_dquot_done(bp); + xfs_buf_ioend_finish(bp); +} - /* - * If we are forcibly shutting down, this may well be off the AIL - * already. That's because we simulate the log-committed callbacks to - * unpin these buffers. Or we may never have put this item on AIL - * because of the transaction was aborted forcibly. - * xfs_trans_ail_delete() takes care of these. - * - * Either way, AIL is useless if we're forcing a shutdown. - */ - xfs_trans_ail_delete(lip, SHUTDOWN_CORRUPT_INCORE); - xfs_buf_item_free(BUF_ITEM(lip)); +/* + * Dirty buffer iodone callback function. 
+ * + * Note that for things like remote attribute buffers, there may not be a buffer + * log item here, so processing the buffer log item must remain be optional. + */ +void +xfs_buf_iodone( + struct xfs_buf *bp) +{ + if (bp->b_error) { + int ret = xfs_buf_iodone_error(bp); + + if (ret == XBF_IOERROR_FINISH) + goto finish_iodone; + if (ret == XBF_IOERROR_DONE) + return; + ASSERT(ret == XBF_IOERROR_FAIL); + ASSERT(list_empty(&bp->b_li_list)); + xfs_buf_ioerror(bp, 0); + xfs_buf_relse(bp); + return; + } + +finish_iodone: + xfs_buf_clear_ioerror_retry_state(bp); + xfs_buf_item_done(bp); + xfs_buf_ioend_finish(bp); } diff --git a/fs/xfs/xfs_buf_item.h b/fs/xfs/xfs_buf_item.h index c9c57e2da932..23507cbb4c41 100644 --- a/fs/xfs/xfs_buf_item.h +++ b/fs/xfs/xfs_buf_item.h @@ -54,11 +54,9 @@ void xfs_buf_item_relse(struct xfs_buf *); bool xfs_buf_item_put(struct xfs_buf_log_item *); void xfs_buf_item_log(struct xfs_buf_log_item *, uint, uint); bool xfs_buf_item_dirty_format(struct xfs_buf_log_item *); -void xfs_buf_attach_iodone(struct xfs_buf *, - void(*)(struct xfs_buf *, struct xfs_log_item *), - struct xfs_log_item *); -void xfs_buf_iodone_callbacks(struct xfs_buf *); -void xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *); +void xfs_buf_inode_iodone(struct xfs_buf *); +void xfs_buf_dquot_iodone(struct xfs_buf *); +void xfs_buf_iodone(struct xfs_buf *); bool xfs_buf_log_check_iovec(struct xfs_log_iovec *iovec); extern kmem_zone_t *xfs_buf_item_zone; diff --git a/fs/xfs/xfs_buf_item_recover.c b/fs/xfs/xfs_buf_item_recover.c index 04faa7310c4f..8f0457d67d77 100644 --- a/fs/xfs/xfs_buf_item_recover.c +++ b/fs/xfs/xfs_buf_item_recover.c @@ -419,8 +419,7 @@ xlog_recover_validate_buf_type( if (bp->b_ops) { struct xfs_buf_log_item *bip; - ASSERT(!bp->b_iodone || bp->b_iodone == xlog_recover_iodone); - bp->b_iodone = xlog_recover_iodone; + bp->b_flags |= _XBF_LOGRECOVERY; xfs_buf_item_init(bp, mp); bip = bp->b_log_item; bip->bli_item.li_lsn = current_lsn; @@ -494,8 +493,7 @@ xlog_recover_do_reg_buffer( item->ri_buf[i].i_len, __func__); goto next; } - fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, - -1, 0); + fa = xfs_dquot_verify(mp, item->ri_buf[i].i_addr, -1); if (fa) { xfs_alert(mp, "dquot corrupt at %pS trying to replay into block 0x%llx", @@ -548,11 +546,11 @@ xlog_recover_do_dquot_buffer( type = 0; if (buf_f->blf_flags & XFS_BLF_UDQUOT_BUF) - type |= XFS_DQ_USER; + type |= XFS_DQTYPE_USER; if (buf_f->blf_flags & XFS_BLF_PDQUOT_BUF) - type |= XFS_DQ_PROJ; + type |= XFS_DQTYPE_PROJ; if (buf_f->blf_flags & XFS_BLF_GDQUOT_BUF) - type |= XFS_DQ_GROUP; + type |= XFS_DQTYPE_GROUP; /* * This type of quotas was turned off, so ignore this buffer */ @@ -950,7 +948,7 @@ xlog_recover_buf_commit_pass2( * or inode_cluster_size bytes, whichever is bigger. The inode * buffers in the log can be a different size if the log was generated * by an older kernel using unclustered inode buffers or a newer kernel - * running with a different inode cluster size. Regardless, if the + * running with a different inode cluster size. 
Regardless, if * the inode buffer size isn't max(blocksize, inode_cluster_size) * for *our* value of inode_cluster_size, then we need to keep * the buffer out of the buffer cache so that the buffer won't @@ -963,7 +961,7 @@ xlog_recover_buf_commit_pass2( error = xfs_bwrite(bp); } else { ASSERT(bp->b_mount == mp); - bp->b_iodone = xlog_recover_iodone; + bp->b_flags |= _XBF_LOGRECOVERY; xfs_buf_delwri_queue(bp, buffer_list); } diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c index d5b7f03e93c8..bcd73b9c2994 100644 --- a/fs/xfs/xfs_dquot.c +++ b/fs/xfs/xfs_dquot.c @@ -23,6 +23,7 @@ #include "xfs_trace.h" #include "xfs_log.h" #include "xfs_bmap_btree.h" +#include "xfs_error.h" /* * Lock order: @@ -66,39 +67,61 @@ xfs_qm_dqdestroy( */ void xfs_qm_adjust_dqlimits( - struct xfs_mount *mp, struct xfs_dquot *dq) { + struct xfs_mount *mp = dq->q_mount; struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_disk_dquot *d = &dq->q_core; struct xfs_def_quota *defq; int prealloc = 0; - ASSERT(d->d_id); + ASSERT(dq->q_id); defq = xfs_get_defquota(q, xfs_dquot_type(dq)); - if (defq->bsoftlimit && !d->d_blk_softlimit) { - d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit); + if (!dq->q_blk.softlimit) { + dq->q_blk.softlimit = defq->blk.soft; prealloc = 1; } - if (defq->bhardlimit && !d->d_blk_hardlimit) { - d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit); + if (!dq->q_blk.hardlimit) { + dq->q_blk.hardlimit = defq->blk.hard; prealloc = 1; } - if (defq->isoftlimit && !d->d_ino_softlimit) - d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit); - if (defq->ihardlimit && !d->d_ino_hardlimit) - d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit); - if (defq->rtbsoftlimit && !d->d_rtb_softlimit) - d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit); - if (defq->rtbhardlimit && !d->d_rtb_hardlimit) - d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit); + if (!dq->q_ino.softlimit) + dq->q_ino.softlimit = defq->ino.soft; + if (!dq->q_ino.hardlimit) + dq->q_ino.hardlimit = defq->ino.hard; + if (!dq->q_rtb.softlimit) + dq->q_rtb.softlimit = defq->rtb.soft; + if (!dq->q_rtb.hardlimit) + dq->q_rtb.hardlimit = defq->rtb.hard; if (prealloc) xfs_dquot_set_prealloc_limits(dq); } /* + * Determine if this quota counter is over either limit and set the quota + * timers as appropriate. + */ +static inline void +xfs_qm_adjust_res_timer( + struct xfs_dquot_res *res, + struct xfs_quota_limits *qlim) +{ + ASSERT(res->hardlimit == 0 || res->softlimit <= res->hardlimit); + + if ((res->softlimit && res->count > res->softlimit) || + (res->hardlimit && res->count > res->hardlimit)) { + if (res->timer == 0) + res->timer = ktime_get_real_seconds() + qlim->time; + } else { + if (res->timer == 0) + res->warnings = 0; + else + res->timer = 0; + } +} + +/* * Check the limits and timers of a dquot and start or reset timers * if necessary. 
* This gets called even when quota enforcement is OFF, which makes our @@ -113,96 +136,18 @@ xfs_qm_adjust_dqlimits( */ void xfs_qm_adjust_dqtimers( - struct xfs_mount *mp, struct xfs_dquot *dq) { + struct xfs_mount *mp = dq->q_mount; struct xfs_quotainfo *qi = mp->m_quotainfo; - struct xfs_disk_dquot *d = &dq->q_core; struct xfs_def_quota *defq; - ASSERT(d->d_id); + ASSERT(dq->q_id); defq = xfs_get_defquota(qi, xfs_dquot_type(dq)); -#ifdef DEBUG - if (d->d_blk_hardlimit) - ASSERT(be64_to_cpu(d->d_blk_softlimit) <= - be64_to_cpu(d->d_blk_hardlimit)); - if (d->d_ino_hardlimit) - ASSERT(be64_to_cpu(d->d_ino_softlimit) <= - be64_to_cpu(d->d_ino_hardlimit)); - if (d->d_rtb_hardlimit) - ASSERT(be64_to_cpu(d->d_rtb_softlimit) <= - be64_to_cpu(d->d_rtb_hardlimit)); -#endif - - if (!d->d_btimer) { - if ((d->d_blk_softlimit && - (be64_to_cpu(d->d_bcount) > - be64_to_cpu(d->d_blk_softlimit))) || - (d->d_blk_hardlimit && - (be64_to_cpu(d->d_bcount) > - be64_to_cpu(d->d_blk_hardlimit)))) { - d->d_btimer = cpu_to_be32(ktime_get_real_seconds() + - defq->btimelimit); - } else { - d->d_bwarns = 0; - } - } else { - if ((!d->d_blk_softlimit || - (be64_to_cpu(d->d_bcount) <= - be64_to_cpu(d->d_blk_softlimit))) && - (!d->d_blk_hardlimit || - (be64_to_cpu(d->d_bcount) <= - be64_to_cpu(d->d_blk_hardlimit)))) { - d->d_btimer = 0; - } - } - - if (!d->d_itimer) { - if ((d->d_ino_softlimit && - (be64_to_cpu(d->d_icount) > - be64_to_cpu(d->d_ino_softlimit))) || - (d->d_ino_hardlimit && - (be64_to_cpu(d->d_icount) > - be64_to_cpu(d->d_ino_hardlimit)))) { - d->d_itimer = cpu_to_be32(ktime_get_real_seconds() + - defq->itimelimit); - } else { - d->d_iwarns = 0; - } - } else { - if ((!d->d_ino_softlimit || - (be64_to_cpu(d->d_icount) <= - be64_to_cpu(d->d_ino_softlimit))) && - (!d->d_ino_hardlimit || - (be64_to_cpu(d->d_icount) <= - be64_to_cpu(d->d_ino_hardlimit)))) { - d->d_itimer = 0; - } - } - - if (!d->d_rtbtimer) { - if ((d->d_rtb_softlimit && - (be64_to_cpu(d->d_rtbcount) > - be64_to_cpu(d->d_rtb_softlimit))) || - (d->d_rtb_hardlimit && - (be64_to_cpu(d->d_rtbcount) > - be64_to_cpu(d->d_rtb_hardlimit)))) { - d->d_rtbtimer = cpu_to_be32(ktime_get_real_seconds() + - defq->rtbtimelimit); - } else { - d->d_rtbwarns = 0; - } - } else { - if ((!d->d_rtb_softlimit || - (be64_to_cpu(d->d_rtbcount) <= - be64_to_cpu(d->d_rtb_softlimit))) && - (!d->d_rtb_hardlimit || - (be64_to_cpu(d->d_rtbcount) <= - be64_to_cpu(d->d_rtb_hardlimit)))) { - d->d_rtbtimer = 0; - } - } + xfs_qm_adjust_res_timer(&dq->q_blk, &defq->blk); + xfs_qm_adjust_res_timer(&dq->q_ino, &defq->ino); + xfs_qm_adjust_res_timer(&dq->q_rtb, &defq->rtb); } /* @@ -213,7 +158,7 @@ xfs_qm_init_dquot_blk( struct xfs_trans *tp, struct xfs_mount *mp, xfs_dqid_t id, - uint type, + xfs_dqtype_t type, struct xfs_buf *bp) { struct xfs_quotainfo *q = mp->m_quotainfo; @@ -226,6 +171,24 @@ xfs_qm_init_dquot_blk( ASSERT(tp); ASSERT(xfs_buf_islocked(bp)); + switch (type) { + case XFS_DQTYPE_USER: + qflag = XFS_UQUOTA_CHKD; + blftype = XFS_BLF_UDQUOT_BUF; + break; + case XFS_DQTYPE_PROJ: + qflag = XFS_PQUOTA_CHKD; + blftype = XFS_BLF_PDQUOT_BUF; + break; + case XFS_DQTYPE_GROUP: + qflag = XFS_GQUOTA_CHKD; + blftype = XFS_BLF_GDQUOT_BUF; + break; + default: + ASSERT(0); + return; + } + d = bp->b_addr; /* @@ -237,7 +200,7 @@ xfs_qm_init_dquot_blk( d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); d->dd_diskdq.d_version = XFS_DQUOT_VERSION; d->dd_diskdq.d_id = cpu_to_be32(curid); - d->dd_diskdq.d_flags = type; + d->dd_diskdq.d_type = type; if (xfs_sb_version_hascrc(&mp->m_sb)) { 
uuid_copy(&d->dd_uuid, &mp->m_sb.sb_meta_uuid); xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk), @@ -245,17 +208,6 @@ xfs_qm_init_dquot_blk( } } - if (type & XFS_DQ_USER) { - qflag = XFS_UQUOTA_CHKD; - blftype = XFS_BLF_UDQUOT_BUF; - } else if (type & XFS_DQ_PROJ) { - qflag = XFS_PQUOTA_CHKD; - blftype = XFS_BLF_PDQUOT_BUF; - } else { - qflag = XFS_GQUOTA_CHKD; - blftype = XFS_BLF_GDQUOT_BUF; - } - xfs_trans_dquot_buf(tp, bp, blftype); /* @@ -290,8 +242,8 @@ xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp) { uint64_t space; - dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit); - dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit); + dqp->q_prealloc_hi_wmark = dqp->q_blk.hardlimit; + dqp->q_prealloc_lo_wmark = dqp->q_blk.softlimit; if (!dqp->q_prealloc_lo_wmark) { dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark; do_div(dqp->q_prealloc_lo_wmark, 100); @@ -321,14 +273,15 @@ xfs_dquot_disk_alloc( struct xfs_trans *tp = *tpp; struct xfs_mount *mp = tp->t_mountp; struct xfs_buf *bp; - struct xfs_inode *quotip = xfs_quota_inode(mp, dqp->dq_flags); + xfs_dqtype_t qtype = xfs_dquot_type(dqp); + struct xfs_inode *quotip = xfs_quota_inode(mp, qtype); int nmaps = 1; int error; trace_xfs_dqalloc(dqp); xfs_ilock(quotip, XFS_ILOCK_EXCL); - if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) { + if (!xfs_this_quota_on(dqp->q_mount, qtype)) { /* * Return if this type of quotas is turned off while we didn't * have an inode lock @@ -365,8 +318,7 @@ xfs_dquot_disk_alloc( * Make a chunk of dquots out of this buffer and log * the entire thing. */ - xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id), - dqp->dq_flags & XFS_DQ_ALLTYPES, bp); + xfs_qm_init_dquot_blk(tp, mp, dqp->q_id, qtype, bp); xfs_buf_set_ref(bp, XFS_DQUOT_REF); /* @@ -413,13 +365,14 @@ xfs_dquot_disk_read( { struct xfs_bmbt_irec map; struct xfs_buf *bp; - struct xfs_inode *quotip = xfs_quota_inode(mp, dqp->dq_flags); + xfs_dqtype_t qtype = xfs_dquot_type(dqp); + struct xfs_inode *quotip = xfs_quota_inode(mp, qtype); uint lock_mode; int nmaps = 1; int error; lock_mode = xfs_ilock_data_map_shared(quotip); - if (!xfs_this_quota_on(mp, dqp->dq_flags)) { + if (!xfs_this_quota_on(mp, qtype)) { /* * Return if this type of quotas is turned off while we * didn't have the quota inode lock. @@ -471,14 +424,14 @@ STATIC struct xfs_dquot * xfs_dquot_alloc( struct xfs_mount *mp, xfs_dqid_t id, - uint type) + xfs_dqtype_t type) { struct xfs_dquot *dqp; - dqp = kmem_zone_zalloc(xfs_qm_dqzone, 0); + dqp = kmem_cache_zalloc(xfs_qm_dqzone, GFP_KERNEL | __GFP_NOFAIL); - dqp->dq_flags = type; - dqp->q_core.d_id = cpu_to_be32(id); + dqp->q_type = type; + dqp->q_id = id; dqp->q_mount = mp; INIT_LIST_HEAD(&dqp->q_lru); mutex_init(&dqp->q_qlock); @@ -503,13 +456,13 @@ xfs_dquot_alloc( * quotas. */ switch (type) { - case XFS_DQ_USER: + case XFS_DQTYPE_USER: /* uses the default lock class */ break; - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class); break; - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class); break; default: @@ -524,26 +477,91 @@ xfs_dquot_alloc( } /* Copy the in-core quota fields in from the on-disk buffer. */ -STATIC void +STATIC int xfs_dquot_from_disk( struct xfs_dquot *dqp, struct xfs_buf *bp) { struct xfs_disk_dquot *ddqp = bp->b_addr + dqp->q_bufoffset; + /* + * Ensure that we got the type and ID we were looking for. + * Everything else was checked by the dquot buffer verifier. 
+ */ + if ((ddqp->d_type & XFS_DQTYPE_REC_MASK) != xfs_dquot_type(dqp) || + be32_to_cpu(ddqp->d_id) != dqp->q_id) { + xfs_alert_tag(bp->b_mount, XFS_PTAG_VERIFIER_ERROR, + "Metadata corruption detected at %pS, quota %u", + __this_address, dqp->q_id); + xfs_alert(bp->b_mount, "Unmount and run xfs_repair"); + return -EFSCORRUPTED; + } + /* copy everything from disk dquot to the incore dquot */ - memcpy(&dqp->q_core, ddqp, sizeof(struct xfs_disk_dquot)); + dqp->q_type = ddqp->d_type; + dqp->q_blk.hardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); + dqp->q_blk.softlimit = be64_to_cpu(ddqp->d_blk_softlimit); + dqp->q_ino.hardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); + dqp->q_ino.softlimit = be64_to_cpu(ddqp->d_ino_softlimit); + dqp->q_rtb.hardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); + dqp->q_rtb.softlimit = be64_to_cpu(ddqp->d_rtb_softlimit); + + dqp->q_blk.count = be64_to_cpu(ddqp->d_bcount); + dqp->q_ino.count = be64_to_cpu(ddqp->d_icount); + dqp->q_rtb.count = be64_to_cpu(ddqp->d_rtbcount); + + dqp->q_blk.warnings = be16_to_cpu(ddqp->d_bwarns); + dqp->q_ino.warnings = be16_to_cpu(ddqp->d_iwarns); + dqp->q_rtb.warnings = be16_to_cpu(ddqp->d_rtbwarns); + + dqp->q_blk.timer = be32_to_cpu(ddqp->d_btimer); + dqp->q_ino.timer = be32_to_cpu(ddqp->d_itimer); + dqp->q_rtb.timer = be32_to_cpu(ddqp->d_rtbtimer); /* * Reservation counters are defined as reservation plus current usage * to avoid having to add every time. */ - dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount); - dqp->q_res_icount = be64_to_cpu(ddqp->d_icount); - dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount); + dqp->q_blk.reserved = dqp->q_blk.count; + dqp->q_ino.reserved = dqp->q_ino.count; + dqp->q_rtb.reserved = dqp->q_rtb.count; /* initialize the dquot speculative prealloc thresholds */ xfs_dquot_set_prealloc_limits(dqp); + return 0; +} + +/* Copy the in-core quota fields into the on-disk buffer. */ +void +xfs_dquot_to_disk( + struct xfs_disk_dquot *ddqp, + struct xfs_dquot *dqp) +{ + ddqp->d_magic = cpu_to_be16(XFS_DQUOT_MAGIC); + ddqp->d_version = XFS_DQUOT_VERSION; + ddqp->d_type = dqp->q_type; + ddqp->d_id = cpu_to_be32(dqp->q_id); + ddqp->d_pad0 = 0; + ddqp->d_pad = 0; + + ddqp->d_blk_hardlimit = cpu_to_be64(dqp->q_blk.hardlimit); + ddqp->d_blk_softlimit = cpu_to_be64(dqp->q_blk.softlimit); + ddqp->d_ino_hardlimit = cpu_to_be64(dqp->q_ino.hardlimit); + ddqp->d_ino_softlimit = cpu_to_be64(dqp->q_ino.softlimit); + ddqp->d_rtb_hardlimit = cpu_to_be64(dqp->q_rtb.hardlimit); + ddqp->d_rtb_softlimit = cpu_to_be64(dqp->q_rtb.softlimit); + + ddqp->d_bcount = cpu_to_be64(dqp->q_blk.count); + ddqp->d_icount = cpu_to_be64(dqp->q_ino.count); + ddqp->d_rtbcount = cpu_to_be64(dqp->q_rtb.count); + + ddqp->d_bwarns = cpu_to_be16(dqp->q_blk.warnings); + ddqp->d_iwarns = cpu_to_be16(dqp->q_ino.warnings); + ddqp->d_rtbwarns = cpu_to_be16(dqp->q_rtb.warnings); + + ddqp->d_btimer = cpu_to_be32(dqp->q_blk.timer); + ddqp->d_itimer = cpu_to_be32(dqp->q_ino.timer); + ddqp->d_rtbtimer = cpu_to_be32(dqp->q_rtb.timer); } /* Allocate and initialize the dquot buffer for this in-core dquot. */ @@ -592,7 +610,7 @@ static int xfs_qm_dqread( struct xfs_mount *mp, xfs_dqid_t id, - uint type, + xfs_dqtype_t type, bool can_alloc, struct xfs_dquot **dqpp) { @@ -617,9 +635,11 @@ xfs_qm_dqread( * further. 
*/ ASSERT(xfs_buf_islocked(bp)); - xfs_dquot_from_disk(dqp, bp); - + error = xfs_dquot_from_disk(dqp, bp); xfs_buf_relse(bp); + if (error) + goto err; + *dqpp = dqp; return error; @@ -638,7 +658,7 @@ err: static int xfs_dq_get_next_id( struct xfs_mount *mp, - uint type, + xfs_dqtype_t type, xfs_dqid_t *id) { struct xfs_inode *quotip = xfs_quota_inode(mp, type); @@ -706,7 +726,7 @@ restart: } xfs_dqlock(dqp); - if (dqp->dq_flags & XFS_DQ_FREEING) { + if (dqp->q_flags & XFS_DQFLAG_FREEING) { xfs_dqunlock(dqp); mutex_unlock(&qi->qi_tree_lock); trace_xfs_dqget_freeing(dqp); @@ -762,21 +782,21 @@ xfs_qm_dqget_cache_insert( static int xfs_qm_dqget_checks( struct xfs_mount *mp, - uint type) + xfs_dqtype_t type) { if (WARN_ON_ONCE(!XFS_IS_QUOTA_RUNNING(mp))) return -ESRCH; switch (type) { - case XFS_DQ_USER: + case XFS_DQTYPE_USER: if (!XFS_IS_UQUOTA_ON(mp)) return -ESRCH; return 0; - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: if (!XFS_IS_GQUOTA_ON(mp)) return -ESRCH; return 0; - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: if (!XFS_IS_PQUOTA_ON(mp)) return -ESRCH; return 0; @@ -787,14 +807,14 @@ xfs_qm_dqget_checks( } /* - * Given the file system, id, and type (UDQUOT/GDQUOT), return a a locked + * Given the file system, id, and type (UDQUOT/GDQUOT), return a locked * dquot, doing an allocation (if requested) as needed. */ int xfs_qm_dqget( struct xfs_mount *mp, xfs_dqid_t id, - uint type, + xfs_dqtype_t type, bool can_alloc, struct xfs_dquot **O_dqpp) { @@ -844,7 +864,7 @@ int xfs_qm_dqget_uncached( struct xfs_mount *mp, xfs_dqid_t id, - uint type, + xfs_dqtype_t type, struct xfs_dquot **dqpp) { int error; @@ -860,14 +880,14 @@ xfs_qm_dqget_uncached( xfs_dqid_t xfs_qm_id_for_quotatype( struct xfs_inode *ip, - uint type) + xfs_dqtype_t type) { switch (type) { - case XFS_DQ_USER: + case XFS_DQTYPE_USER: return i_uid_read(VFS_I(ip)); - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: return i_gid_read(VFS_I(ip)); - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: return ip->i_d.di_projid; } ASSERT(0); @@ -882,7 +902,7 @@ xfs_qm_id_for_quotatype( int xfs_qm_dqget_inode( struct xfs_inode *ip, - uint type, + xfs_dqtype_t type, bool can_alloc, struct xfs_dquot **O_dqpp) { @@ -968,7 +988,7 @@ int xfs_qm_dqget_next( struct xfs_mount *mp, xfs_dqid_t id, - uint type, + xfs_dqtype_t type, struct xfs_dquot **dqpp) { struct xfs_dquot *dqp; @@ -1048,9 +1068,8 @@ xfs_qm_dqrele( * from the AIL if it has not been re-logged, and unlocking the dquot's * flush lock. This behavior is very similar to that of inodes.. */ -STATIC void +static void xfs_qm_dqflush_done( - struct xfs_buf *bp, struct xfs_log_item *lip) { struct xfs_dq_logitem *qip = (struct xfs_dq_logitem *)lip; @@ -1071,16 +1090,12 @@ xfs_qm_dqflush_done( test_bit(XFS_LI_FAILED, &lip->li_flags))) { spin_lock(&ailp->ail_lock); + xfs_clear_li_failed(lip); if (lip->li_lsn == qip->qli_flush_lsn) { /* xfs_ail_update_finish() drops the AIL lock */ tail_lsn = xfs_ail_delete_one(ailp, lip); xfs_ail_update_finish(ailp, tail_lsn); } else { - /* - * Clear the failed state since we are about to drop the - * flush lock - */ - xfs_clear_li_failed(lip); spin_unlock(&ailp->ail_lock); } } @@ -1091,6 +1106,48 @@ xfs_qm_dqflush_done( xfs_dqfunlock(dqp); } +void +xfs_dquot_done( + struct xfs_buf *bp) +{ + struct xfs_log_item *lip, *n; + + list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { + list_del_init(&lip->li_bio_list); + xfs_qm_dqflush_done(lip); + } +} + +/* Check incore dquot for errors before we flush. 
*/ +static xfs_failaddr_t +xfs_qm_dqflush_check( + struct xfs_dquot *dqp) +{ + xfs_dqtype_t type = xfs_dquot_type(dqp); + + if (type != XFS_DQTYPE_USER && + type != XFS_DQTYPE_GROUP && + type != XFS_DQTYPE_PROJ) + return __this_address; + + if (dqp->q_id == 0) + return NULL; + + if (dqp->q_blk.softlimit && dqp->q_blk.count > dqp->q_blk.softlimit && + !dqp->q_blk.timer) + return __this_address; + + if (dqp->q_ino.softlimit && dqp->q_ino.count > dqp->q_ino.softlimit && + !dqp->q_ino.timer) + return __this_address; + + if (dqp->q_rtb.softlimit && dqp->q_rtb.count > dqp->q_rtb.softlimit && + !dqp->q_rtb.timer) + return __this_address; + + return NULL; +} + /* * Write a modified dquot to disk. * The dquot must be locked and the flush lock too taken by caller. @@ -1107,8 +1164,7 @@ xfs_qm_dqflush( struct xfs_mount *mp = dqp->q_mount; struct xfs_log_item *lip = &dqp->q_logitem.qli_item; struct xfs_buf *bp; - struct xfs_dqblk *dqb; - struct xfs_disk_dquot *ddqp; + struct xfs_dqblk *dqblk; xfs_failaddr_t fa; int error; @@ -1132,30 +1188,23 @@ xfs_qm_dqflush( if (error) goto out_abort; - /* - * Calculate the location of the dquot inside the buffer. - */ - dqb = bp->b_addr + dqp->q_bufoffset; - ddqp = &dqb->dd_diskdq; - - /* sanity check the in-core structure before we flush */ - fa = xfs_dquot_verify(mp, &dqp->q_core, be32_to_cpu(dqp->q_core.d_id), - 0); + fa = xfs_qm_dqflush_check(dqp); if (fa) { xfs_alert(mp, "corrupt dquot ID 0x%x in memory at %pS", - be32_to_cpu(dqp->q_core.d_id), fa); + dqp->q_id, fa); xfs_buf_relse(bp); error = -EFSCORRUPTED; goto out_abort; } - /* This is the only portion of data that needs to persist */ - memcpy(ddqp, &dqp->q_core, sizeof(struct xfs_disk_dquot)); + /* Flush the incore dquot to the ondisk buffer. */ + dqblk = bp->b_addr + dqp->q_bufoffset; + xfs_dquot_to_disk(&dqblk->dd_diskdq, dqp); /* * Clear the dirty field and remember the flush lsn for later use. */ - dqp->dq_flags &= ~XFS_DQ_DIRTY; + dqp->q_flags &= ~XFS_DQFLAG_DIRTY; xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn, &dqp->q_logitem.qli_item.li_lsn); @@ -1170,17 +1219,17 @@ xfs_qm_dqflush( * of a dquot without an up-to-date CRC getting to disk. */ if (xfs_sb_version_hascrc(&mp->m_sb)) { - dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn); - xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk), + dqblk->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn); + xfs_update_cksum((char *)dqblk, sizeof(struct xfs_dqblk), XFS_DQUOT_CRC_OFF); } /* - * Attach an iodone routine so that we can remove this dquot from the - * AIL and release the flush lock once the dquot is synced to disk. + * Attach the dquot to the buffer so that we can remove this dquot from + * the AIL and release the flush lock once the dquot is synced to disk. 
*/ - xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done, - &dqp->q_logitem.qli_item); + bp->b_flags |= _XBF_DQUOTS; + list_add_tail(&dqp->q_logitem.qli_item.li_bio_list, &bp->b_li_list); /* * If the buffer is pinned then push on the log so we won't @@ -1196,7 +1245,7 @@ xfs_qm_dqflush( return 0; out_abort: - dqp->dq_flags &= ~XFS_DQ_DIRTY; + dqp->q_flags &= ~XFS_DQFLAG_DIRTY; xfs_trans_ail_delete(lip, 0); xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); out_unlock: @@ -1217,8 +1266,7 @@ xfs_dqlock2( { if (d1 && d2) { ASSERT(d1 != d2); - if (be32_to_cpu(d1->q_core.d_id) > - be32_to_cpu(d2->q_core.d_id)) { + if (d1->q_id > d2->q_id) { mutex_lock(&d2->q_qlock); mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED); } else { @@ -1270,7 +1318,7 @@ xfs_qm_exit(void) int xfs_qm_dqiterate( struct xfs_mount *mp, - uint dqtype, + xfs_dqtype_t type, xfs_qm_dqiterate_fn iter_fn, void *priv) { @@ -1279,16 +1327,15 @@ xfs_qm_dqiterate( int error; do { - error = xfs_qm_dqget_next(mp, id, dqtype, &dq); + error = xfs_qm_dqget_next(mp, id, type, &dq); if (error == -ENOENT) return 0; if (error) return error; - error = iter_fn(dq, dqtype, priv); - id = be32_to_cpu(dq->q_core.d_id); + error = iter_fn(dq, type, priv); + id = dq->q_id; xfs_qm_dqput(dq); - id++; } while (error == 0 && id != 0); return error; diff --git a/fs/xfs/xfs_dquot.h b/fs/xfs/xfs_dquot.h index 71e36c85e20b..282a65da93c7 100644 --- a/fs/xfs/xfs_dquot.h +++ b/fs/xfs/xfs_dquot.h @@ -27,26 +27,53 @@ enum { XFS_QLOWSP_MAX }; +struct xfs_dquot_res { + /* Total resources allocated and reserved. */ + xfs_qcnt_t reserved; + + /* Total resources allocated. */ + xfs_qcnt_t count; + + /* Absolute and preferred limits. */ + xfs_qcnt_t hardlimit; + xfs_qcnt_t softlimit; + + /* + * For root dquots, this is the default grace period, in seconds. + * Otherwise, this is when the quota grace period expires, + * in seconds since the Unix epoch. + */ + time64_t timer; + + /* + * For root dquots, this is the maximum number of warnings that will + * be issued for this quota type. Otherwise, this is the number of + * warnings issued against this quota. Note that none of this is + * implemented. 
+ */ + xfs_qwarncnt_t warnings; +}; + /* * The incore dquot structure */ struct xfs_dquot { - uint dq_flags; struct list_head q_lru; struct xfs_mount *q_mount; + xfs_dqtype_t q_type; + uint16_t q_flags; + xfs_dqid_t q_id; uint q_nrefs; - xfs_daddr_t q_blkno; int q_bufoffset; + xfs_daddr_t q_blkno; xfs_fileoff_t q_fileoffset; - struct xfs_disk_dquot q_core; + struct xfs_dquot_res q_blk; /* regular blocks */ + struct xfs_dquot_res q_ino; /* inodes */ + struct xfs_dquot_res q_rtb; /* realtime blocks */ + struct xfs_dq_logitem q_logitem; - /* total regular nblks used+reserved */ - xfs_qcnt_t q_res_bcount; - /* total inos allocd+reserved */ - xfs_qcnt_t q_res_icount; - /* total realtime blks used+reserved */ - xfs_qcnt_t q_res_rtbcount; + xfs_qcnt_t q_prealloc_lo_wmark; xfs_qcnt_t q_prealloc_hi_wmark; int64_t q_low_space[XFS_QLOWSP_MAX]; @@ -101,34 +128,59 @@ static inline void xfs_dqunlock(struct xfs_dquot *dqp) mutex_unlock(&dqp->q_qlock); } -static inline int xfs_this_quota_on(struct xfs_mount *mp, int type) +static inline int +xfs_dquot_type(const struct xfs_dquot *dqp) { - switch (type & XFS_DQ_ALLTYPES) { - case XFS_DQ_USER: + return dqp->q_type & XFS_DQTYPE_REC_MASK; +} + +static inline int xfs_this_quota_on(struct xfs_mount *mp, xfs_dqtype_t type) +{ + switch (type) { + case XFS_DQTYPE_USER: return XFS_IS_UQUOTA_ON(mp); - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: return XFS_IS_GQUOTA_ON(mp); - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: return XFS_IS_PQUOTA_ON(mp); default: return 0; } } -static inline struct xfs_dquot *xfs_inode_dquot(struct xfs_inode *ip, int type) +static inline struct xfs_dquot *xfs_inode_dquot( + struct xfs_inode *ip, + xfs_dqtype_t type) { - switch (type & XFS_DQ_ALLTYPES) { - case XFS_DQ_USER: + switch (type) { + case XFS_DQTYPE_USER: return ip->i_udquot; - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: return ip->i_gdquot; - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: return ip->i_pdquot; default: return NULL; } } +/* Decide if the dquot's limits are actually being enforced. */ +static inline bool +xfs_dquot_is_enforced( + const struct xfs_dquot *dqp) +{ + switch (xfs_dquot_type(dqp)) { + case XFS_DQTYPE_USER: + return XFS_IS_UQUOTA_ENFORCED(dqp->q_mount); + case XFS_DQTYPE_GROUP: + return XFS_IS_GQUOTA_ENFORCED(dqp->q_mount); + case XFS_DQTYPE_PROJ: + return XFS_IS_PQUOTA_ENFORCED(dqp->q_mount); + } + ASSERT(0); + return false; +} + /* * Check whether a dquot is under low free space conditions. We assume the quota * is enabled and enforced. 
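The practical effect of carrying counters and limits in the native-endian xfs_dquot_res members is that incore checks no longer decode the big-endian q_core copy on every access. An illustrative before/after comparison, not copied verbatim from any single hunk; over_soft is a placeholder variable:

	/* before: decode the ondisk big-endian copy for every check */
	if (be64_to_cpu(dqp->q_core.d_bcount) >
	    be64_to_cpu(dqp->q_core.d_blk_softlimit))
		over_soft = true;

	/* after: compare the incore counters directly */
	if (dqp->q_blk.count > dqp->q_blk.softlimit)
		over_soft = true;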
@@ -137,38 +189,35 @@ static inline bool xfs_dquot_lowsp(struct xfs_dquot *dqp) { int64_t freesp; - freesp = be64_to_cpu(dqp->q_core.d_blk_hardlimit) - dqp->q_res_bcount; + freesp = dqp->q_blk.hardlimit - dqp->q_blk.reserved; if (freesp < dqp->q_low_space[XFS_QLOWSP_1_PCNT]) return true; return false; } +void xfs_dquot_to_disk(struct xfs_disk_dquot *ddqp, struct xfs_dquot *dqp); + #define XFS_DQ_IS_LOCKED(dqp) (mutex_is_locked(&((dqp)->q_qlock))) -#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->dq_flags & XFS_DQ_DIRTY) -#define XFS_QM_ISUDQ(dqp) ((dqp)->dq_flags & XFS_DQ_USER) -#define XFS_QM_ISPDQ(dqp) ((dqp)->dq_flags & XFS_DQ_PROJ) -#define XFS_QM_ISGDQ(dqp) ((dqp)->dq_flags & XFS_DQ_GROUP) +#define XFS_DQ_IS_DIRTY(dqp) ((dqp)->q_flags & XFS_DQFLAG_DIRTY) void xfs_qm_dqdestroy(struct xfs_dquot *dqp); int xfs_qm_dqflush(struct xfs_dquot *dqp, struct xfs_buf **bpp); void xfs_qm_dqunpin_wait(struct xfs_dquot *dqp); -void xfs_qm_adjust_dqtimers(struct xfs_mount *mp, - struct xfs_dquot *d); -void xfs_qm_adjust_dqlimits(struct xfs_mount *mp, - struct xfs_dquot *d); -xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip, uint type); +void xfs_qm_adjust_dqtimers(struct xfs_dquot *d); +void xfs_qm_adjust_dqlimits(struct xfs_dquot *d); +xfs_dqid_t xfs_qm_id_for_quotatype(struct xfs_inode *ip, + xfs_dqtype_t type); int xfs_qm_dqget(struct xfs_mount *mp, xfs_dqid_t id, - uint type, bool can_alloc, - struct xfs_dquot **dqpp); -int xfs_qm_dqget_inode(struct xfs_inode *ip, uint type, - bool can_alloc, - struct xfs_dquot **dqpp); + xfs_dqtype_t type, bool can_alloc, + struct xfs_dquot **dqpp); +int xfs_qm_dqget_inode(struct xfs_inode *ip, xfs_dqtype_t type, + bool can_alloc, struct xfs_dquot **dqpp); int xfs_qm_dqget_next(struct xfs_mount *mp, xfs_dqid_t id, - uint type, struct xfs_dquot **dqpp); + xfs_dqtype_t type, struct xfs_dquot **dqpp); int xfs_qm_dqget_uncached(struct xfs_mount *mp, - xfs_dqid_t id, uint type, - struct xfs_dquot **dqpp); + xfs_dqid_t id, xfs_dqtype_t type, + struct xfs_dquot **dqpp); void xfs_qm_dqput(struct xfs_dquot *dqp); void xfs_dqlock2(struct xfs_dquot *, struct xfs_dquot *); @@ -183,9 +232,9 @@ static inline struct xfs_dquot *xfs_qm_dqhold(struct xfs_dquot *dqp) return dqp; } -typedef int (*xfs_qm_dqiterate_fn)(struct xfs_dquot *dq, uint dqtype, - void *priv); -int xfs_qm_dqiterate(struct xfs_mount *mp, uint dqtype, +typedef int (*xfs_qm_dqiterate_fn)(struct xfs_dquot *dq, + xfs_dqtype_t type, void *priv); +int xfs_qm_dqiterate(struct xfs_mount *mp, xfs_dqtype_t type, xfs_qm_dqiterate_fn iter_fn, void *priv); #endif /* __XFS_DQUOT_H__ */ diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index 349c92d26570..8c1fdf37ee8f 100644 --- a/fs/xfs/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c @@ -45,6 +45,7 @@ xfs_qm_dquot_logitem_format( struct xfs_log_item *lip, struct xfs_log_vec *lv) { + struct xfs_disk_dquot ddq; struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); struct xfs_log_iovec *vecp = NULL; struct xfs_dq_logformat *qlf; @@ -52,14 +53,15 @@ xfs_qm_dquot_logitem_format( qlf = xlog_prepare_iovec(lv, &vecp, XLOG_REG_TYPE_QFORMAT); qlf->qlf_type = XFS_LI_DQUOT; qlf->qlf_size = 2; - qlf->qlf_id = be32_to_cpu(qlip->qli_dquot->q_core.d_id); + qlf->qlf_id = qlip->qli_dquot->q_id; qlf->qlf_blkno = qlip->qli_dquot->q_blkno; qlf->qlf_len = 1; qlf->qlf_boffset = qlip->qli_dquot->q_bufoffset; xlog_finish_iovec(lv, vecp, sizeof(struct xfs_dq_logformat)); - xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT, - &qlip->qli_dquot->q_core, + xfs_dquot_to_disk(&ddq, qlip->qli_dquot); + + 
xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_DQUOT, &ddq, sizeof(struct xfs_disk_dquot)); } @@ -113,23 +115,6 @@ xfs_qm_dqunpin_wait( wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0)); } -/* - * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer - * have been failed during writeback - * - * this informs the AIL that the dquot is already flush locked on the next push, - * and acquires a hold on the buffer to ensure that it isn't reclaimed before - * dirty data makes it to disk. - */ -STATIC void -xfs_dquot_item_error( - struct xfs_log_item *lip, - struct xfs_buf *bp) -{ - ASSERT(!completion_done(&DQUOT_ITEM(lip)->qli_dquot->q_flush)); - xfs_set_li_failed(lip, bp); -} - STATIC uint xfs_qm_dquot_logitem_push( struct xfs_log_item *lip, @@ -216,7 +201,6 @@ static const struct xfs_item_ops xfs_dquot_item_ops = { .iop_release = xfs_qm_dquot_logitem_release, .iop_committing = xfs_qm_dquot_logitem_committing, .iop_push = xfs_qm_dquot_logitem_push, - .iop_error = xfs_dquot_item_error }; /* diff --git a/fs/xfs/xfs_dquot_item_recover.c b/fs/xfs/xfs_dquot_item_recover.c index 3400be4c88f0..5875c7e1bd28 100644 --- a/fs/xfs/xfs_dquot_item_recover.c +++ b/fs/xfs/xfs_dquot_item_recover.c @@ -39,7 +39,7 @@ xlog_recover_dquot_ra_pass2( if (item->ri_buf[1].i_len < sizeof(struct xfs_disk_dquot)) return; - type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); + type = recddq->d_type & XFS_DQTYPE_REC_MASK; ASSERT(type); if (log->l_quotaoffs_flag & type) return; @@ -91,7 +91,7 @@ xlog_recover_dquot_commit_pass2( /* * This type of quotas was turned off, so ignore this record. */ - type = recddq->d_flags & (XFS_DQ_USER | XFS_DQ_PROJ | XFS_DQ_GROUP); + type = recddq->d_type & XFS_DQTYPE_REC_MASK; ASSERT(type); if (log->l_quotaoffs_flag & type) return 0; @@ -108,7 +108,7 @@ xlog_recover_dquot_commit_pass2( */ dq_f = item->ri_buf[0].i_addr; ASSERT(dq_f); - fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id, 0); + fa = xfs_dquot_verify(mp, recddq, dq_f->qlf_id); if (fa) { xfs_alert(mp, "corrupt dquot ID 0x%x in log at %pS", dq_f->qlf_id, fa); @@ -153,7 +153,7 @@ xlog_recover_dquot_commit_pass2( ASSERT(dq_f->qlf_size == 2); ASSERT(bp->b_mount == mp); - bp->b_iodone = xlog_recover_iodone; + bp->b_flags |= _XBF_LOGRECOVERY; xfs_buf_delwri_queue(bp, buffer_list); out_release: @@ -185,11 +185,11 @@ xlog_recover_quotaoff_commit_pass1( * group/project quotaoff or both. */ if (qoff_f->qf_flags & XFS_UQUOTA_ACCT) - log->l_quotaoffs_flag |= XFS_DQ_USER; + log->l_quotaoffs_flag |= XFS_DQTYPE_USER; if (qoff_f->qf_flags & XFS_PQUOTA_ACCT) - log->l_quotaoffs_flag |= XFS_DQ_PROJ; + log->l_quotaoffs_flag |= XFS_DQTYPE_PROJ; if (qoff_f->qf_flags & XFS_GQUOTA_ACCT) - log->l_quotaoffs_flag |= XFS_DQ_GROUP; + log->l_quotaoffs_flag |= XFS_DQTYPE_GROUP; return 0; } diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index 5a4b0119143a..465fd9e048d4 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -56,7 +56,7 @@ xfs_fs_encode_fh( fileid_type = FILEID_INO32_GEN_PARENT; /* - * If the the filesystem may contain 64bit inode numbers, we need + * If the filesystem may contain 64bit inode numbers, we need * to use larger file handles that can represent them. 
* * While we only allocate inodes that do not fit into 32 bits any diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c index b9c333bae0a1..6cb8cd11072a 100644 --- a/fs/xfs/xfs_extfree_item.c +++ b/fs/xfs/xfs_extfree_item.c @@ -161,7 +161,8 @@ xfs_efi_init( ((nextents - 1) * sizeof(xfs_extent_t))); efip = kmem_zalloc(size, 0); } else { - efip = kmem_zone_zalloc(xfs_efi_zone, 0); + efip = kmem_cache_zalloc(xfs_efi_zone, + GFP_KERNEL | __GFP_NOFAIL); } xfs_log_item_init(mp, &efip->efi_item, XFS_LI_EFI, &xfs_efi_item_ops); @@ -332,7 +333,8 @@ xfs_trans_get_efd( (nextents - 1) * sizeof(struct xfs_extent), 0); } else { - efdp = kmem_zone_zalloc(xfs_efd_zone, 0); + efdp = kmem_cache_zalloc(xfs_efd_zone, + GFP_KERNEL | __GFP_NOFAIL); } xfs_log_item_init(tp->t_mountp, &efdp->efd_item, XFS_LI_EFD, diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 00db81eac80d..a29f78a663ca 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -94,6 +94,7 @@ xfs_file_fsync( { struct inode *inode = file->f_mapping->host; struct xfs_inode *ip = XFS_I(inode); + struct xfs_inode_log_item *iip = ip->i_itemp; struct xfs_mount *mp = ip->i_mount; int error = 0; int log_flushed = 0; @@ -137,13 +138,15 @@ xfs_file_fsync( xfs_ilock(ip, XFS_ILOCK_SHARED); if (xfs_ipincount(ip)) { if (!datasync || - (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) - lsn = ip->i_itemp->ili_last_lsn; + (iip->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) + lsn = iip->ili_last_lsn; } if (lsn) { error = xfs_log_force_lsn(mp, lsn, XFS_LOG_SYNC, &log_flushed); - ip->i_itemp->ili_fsync_fields = 0; + spin_lock(&iip->ili_lock); + iip->ili_fsync_fields = 0; + spin_unlock(&iip->ili_lock); } xfs_iunlock(ip, XFS_ILOCK_SHARED); @@ -505,7 +508,7 @@ xfs_file_dio_aio_write( */ if (xfs_is_cow_inode(ip)) { trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count); - return -EREMCHG; + return -ENOTBLK; } iolock = XFS_IOLOCK_EXCL; } else { @@ -553,8 +556,8 @@ out: xfs_iunlock(ip, iolock); /* - * No fallback to buffered IO on errors for XFS, direct IO will either - * complete fully or fail. + * No fallback to buffered IO after short writes for XFS, direct I/O + * will either complete fully or return an error. */ ASSERT(ret < 0 || ret == count); return ret; @@ -714,7 +717,7 @@ xfs_file_write_iter( * allow an operation to fall back to buffered mode. */ ret = xfs_file_dio_aio_write(iocb, from); - if (ret != -EREMCHG) + if (ret != -ENOTBLK) return ret; } @@ -1035,7 +1038,7 @@ xfs_file_remap_range( /* Prepare and then clone file data. */ ret = xfs_reflink_remap_prep(file_in, pos_in, file_out, pos_out, &len, remap_flags); - if (ret < 0 || len == 0) + if (ret || len == 0) return ret; trace_xfs_reflink_remap_range(src, pos_in, len, dest, pos_out); @@ -1065,7 +1068,7 @@ xfs_file_remap_range( if (mp->m_flags & XFS_MOUNT_WSYNC) xfs_log_force_inode(dest); out_unlock: - xfs_reflink_remap_unlock(file_in, file_out); + xfs_iunlock2_io_mmap(src, dest); if (ret) trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_); return remapped > 0 ? 
remapped : ret; @@ -1080,7 +1083,7 @@ xfs_file_open( return -EFBIG; if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb))) return -EIO; - file->f_mode |= FMODE_NOWAIT; + file->f_mode |= FMODE_NOWAIT | FMODE_BUF_RASYNC; return 0; } @@ -1220,6 +1223,14 @@ __xfs_filemap_fault( return ret; } +static inline bool +xfs_is_write_fault( + struct vm_fault *vmf) +{ + return (vmf->flags & FAULT_FLAG_WRITE) && + (vmf->vma->vm_flags & VM_SHARED); +} + static vm_fault_t xfs_filemap_fault( struct vm_fault *vmf) @@ -1227,7 +1238,7 @@ xfs_filemap_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, PE_SIZE_PTE, IS_DAX(file_inode(vmf->vma->vm_file)) && - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t @@ -1240,7 +1251,7 @@ xfs_filemap_huge_fault( /* DAX can shortcut the normal fault path on write faults! */ return __xfs_filemap_fault(vmf, pe_size, - (vmf->flags & FAULT_FLAG_WRITE)); + xfs_is_write_fault(vmf)); } static vm_fault_t @@ -1263,10 +1274,23 @@ xfs_filemap_pfn_mkwrite( return __xfs_filemap_fault(vmf, PE_SIZE_PTE, true); } +static void +xfs_filemap_map_pages( + struct vm_fault *vmf, + pgoff_t start_pgoff, + pgoff_t end_pgoff) +{ + struct inode *inode = file_inode(vmf->vma->vm_file); + + xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED); + filemap_map_pages(vmf, start_pgoff, end_pgoff); + xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED); +} + static const struct vm_operations_struct xfs_file_vm_ops = { .fault = xfs_filemap_fault, .huge_fault = xfs_filemap_huge_fault, - .map_pages = filemap_map_pages, + .map_pages = xfs_filemap_map_pages, .page_mkwrite = xfs_filemap_page_mkwrite, .pfn_mkwrite = xfs_filemap_pfn_mkwrite, }; diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c index 5daef654956c..101028ebb571 100644 --- a/fs/xfs/xfs_icache.c +++ b/fs/xfs/xfs_icache.c @@ -37,13 +37,11 @@ xfs_inode_alloc( struct xfs_inode *ip; /* - * if this didn't occur in transactions, we could use - * KM_MAYFAIL and return NULL here on ENOMEM. Set the - * code up to do this anyway. + * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL + * and return NULL here on ENOMEM. */ - ip = kmem_zone_alloc(xfs_inode_zone, 0); - if (!ip) - return NULL; + ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL); + if (inode_init_always(mp->m_super, VFS_I(ip))) { kmem_cache_free(xfs_inode_zone, ip); return NULL; @@ -115,6 +113,7 @@ __xfs_inode_free( { /* asserts to verify all state is correct here */ ASSERT(atomic_read(&ip->i_pincount) == 0); + ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list)); XFS_STATS_DEC(ip->i_mount, vn_active); call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); @@ -141,11 +140,8 @@ xfs_inode_free( } /* - * Queue a new inode reclaim pass if there are reclaimable inodes and there - * isn't a reclaim pass already in progress. By default it runs every 5s based - * on the xfs periodic sync default of 30s. Perhaps this should have it's own - * tunable, but that can be done if this method proves to be ineffective or too - * aggressive. + * Queue background inode reclaim work if there are reclaimable inodes and there + * isn't reclaim work already scheduled or in progress. */ static void xfs_reclaim_work_queue( @@ -160,24 +156,6 @@ xfs_reclaim_work_queue( rcu_read_unlock(); } -/* - * This is a fast pass over the inode cache to try to get reclaim moving on as - * many inodes as possible in a short period of time. 
It kicks itself every few - * seconds, as well as being kicked by the inode cache shrinker when memory - * goes low. It scans as quickly as possible avoiding locked inodes or those - * already being flushed, and once done schedules a future pass. - */ -void -xfs_reclaim_worker( - struct work_struct *work) -{ - struct xfs_mount *mp = container_of(to_delayed_work(work), - struct xfs_mount, m_reclaim_work); - - xfs_reclaim_inodes(mp, SYNC_TRYLOCK); - xfs_reclaim_work_queue(mp); -} - static void xfs_perag_set_reclaim_tag( struct xfs_perag *pag) @@ -618,48 +596,31 @@ out_destroy: } /* - * Look up an inode by number in the given file system. - * The inode is looked up in the cache held in each AG. - * If the inode is found in the cache, initialise the vfs inode - * if necessary. + * Look up an inode by number in the given file system. The inode is looked up + * in the cache held in each AG. If the inode is found in the cache, initialise + * the vfs inode if necessary. * - * If it is not in core, read it in from the file system's device, - * add it to the cache and initialise the vfs inode. + * If it is not in core, read it in from the file system's device, add it to the + * cache and initialise the vfs inode. * * The inode is locked according to the value of the lock_flags parameter. - * This flag parameter indicates how and if the inode's IO lock and inode lock - * should be taken. - * - * mp -- the mount point structure for the current file system. It points - * to the inode hash table. - * tp -- a pointer to the current transaction if there is one. This is - * simply passed through to the xfs_iread() call. - * ino -- the number of the inode desired. This is the unique identifier - * within the file system for the inode being requested. - * lock_flags -- flags indicating how to lock the inode. See the comment - * for xfs_ilock() for a list of valid values. + * Inode lookup is only done during metadata operations and not as part of the + * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup. */ int xfs_iget( - xfs_mount_t *mp, - xfs_trans_t *tp, - xfs_ino_t ino, - uint flags, - uint lock_flags, - xfs_inode_t **ipp) + struct xfs_mount *mp, + struct xfs_trans *tp, + xfs_ino_t ino, + uint flags, + uint lock_flags, + struct xfs_inode **ipp) { - xfs_inode_t *ip; - int error; - xfs_perag_t *pag; - xfs_agino_t agino; + struct xfs_inode *ip; + struct xfs_perag *pag; + xfs_agino_t agino; + int error; - /* - * xfs_reclaim_inode() uses the ILOCK to ensure an inode - * doesn't get freed while it's being referenced during a - * radix tree traversal here. It assumes this function - * aqcuires only the ILOCK (and therefore it has no need to - * involve the IOLOCK in this synchronization). - */ ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0); /* reject inode numbers outside existing AGs */ @@ -776,15 +737,7 @@ xfs_inode_walk_ag_grab( ASSERT(rcu_read_lock_held()); - /* - * check for stale RCU freed inode - * - * If the inode has been reallocated, it doesn't matter if it's not in - * the AG we are walking - we are walking for writeback, so if it - * passes all the "valid inode" checks and is dirty, then we'll write - * it back anyway. If it has been reallocated and still being - * initialised, the XFS_INEW check below will catch it. - */ + /* Check for stale RCU freed inode */ spin_lock(&ip->i_flags_lock); if (!ip->i_ino) goto out_unlock_noent; @@ -1028,107 +981,62 @@ xfs_cowblocks_worker( /* * Grab the inode for reclaim exclusively. 
- * Return 0 if we grabbed it, non-zero otherwise. + * + * We have found this inode via a lookup under RCU, so the inode may have + * already been freed, or it may be in the process of being recycled by + * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode + * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE + * will not be set. Hence we need to check for both these flag conditions to + * avoid inodes that are no longer reclaim candidates. + * + * Note: checking for other state flags here, under the i_flags_lock or not, is + * racy and should be avoided. Those races should be resolved only after we have + * ensured that we are able to reclaim this inode and the world can see that we + * are going to reclaim it. + * + * Return true if we grabbed it, false otherwise. */ -STATIC int +static bool xfs_reclaim_inode_grab( - struct xfs_inode *ip, - int flags) + struct xfs_inode *ip) { ASSERT(rcu_read_lock_held()); - /* quick check for stale RCU freed inode */ - if (!ip->i_ino) - return 1; - - /* - * If we are asked for non-blocking operation, do unlocked checks to - * see if the inode already is being flushed or in reclaim to avoid - * lock traffic. - */ - if ((flags & SYNC_TRYLOCK) && - __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM)) - return 1; - - /* - * The radix tree lock here protects a thread in xfs_iget from racing - * with us starting reclaim on the inode. Once we have the - * XFS_IRECLAIM flag set it will not touch us. - * - * Due to RCU lookup, we may find inodes that have been freed and only - * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that - * aren't candidates for reclaim at all, so we must check the - * XFS_IRECLAIMABLE is set first before proceeding to reclaim. - */ spin_lock(&ip->i_flags_lock); if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) || __xfs_iflags_test(ip, XFS_IRECLAIM)) { /* not a reclaim candidate. */ spin_unlock(&ip->i_flags_lock); - return 1; + return false; } __xfs_iflags_set(ip, XFS_IRECLAIM); spin_unlock(&ip->i_flags_lock); - return 0; + return true; } /* - * Inodes in different states need to be treated differently. The following - * table lists the inode states and the reclaim actions necessary: - * - * inode state iflush ret required action - * --------------- ---------- --------------- - * bad - reclaim - * shutdown EIO unpin and reclaim - * clean, unpinned 0 reclaim - * stale, unpinned 0 reclaim - * clean, pinned(*) 0 requeue - * stale, pinned EAGAIN requeue - * dirty, async - requeue - * dirty, sync 0 reclaim + * Inode reclaim is non-blocking, so the default action if progress cannot be + * made is to "requeue" the inode for reclaim by unlocking it and clearing the + * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about + * blocking anymore and hence we can wait for the inode to be able to reclaim + * it. * - * (*) dgc: I don't think the clean, pinned state is possible but it gets - * handled anyway given the order of checks implemented. - * - * Also, because we get the flush lock first, we know that any inode that has - * been flushed delwri has had the flush completed by the time we check that - * the inode is clean. - * - * Note that because the inode is flushed delayed write by AIL pushing, the - * flush lock may already be held here and waiting on it can result in very - * long latencies. 
Hence for sync reclaims, where we wait on the flush lock, - * the caller should push the AIL first before trying to reclaim inodes to - * minimise the amount of time spent waiting. For background relaim, we only - * bother to reclaim clean inodes anyway. - * - * Hence the order of actions after gaining the locks should be: - * bad => reclaim - * shutdown => unpin and reclaim - * pinned, async => requeue - * pinned, sync => unpin - * stale => reclaim - * clean => reclaim - * dirty, async => requeue - * dirty, sync => flush, wait and reclaim + * We do no IO here - if callers require inodes to be cleaned they must push the + * AIL first to trigger writeback of dirty inodes. This enables writeback to be + * done in the background in a non-blocking manner, and enables memory reclaim + * to make progress without blocking. */ -STATIC int +static void xfs_reclaim_inode( struct xfs_inode *ip, - struct xfs_perag *pag, - int sync_mode) + struct xfs_perag *pag) { - struct xfs_buf *bp = NULL; xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */ - int error; -restart: - error = 0; - xfs_ilock(ip, XFS_ILOCK_EXCL); - if (!xfs_iflock_nowait(ip)) { - if (!(sync_mode & SYNC_WAIT)) - goto out; - xfs_iflock(ip); - } + if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) + goto out; + if (!xfs_iflock_nowait(ip)) + goto out_iunlock; if (XFS_FORCED_SHUTDOWN(ip->i_mount)) { xfs_iunpin_wait(ip); @@ -1136,52 +1044,12 @@ restart: xfs_iflush_abort(ip); goto reclaim; } - if (xfs_ipincount(ip)) { - if (!(sync_mode & SYNC_WAIT)) - goto out_ifunlock; - xfs_iunpin_wait(ip); - } - if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) { - xfs_ifunlock(ip); - goto reclaim; - } - - /* - * Never flush out dirty data during non-blocking reclaim, as it would - * just contend with AIL pushing trying to do the same job. - */ - if (!(sync_mode & SYNC_WAIT)) + if (xfs_ipincount(ip)) + goto out_ifunlock; + if (!xfs_inode_clean(ip)) goto out_ifunlock; - /* - * Now we have an inode that needs flushing. - * - * Note that xfs_iflush will never block on the inode buffer lock, as - * xfs_ifree_cluster() can lock the inode buffer before it locks the - * ip->i_lock, and we are doing the exact opposite here. As a result, - * doing a blocking xfs_imap_to_bp() to get the cluster buffer would - * result in an ABBA deadlock with xfs_ifree_cluster(). - * - * As xfs_ifree_cluser() must gather all inodes that are active in the - * cache to mark them stale, if we hit this case we don't actually want - * to do IO here - we want the inode marked stale so we can simply - * reclaim it. Hence if we get an EAGAIN error here, just unlock the - * inode, back off and try again. Hopefully the next pass through will - * see the stale flag set on the inode. - */ - error = xfs_iflush(ip, &bp); - if (error == -EAGAIN) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - /* backoff longer than in xfs_ifree_cluster */ - delay(2); - goto restart; - } - - if (!error) { - error = xfs_bwrite(bp); - xfs_buf_relse(bp); - } - + xfs_ifunlock(ip); reclaim: ASSERT(!xfs_isiflocked(ip)); @@ -1228,23 +1096,17 @@ reclaim: xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_qm_dqdetach(ip); xfs_iunlock(ip, XFS_ILOCK_EXCL); + ASSERT(xfs_inode_clean(ip)); __xfs_inode_free(ip); - return error; + return; out_ifunlock: xfs_ifunlock(ip); +out_iunlock: + xfs_iunlock(ip, XFS_ILOCK_EXCL); out: xfs_iflags_clear(ip, XFS_IRECLAIM); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - /* - * We could return -EAGAIN here to make reclaim rescan the inode tree in - * a short while. 
However, this just burns CPU time scanning the tree - * waiting for IO to complete and the reclaim work never goes back to - * the idle state. Instead, return 0 to let the next scheduled - * background reclaim attempt to reclaim the inode again. - */ - return 0; } /* @@ -1252,23 +1114,19 @@ out: * corrupted, we still want to try to reclaim all the inodes. If we don't, * then a shut down during filesystem unmount reclaim walk leak all the * unreclaimed inodes. + * + * Returns non-zero if any AGs or inodes were skipped in the reclaim pass + * so that callers that want to block until all dirty inodes are written back + * and reclaimed can sanely loop. */ -STATIC int +static void xfs_reclaim_inodes_ag( struct xfs_mount *mp, - int flags, int *nr_to_scan) { struct xfs_perag *pag; - int error = 0; - int last_error = 0; - xfs_agnumber_t ag; - int trylock = flags & SYNC_TRYLOCK; - int skipped; + xfs_agnumber_t ag = 0; -restart: - ag = 0; - skipped = 0; while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) { unsigned long first_index = 0; int done = 0; @@ -1276,16 +1134,7 @@ restart: ag = pag->pag_agno + 1; - if (trylock) { - if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) { - skipped++; - xfs_perag_put(pag); - continue; - } - first_index = pag->pag_ici_reclaim_cursor; - } else - mutex_lock(&pag->pag_ici_reclaim_lock); - + first_index = READ_ONCE(pag->pag_ici_reclaim_cursor); do { struct xfs_inode *batch[XFS_LOOKUP_BATCH]; int i; @@ -1309,7 +1158,7 @@ restart: for (i = 0; i < nr_found; i++) { struct xfs_inode *ip = batch[i]; - if (done || xfs_reclaim_inode_grab(ip, flags)) + if (done || !xfs_reclaim_inode_grab(ip)) batch[i] = NULL; /* @@ -1338,59 +1187,39 @@ restart: rcu_read_unlock(); for (i = 0; i < nr_found; i++) { - if (!batch[i]) - continue; - error = xfs_reclaim_inode(batch[i], pag, flags); - if (error && last_error != -EFSCORRUPTED) - last_error = error; + if (batch[i]) + xfs_reclaim_inode(batch[i], pag); } *nr_to_scan -= XFS_LOOKUP_BATCH; - cond_resched(); - } while (nr_found && !done && *nr_to_scan > 0); - if (trylock && !done) - pag->pag_ici_reclaim_cursor = first_index; - else - pag->pag_ici_reclaim_cursor = 0; - mutex_unlock(&pag->pag_ici_reclaim_lock); + if (done) + first_index = 0; + WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index); xfs_perag_put(pag); } - - /* - * if we skipped any AG, and we still have scan count remaining, do - * another pass this time using blocking reclaim semantics (i.e - * waiting on the reclaim locks and ignoring the reclaim cursors). This - * ensure that when we get more reclaimers than AGs we block rather - * than spin trying to execute reclaim. - */ - if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) { - trylock = 0; - goto restart; - } - return last_error; } -int +void xfs_reclaim_inodes( - xfs_mount_t *mp, - int mode) + struct xfs_mount *mp) { int nr_to_scan = INT_MAX; - return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan); + while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) { + xfs_ail_push_all_sync(mp->m_ail); + xfs_reclaim_inodes_ag(mp, &nr_to_scan); + }; } /* - * Scan a certain number of inodes for reclaim. - * - * When called we make sure that there is a background (fast) inode reclaim in - * progress, while we will throttle the speed of reclaim via doing synchronous - * reclaim of inodes. That means if we come across dirty inodes, we wait for - * them to be cleaned, which we hope will not be very long due to the - * background walker having already kicked the IO off on those dirty inodes. 
+ * The shrinker infrastructure determines how many inodes we should scan for + * reclaim. We want as many clean inodes ready to reclaim as possible, so we + * push the AIL here. We also want to proactively free up memory if we can to + * minimise the amount of work memory reclaim has to do so we kick the + * background reclaim if it isn't already scheduled. */ long xfs_reclaim_inodes_nr( @@ -1401,7 +1230,8 @@ xfs_reclaim_inodes_nr( xfs_reclaim_work_queue(mp); xfs_ail_push_all(mp->m_ail); - return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan); + xfs_reclaim_inodes_ag(mp, &nr_to_scan); + return 0; } /* @@ -1498,6 +1328,24 @@ xfs_inode_matches_eofb( return true; } +/* + * This is a fast pass over the inode cache to try to get reclaim moving on as + * many inodes as possible in a short period of time. It kicks itself every few + * seconds, as well as being kicked by the inode cache shrinker when memory + * goes low. + */ +void +xfs_reclaim_worker( + struct work_struct *work) +{ + struct xfs_mount *mp = container_of(to_delayed_work(work), + struct xfs_mount, m_reclaim_work); + int nr_to_scan = INT_MAX; + + xfs_reclaim_inodes_ag(mp, &nr_to_scan); + xfs_reclaim_work_queue(mp); +} + STATIC int xfs_inode_free_eofblocks( struct xfs_inode *ip, @@ -1574,7 +1422,7 @@ __xfs_inode_free_quota_eofblocks( eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC; if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) { - dq = xfs_inode_dquot(ip, XFS_DQ_USER); + dq = xfs_inode_dquot(ip, XFS_DQTYPE_USER); if (dq && xfs_dquot_lowsp(dq)) { eofb.eof_uid = VFS_I(ip)->i_uid; eofb.eof_flags |= XFS_EOF_FLAGS_UID; @@ -1583,7 +1431,7 @@ __xfs_inode_free_quota_eofblocks( } if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) { - dq = xfs_inode_dquot(ip, XFS_DQ_GROUP); + dq = xfs_inode_dquot(ip, XFS_DQTYPE_GROUP); if (dq && xfs_dquot_lowsp(dq)) { eofb.eof_gid = VFS_I(ip)->i_gid; eofb.eof_flags |= XFS_EOF_FLAGS_GID; diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h index 93b54e7d55f0..3a4c8b382cd0 100644 --- a/fs/xfs/xfs_icache.h +++ b/fs/xfs/xfs_icache.h @@ -17,9 +17,6 @@ struct xfs_eofblocks { __u64 eof_min_file_size; }; -#define SYNC_WAIT 0x0001 /* wait for i/o to complete */ -#define SYNC_TRYLOCK 0x0002 /* only try to lock inodes */ - /* * tags for inode radix tree */ @@ -51,7 +48,7 @@ void xfs_inode_free(struct xfs_inode *ip); void xfs_reclaim_worker(struct work_struct *work); -int xfs_reclaim_inodes(struct xfs_mount *mp, int mode); +void xfs_reclaim_inodes(struct xfs_mount *mp); int xfs_reclaim_inodes_count(struct xfs_mount *mp); long xfs_reclaim_inodes_nr(struct xfs_mount *mp, int nr_to_scan); diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c index 287a9e5c7d75..9b3994b9c716 100644 --- a/fs/xfs/xfs_icreate_item.c +++ b/fs/xfs/xfs_icreate_item.c @@ -97,7 +97,7 @@ xfs_icreate_log( { struct xfs_icreate_item *icp; - icp = kmem_zone_zalloc(xfs_icreate_zone, 0); + icp = kmem_cache_zalloc(xfs_icreate_zone, GFP_KERNEL | __GFP_NOFAIL); xfs_log_item_init(tp->t_mountp, &icp->ic_item, XFS_LI_ICREATE, &xfs_icreate_item_ops); diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 9aea7d68d8ab..c06129cffba9 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -44,7 +44,6 @@ kmem_zone_t *xfs_inode_zone; */ #define XFS_ITRUNC_MAX_EXTENTS 2 -STATIC int xfs_iflush_int(struct xfs_inode *, struct xfs_buf *); STATIC int xfs_iunlink(struct xfs_trans *, struct xfs_inode *); STATIC int xfs_iunlink_remove(struct xfs_trans *, struct xfs_inode *); @@ -452,7 +451,7 @@ xfs_lock_inodes( /* * Currently supports between 2 
and 5 inodes with exclusive locking. We * support an arbitrary depth of locking here, but absolute limits on - * inodes depend on the the type of locking and the limits placed by + * inodes depend on the type of locking and the limits placed by * lockdep annotations in xfs_lock_inumorder. These are all checked by * the asserts. */ @@ -1740,10 +1739,31 @@ xfs_inactive_ifree( return error; } + /* + * We do not hold the inode locked across the entire rolling transaction + * here. We only need to hold it for the first transaction that + * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the + * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode + * here breaks the relationship between cluster buffer invalidation and + * stale inode invalidation on cluster buffer item journal commit + * completion, and can result in leaving dirty stale inodes hanging + * around in memory. + * + * We have no need for serialising this inode operation against other + * operations - we freed the inode and hence reallocation is required + * and that will serialise on reallocating the space the deferops need + * to free. Hence we can unlock the inode on the first commit of + * the transaction rather than roll it right through the deferops. This + * avoids relogging the XFS_ISTALE inode. + * + * We check that xfs_ifree() hasn't grown an internal transaction roll + * by asserting that the inode is still locked when it returns. + */ xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, 0); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); error = xfs_ifree(tp, ip); + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (error) { /* * If we fail to free the inode, shut down. The cancel @@ -1756,7 +1776,6 @@ xfs_inactive_ifree( xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR); } xfs_trans_cancel(tp); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return error; } @@ -1774,7 +1793,6 @@ xfs_inactive_ifree( xfs_notice(mp, "%s: xfs_trans_commit returned error %d", __func__, error); - xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } @@ -2147,7 +2165,6 @@ xfs_iunlink_update_dinode( xfs_dinode_calc_crc(mp, dip); xfs_trans_inode_buf(tp, ibp); xfs_trans_log_buf(tp, ibp, offset, offset + sizeof(xfs_agino_t) - 1); - xfs_inobp_check(mp, ibp); } /* Set an in-core inode's unlinked pointer and return the old value. */ @@ -2248,7 +2265,6 @@ xfs_iunlink( } if (next_agino != NULLAGINO) { - struct xfs_perag *pag; xfs_agino_t old_agino; /* @@ -2265,9 +2281,7 @@ xfs_iunlink( * agino has been unlinked, add a backref from the next inode * back to agino. */ - pag = xfs_perag_get(mp, agno); - error = xfs_iunlink_add_backref(pag, agino, next_agino); - xfs_perag_put(pag); + error = xfs_iunlink_add_backref(agibp->b_pag, agino, next_agino); if (error) return error; } @@ -2403,7 +2417,6 @@ xfs_iunlink_remove( struct xfs_buf *agibp; struct xfs_buf *last_ibp; struct xfs_dinode *last_dip = NULL; - struct xfs_perag *pag = NULL; xfs_agnumber_t agno = XFS_INO_TO_AGNO(mp, ip->i_ino); xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ip->i_ino); xfs_agino_t next_agino; @@ -2447,32 +2460,22 @@ xfs_iunlink_remove( * this inode's backref to point from the next inode. */ if (next_agino != NULLAGINO) { - pag = xfs_perag_get(mp, agno); - error = xfs_iunlink_change_backref(pag, next_agino, + error = xfs_iunlink_change_backref(agibp->b_pag, next_agino, NULLAGINO); if (error) - goto out; + return error; } - if (head_agino == agino) { - /* Point the head of the list to the next unlinked inode. 
*/ - error = xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, - next_agino); - if (error) - goto out; - } else { + if (head_agino != agino) { struct xfs_imap imap; xfs_agino_t prev_agino; - if (!pag) - pag = xfs_perag_get(mp, agno); - /* We need to search the list for the inode being freed. */ error = xfs_iunlink_map_prev(tp, agno, head_agino, agino, &prev_agino, &imap, &last_dip, &last_ibp, - pag); + agibp->b_pag); if (error) - goto out; + return error; /* Point the previous inode on the list to the next inode. */ xfs_iunlink_update_dinode(tp, agno, prev_agino, last_ibp, @@ -2486,29 +2489,29 @@ xfs_iunlink_remove( * change_backref takes care of deleting the backref if * next_agino is NULLAGINO. */ - error = xfs_iunlink_change_backref(pag, agino, next_agino); - if (error) - goto out; + return xfs_iunlink_change_backref(agibp->b_pag, agino, + next_agino); } -out: - if (pag) - xfs_perag_put(pag); - return error; + /* Point the head of the list to the next unlinked inode. */ + return xfs_iunlink_update_bucket(tp, agno, agibp, bucket_index, + next_agino); } /* - * Look up the inode number specified and mark it stale if it is found. If it is - * dirty, return the inode so it can be attached to the cluster buffer so it can - * be processed appropriately when the cluster free transaction completes. + * Look up the inode number specified and if it is not already marked XFS_ISTALE + * mark it stale. We should only find clean inodes in this lookup that aren't + * already stale. */ -static struct xfs_inode * -xfs_ifree_get_one_inode( - struct xfs_perag *pag, +static void +xfs_ifree_mark_inode_stale( + struct xfs_buf *bp, struct xfs_inode *free_ip, xfs_ino_t inum) { - struct xfs_mount *mp = pag->pag_mount; + struct xfs_mount *mp = bp->b_mount; + struct xfs_perag *pag = bp->b_pag; + struct xfs_inode_log_item *iip; struct xfs_inode *ip; retry: @@ -2516,8 +2519,10 @@ retry: ip = radix_tree_lookup(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, inum)); /* Inode not in memory, nothing to do */ - if (!ip) - goto out_rcu_unlock; + if (!ip) { + rcu_read_unlock(); + return; + } /* * because this is an RCU protected lookup, we could find a recently @@ -2528,9 +2533,9 @@ retry: spin_lock(&ip->i_flags_lock); if (ip->i_ino != inum || __xfs_iflags_test(ip, XFS_ISTALE)) { spin_unlock(&ip->i_flags_lock); - goto out_rcu_unlock; + rcu_read_unlock(); + return; } - spin_unlock(&ip->i_flags_lock); /* * Don't try to lock/unlock the current inode, but we _cannot_ skip the @@ -2540,43 +2545,50 @@ retry: */ if (ip != free_ip) { if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) { + spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); delay(1); goto retry; } - - /* - * Check the inode number again in case we're racing with - * freeing in xfs_reclaim_inode(). See the comments in that - * function for more information as to why the initial check is - * not sufficient. - */ - if (ip->i_ino != inum) { - xfs_iunlock(ip, XFS_ILOCK_EXCL); - goto out_rcu_unlock; - } } + ip->i_flags |= XFS_ISTALE; + spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); - xfs_iflock(ip); - xfs_iflags_set(ip, XFS_ISTALE); + /* + * If we can't get the flush lock, the inode is already attached. All + * we needed to do here is mark the inode stale so buffer IO completion + * will remove it from the AIL. 
+ */ + iip = ip->i_itemp; + if (!xfs_iflock_nowait(ip)) { + ASSERT(!list_empty(&iip->ili_item.li_bio_list)); + ASSERT(iip->ili_last_fields); + goto out_iunlock; + } /* - * We don't need to attach clean inodes or those only with unlogged - * changes (which we throw away, anyway). + * Inodes not attached to the buffer can be released immediately. + * Everything else has to go through xfs_iflush_abort() on journal + * commit as the flock synchronises removal of the inode from the + * cluster buffer against inode reclaim. */ - if (!ip->i_itemp || xfs_inode_clean(ip)) { - ASSERT(ip != free_ip); + if (!iip || list_empty(&iip->ili_item.li_bio_list)) { xfs_ifunlock(ip); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - goto out_no_inode; + goto out_iunlock; } - return ip; -out_rcu_unlock: - rcu_read_unlock(); -out_no_inode: - return NULL; + /* we have a dirty inode in memory that has not yet been flushed. */ + spin_lock(&iip->ili_lock); + iip->ili_last_fields = iip->ili_fields; + iip->ili_fields = 0; + iip->ili_fsync_fields = 0; + spin_unlock(&iip->ili_lock); + ASSERT(iip->ili_last_fields); + +out_iunlock: + if (ip != free_ip) + xfs_iunlock(ip, XFS_ILOCK_EXCL); } /* @@ -2586,26 +2598,20 @@ out_no_inode: */ STATIC int xfs_ifree_cluster( - xfs_inode_t *free_ip, - xfs_trans_t *tp, + struct xfs_inode *free_ip, + struct xfs_trans *tp, struct xfs_icluster *xic) { - xfs_mount_t *mp = free_ip->i_mount; + struct xfs_mount *mp = free_ip->i_mount; + struct xfs_ino_geometry *igeo = M_IGEO(mp); + struct xfs_buf *bp; + xfs_daddr_t blkno; + xfs_ino_t inum = xic->first_ino; int nbufs; int i, j; int ioffset; - xfs_daddr_t blkno; - xfs_buf_t *bp; - xfs_inode_t *ip; - struct xfs_inode_log_item *iip; - struct xfs_log_item *lip; - struct xfs_perag *pag; - struct xfs_ino_geometry *igeo = M_IGEO(mp); - xfs_ino_t inum; int error; - inum = xic->first_ino; - pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum)); nbufs = igeo->ialloc_blks / igeo->blocks_per_cluster; for (j = 0; j < nbufs; j++, inum += igeo->inodes_per_cluster) { @@ -2634,10 +2640,8 @@ xfs_ifree_cluster( error = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno, mp->m_bsize * igeo->blocks_per_cluster, XBF_UNMAPPED, &bp); - if (error) { - xfs_perag_put(pag); + if (error) return error; - } /* * This buffer may not have been correctly initialised as we @@ -2651,60 +2655,16 @@ xfs_ifree_cluster( bp->b_ops = &xfs_inode_buf_ops; /* - * Walk the inodes already attached to the buffer and mark them - * stale. These will all have the flush locks held, so an - * in-memory inode walk can't lock them. By marking them all - * stale first, we will not attempt to lock them in the loop - * below as the XFS_ISTALE flag will be set. + * Now we need to set all the cached clean inodes as XFS_ISTALE, + * too. This requires lookups, and will skip inodes that we've + * already marked XFS_ISTALE. */ - list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { - if (lip->li_type == XFS_LI_INODE) { - iip = (struct xfs_inode_log_item *)lip; - ASSERT(iip->ili_logged == 1); - lip->li_cb = xfs_istale_done; - xfs_trans_ail_copy_lsn(mp->m_ail, - &iip->ili_flush_lsn, - &iip->ili_item.li_lsn); - xfs_iflags_set(iip->ili_inode, XFS_ISTALE); - } - } - - - /* - * For each inode in memory attempt to add it to the inode - * buffer and set it up for being staled on buffer IO - * completion. This is safe as we've locked out tail pushing - * and flushing by locking the buffer. 
- * - * We have already marked every inode that was part of a - * transaction stale above, which means there is no point in - * even trying to lock them. - */ - for (i = 0; i < igeo->inodes_per_cluster; i++) { - ip = xfs_ifree_get_one_inode(pag, free_ip, inum + i); - if (!ip) - continue; - - iip = ip->i_itemp; - iip->ili_last_fields = iip->ili_fields; - iip->ili_fields = 0; - iip->ili_fsync_fields = 0; - iip->ili_logged = 1; - xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, - &iip->ili_item.li_lsn); - - xfs_buf_attach_iodone(bp, xfs_istale_done, - &iip->ili_item); - - if (ip != free_ip) - xfs_iunlock(ip, XFS_ILOCK_EXCL); - } + for (i = 0; i < igeo->inodes_per_cluster; i++) + xfs_ifree_mark_inode_stale(bp, free_ip, inum + i); xfs_trans_stale_inode_buf(tp, bp); xfs_trans_binval(tp, bp); } - - xfs_perag_put(pag); return 0; } @@ -2725,6 +2685,7 @@ xfs_ifree( { int error; struct xfs_icluster xic = { 0 }; + struct xfs_inode_log_item *iip = ip->i_itemp; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT(VFS_I(ip)->i_nlink == 0); @@ -2762,7 +2723,9 @@ xfs_ifree( ip->i_df.if_format = XFS_DINODE_FMT_EXTENTS; /* Don't attempt to replay owner changes for a deleted inode */ - ip->i_itemp->ili_fields &= ~(XFS_ILOG_AOWNER|XFS_ILOG_DOWNER); + spin_lock(&iip->ili_lock); + iip->ili_fields &= ~(XFS_ILOG_AOWNER | XFS_ILOG_DOWNER); + spin_unlock(&iip->ili_lock); /* * Bump the generation count so no one will be confused @@ -3142,7 +3105,7 @@ out_trans_abort: /* * xfs_rename_alloc_whiteout() * - * Return a referenced, unlinked, unlocked inode that that can be used as a + * Return a referenced, unlinked, unlocked inode that can be used as a * whiteout in a rename transaction. We use a tmpfile inode here so that if we * crash between allocating the inode and linking it into the rename transaction * recovery will free the inode and we won't leak it. @@ -3469,232 +3432,9 @@ out_release_wip: return error; } -STATIC int -xfs_iflush_cluster( - struct xfs_inode *ip, - struct xfs_buf *bp) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_perag *pag; - unsigned long first_index, mask; - int cilist_size; - struct xfs_inode **cilist; - struct xfs_inode *cip; - struct xfs_ino_geometry *igeo = M_IGEO(mp); - int error = 0; - int nr_found; - int clcount = 0; - int i; - - pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino)); - - cilist_size = igeo->inodes_per_cluster * sizeof(struct xfs_inode *); - cilist = kmem_alloc(cilist_size, KM_MAYFAIL|KM_NOFS); - if (!cilist) - goto out_put; - - mask = ~(igeo->inodes_per_cluster - 1); - first_index = XFS_INO_TO_AGINO(mp, ip->i_ino) & mask; - rcu_read_lock(); - /* really need a gang lookup range call here */ - nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)cilist, - first_index, igeo->inodes_per_cluster); - if (nr_found == 0) - goto out_free; - - for (i = 0; i < nr_found; i++) { - cip = cilist[i]; - if (cip == ip) - continue; - - /* - * because this is an RCU protected lookup, we could find a - * recently freed or even reallocated inode during the lookup. - * We need to check under the i_flags_lock for a valid inode - * here. Skip it if it is not valid or the wrong inode. - */ - spin_lock(&cip->i_flags_lock); - if (!cip->i_ino || - __xfs_iflags_test(cip, XFS_ISTALE)) { - spin_unlock(&cip->i_flags_lock); - continue; - } - - /* - * Once we fall off the end of the cluster, no point checking - * any more inodes in the list because they will also all be - * outside the cluster. 
- */ - if ((XFS_INO_TO_AGINO(mp, cip->i_ino) & mask) != first_index) { - spin_unlock(&cip->i_flags_lock); - break; - } - spin_unlock(&cip->i_flags_lock); - - /* - * Do an un-protected check to see if the inode is dirty and - * is a candidate for flushing. These checks will be repeated - * later after the appropriate locks are acquired. - */ - if (xfs_inode_clean(cip) && xfs_ipincount(cip) == 0) - continue; - - /* - * Try to get locks. If any are unavailable or it is pinned, - * then this inode cannot be flushed and is skipped. - */ - - if (!xfs_ilock_nowait(cip, XFS_ILOCK_SHARED)) - continue; - if (!xfs_iflock_nowait(cip)) { - xfs_iunlock(cip, XFS_ILOCK_SHARED); - continue; - } - if (xfs_ipincount(cip)) { - xfs_ifunlock(cip); - xfs_iunlock(cip, XFS_ILOCK_SHARED); - continue; - } - - - /* - * Check the inode number again, just to be certain we are not - * racing with freeing in xfs_reclaim_inode(). See the comments - * in that function for more information as to why the initial - * check is not sufficient. - */ - if (!cip->i_ino) { - xfs_ifunlock(cip); - xfs_iunlock(cip, XFS_ILOCK_SHARED); - continue; - } - - /* - * arriving here means that this inode can be flushed. First - * re-check that it's dirty before flushing. - */ - if (!xfs_inode_clean(cip)) { - error = xfs_iflush_int(cip, bp); - if (error) { - xfs_iunlock(cip, XFS_ILOCK_SHARED); - goto out_free; - } - clcount++; - } else { - xfs_ifunlock(cip); - } - xfs_iunlock(cip, XFS_ILOCK_SHARED); - } - - if (clcount) { - XFS_STATS_INC(mp, xs_icluster_flushcnt); - XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount); - } - -out_free: - rcu_read_unlock(); - kmem_free(cilist); -out_put: - xfs_perag_put(pag); - return error; -} - -/* - * Flush dirty inode metadata into the backing buffer. - * - * The caller must have the inode lock and the inode flush lock held. The - * inode lock will still be held upon return to the caller, and the inode - * flush lock will be released after the inode has reached the disk. - * - * The caller must write out the buffer returned in *bpp and release it. - */ -int +static int xfs_iflush( struct xfs_inode *ip, - struct xfs_buf **bpp) -{ - struct xfs_mount *mp = ip->i_mount; - struct xfs_buf *bp = NULL; - struct xfs_dinode *dip; - int error; - - XFS_STATS_INC(mp, xs_iflush_count); - - ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)); - ASSERT(xfs_isiflocked(ip)); - ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || - ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); - - *bpp = NULL; - - xfs_iunpin_wait(ip); - - /* - * For stale inodes we cannot rely on the backing buffer remaining - * stale in cache for the remaining life of the stale inode and so - * xfs_imap_to_bp() below may give us a buffer that no longer contains - * inodes below. We have to check this after ensuring the inode is - * unpinned so that it is safe to reclaim the stale inode after the - * flush call. - */ - if (xfs_iflags_test(ip, XFS_ISTALE)) { - xfs_ifunlock(ip); - return 0; - } - - /* - * Get the buffer containing the on-disk inode. We are doing a try-lock - * operation here, so we may get an EAGAIN error. In that case, return - * leaving the inode dirty. - * - * If we get any other error, we effectively have a corruption situation - * and we cannot flush the inode. Abort the flush and shut down. 
- */ - error = xfs_imap_to_bp(mp, NULL, &ip->i_imap, &dip, &bp, XBF_TRYLOCK); - if (error == -EAGAIN) { - xfs_ifunlock(ip); - return error; - } - if (error) - goto abort; - - /* - * If the buffer is pinned then push on the log now so we won't - * get stuck waiting in the write for too long. - */ - if (xfs_buf_ispinned(bp)) - xfs_log_force(mp, 0); - - /* - * Flush the provided inode then attempt to gather others from the - * cluster into the write. - * - * Note: Once we attempt to flush an inode, we must run buffer - * completion callbacks on any failure. If this fails, simulate an I/O - * failure on the buffer and shut down. - */ - error = xfs_iflush_int(ip, bp); - if (!error) - error = xfs_iflush_cluster(ip, bp); - if (error) { - bp->b_flags |= XBF_ASYNC; - xfs_buf_ioend_fail(bp); - goto shutdown; - } - - *bpp = bp; - return 0; - -abort: - xfs_iflush_abort(ip); -shutdown: - xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); - return error; -} - -STATIC int -xfs_iflush_int( - struct xfs_inode *ip, struct xfs_buf *bp) { struct xfs_inode_log_item *iip = ip->i_itemp; @@ -3706,7 +3446,7 @@ xfs_iflush_int( ASSERT(xfs_isiflocked(ip)); ASSERT(ip->i_df.if_format != XFS_DINODE_FMT_BTREE || ip->i_df.if_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK)); - ASSERT(iip != NULL && iip->ili_fields != 0); + ASSERT(iip->ili_item.li_buf == bp); dip = xfs_buf_offset(bp, ip->i_imap.im_boffset); @@ -3801,7 +3541,6 @@ xfs_iflush_int( xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK); if (XFS_IFORK_Q(ip)) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK); - xfs_inobp_check(mp, bp); /* * We've recorded everything logged in the inode, so we'd like to clear @@ -3818,40 +3557,148 @@ xfs_iflush_int( * know that the information those bits represent is permanently on * disk. As long as the flush completes before the inode is logged * again, then both ili_fields and ili_last_fields will be cleared. - * - * We can play with the ili_fields bits here, because the inode lock - * must be held exclusively in order to set bits there and the flush - * lock protects the ili_last_fields bits. Set ili_logged so the flush - * done routine can tell whether or not to look in the AIL. Also, store - * the current LSN of the inode so that we can tell whether the item has - * moved in the AIL from xfs_iflush_done(). In order to read the lsn we - * need the AIL lock, because it is a 64 bit value that cannot be read - * atomically. */ error = 0; flush_out: + spin_lock(&iip->ili_lock); iip->ili_last_fields = iip->ili_fields; iip->ili_fields = 0; iip->ili_fsync_fields = 0; - iip->ili_logged = 1; + spin_unlock(&iip->ili_lock); + /* + * Store the current LSN of the inode so that we can tell whether the + * item has moved in the AIL from xfs_iflush_done(). + */ xfs_trans_ail_copy_lsn(mp->m_ail, &iip->ili_flush_lsn, &iip->ili_item.li_lsn); + /* generate the checksum. */ + xfs_dinode_calc_crc(mp, dip); + return error; +} + +/* + * Non-blocking flush of dirty inode metadata into the backing buffer. + * + * The caller must have a reference to the inode and hold the cluster buffer + * locked. The function will walk across all the inodes on the cluster buffer it + * can find and lock without blocking, and flush them to the cluster buffer. + * + * On successful flushing of at least one inode, the caller must write out the + * buffer and release it. If no inodes are flushed, -EAGAIN will be returned and + * the caller needs to release the buffer. 
On failure, the filesystem will be + * shut down, the buffer will have been unlocked and released, and EFSCORRUPTED + * will be returned. + */ +int +xfs_iflush_cluster( + struct xfs_buf *bp) +{ + struct xfs_mount *mp = bp->b_mount; + struct xfs_log_item *lip, *n; + struct xfs_inode *ip; + struct xfs_inode_log_item *iip; + int clcount = 0; + int error = 0; + /* - * Attach the inode item callback to the buffer whether the flush - * succeeded or not. If not, the caller will shut down and fail I/O - * completion on the buffer to remove the inode from the AIL and release - * the flush lock. + * We must use the safe variant here as on shutdown xfs_iflush_abort() + * can remove itself from the list. */ - xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item); + list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { + iip = (struct xfs_inode_log_item *)lip; + ip = iip->ili_inode; - /* generate the checksum. */ - xfs_dinode_calc_crc(mp, dip); + /* + * Quick and dirty check to avoid locks if possible. + */ + if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLOCK)) + continue; + if (xfs_ipincount(ip)) + continue; + + /* + * The inode is still attached to the buffer, which means it is + * dirty but reclaim might try to grab it. Check carefully for + * that, and grab the ilock while still holding the i_flags_lock + * to guarantee reclaim will not be able to reclaim this inode + * once we drop the i_flags_lock. + */ + spin_lock(&ip->i_flags_lock); + ASSERT(!__xfs_iflags_test(ip, XFS_ISTALE)); + if (__xfs_iflags_test(ip, XFS_IRECLAIM | XFS_IFLOCK)) { + spin_unlock(&ip->i_flags_lock); + continue; + } + + /* + * ILOCK will pin the inode against reclaim and prevent + * concurrent transactions modifying the inode while we are + * flushing the inode. + */ + if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) { + spin_unlock(&ip->i_flags_lock); + continue; + } + spin_unlock(&ip->i_flags_lock); + + /* + * Skip inodes that are already flush locked as they have + * already been written to the buffer. + */ + if (!xfs_iflock_nowait(ip)) { + xfs_iunlock(ip, XFS_ILOCK_SHARED); + continue; + } + + /* + * Abort flushing this inode if we are shut down because the + * inode may not currently be in the AIL. This can occur when + * log I/O failure unpins the inode without inserting into the + * AIL, leaving a dirty/unpinned inode attached to the buffer + * that otherwise looks like it should be flushed. + */ + if (XFS_FORCED_SHUTDOWN(mp)) { + xfs_iunpin_wait(ip); + /* xfs_iflush_abort() drops the flush lock */ + xfs_iflush_abort(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + error = -EIO; + continue; + } + + /* don't block waiting on a log force to unpin dirty inodes */ + if (xfs_ipincount(ip)) { + xfs_ifunlock(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + continue; + } + + if (!xfs_inode_clean(ip)) + error = xfs_iflush(ip, bp); + else + xfs_ifunlock(ip); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + if (error) + break; + clcount++; + } + + if (error) { + bp->b_flags |= XBF_ASYNC; + xfs_buf_ioend_fail(bp); + xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); + return error; + } + + if (!clcount) + return -EAGAIN; + + XFS_STATS_INC(mp, xs_icluster_flushcnt); + XFS_STATS_ADD(mp, xs_icluster_flushinode, clcount); + return 0; - ASSERT(!list_empty(&bp->b_li_list)); - ASSERT(bp->b_iodone != NULL); - return error; } /* Release an inode. 
*/ @@ -3881,3 +3728,96 @@ xfs_log_force_inode( return 0; return xfs_log_force_lsn(ip->i_mount, lsn, XFS_LOG_SYNC, NULL); } + +/* + * Grab the exclusive iolock for a data copy from src to dest, making sure to + * abide vfs locking order (lowest pointer value goes first) and breaking the + * layout leases before proceeding. The loop is needed because we cannot call + * the blocking break_layout() with the iolocks held, and therefore have to + * back out both locks. + */ +static int +xfs_iolock_two_inodes_and_break_layout( + struct inode *src, + struct inode *dest) +{ + int error; + + if (src > dest) + swap(src, dest); + +retry: + /* Wait to break both inodes' layouts before we start locking. */ + error = break_layout(src, true); + if (error) + return error; + if (src != dest) { + error = break_layout(dest, true); + if (error) + return error; + } + + /* Lock one inode and make sure nobody got in and leased it. */ + inode_lock(src); + error = break_layout(src, false); + if (error) { + inode_unlock(src); + if (error == -EWOULDBLOCK) + goto retry; + return error; + } + + if (src == dest) + return 0; + + /* Lock the other inode and make sure nobody got in and leased it. */ + inode_lock_nested(dest, I_MUTEX_NONDIR2); + error = break_layout(dest, false); + if (error) { + inode_unlock(src); + inode_unlock(dest); + if (error == -EWOULDBLOCK) + goto retry; + return error; + } + + return 0; +} + +/* + * Lock two inodes so that userspace cannot initiate I/O via file syscalls or + * mmap activity. + */ +int +xfs_ilock2_io_mmap( + struct xfs_inode *ip1, + struct xfs_inode *ip2) +{ + int ret; + + ret = xfs_iolock_two_inodes_and_break_layout(VFS_I(ip1), VFS_I(ip2)); + if (ret) + return ret; + if (ip1 == ip2) + xfs_ilock(ip1, XFS_MMAPLOCK_EXCL); + else + xfs_lock_two_inodes(ip1, XFS_MMAPLOCK_EXCL, + ip2, XFS_MMAPLOCK_EXCL); + return 0; +} + +/* Unlock both inodes to allow IO and mmap activity. */ +void +xfs_iunlock2_io_mmap( + struct xfs_inode *ip1, + struct xfs_inode *ip2) +{ + bool same_inode = (ip1 == ip2); + + xfs_iunlock(ip2, XFS_MMAPLOCK_EXCL); + if (!same_inode) + xfs_iunlock(ip1, XFS_MMAPLOCK_EXCL); + inode_unlock(VFS_I(ip2)); + if (!same_inode) + inode_unlock(VFS_I(ip1)); +} diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 47d3b391030d..e9a8bb184d1f 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -426,7 +426,7 @@ int xfs_log_force_inode(struct xfs_inode *ip); void xfs_iunpin_wait(xfs_inode_t *); #define xfs_ipincount(ip) ((unsigned int) atomic_read(&ip->i_pincount)) -int xfs_iflush(struct xfs_inode *, struct xfs_buf **); +int xfs_iflush_cluster(struct xfs_buf *); void xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode, struct xfs_inode *ip1, uint ip1_mode); @@ -499,4 +499,7 @@ void xfs_iunlink_destroy(struct xfs_perag *pag); void xfs_end_io(struct work_struct *work); +int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2); +void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2); + #endif /* __XFS_INODE_H__ */ diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index ba47bf65b772..6c65938cee1c 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c @@ -191,7 +191,7 @@ xfs_inode_item_format_data_fork( ip->i_df.if_bytes > 0) { /* * Round i_bytes up to a word boundary. - * The underlying memory is guaranteed to + * The underlying memory is guaranteed * to be there by xfs_idata_realloc(). 
*/ data_bytes = roundup(ip->i_df.if_bytes, 4); @@ -275,7 +275,7 @@ xfs_inode_item_format_attr_fork( ip->i_afp->if_bytes > 0) { /* * Round i_bytes up to a word boundary. - * The underlying memory is guaranteed to + * The underlying memory is guaranteed * to be there by xfs_idata_realloc(). */ data_bytes = roundup(ip->i_afp->if_bytes, 4); @@ -439,6 +439,7 @@ xfs_inode_item_pin( struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode; ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); + ASSERT(lip->li_buf); trace_xfs_inode_pin(ip, _RET_IP_); atomic_inc(&ip->i_pincount); @@ -450,6 +451,12 @@ xfs_inode_item_pin( * item which was previously pinned with a call to xfs_inode_item_pin(). * * Also wake up anyone in xfs_iunpin_wait() if the count goes to 0. + * + * Note that unpin can race with inode cluster buffer freeing marking the buffer + * stale. In that case, flush completions are run from the buffer unpin call, + * which may happen before the inode is unpinned. If we lose the race, there + * will be no buffer attached to the log item, but the inode will be marked + * XFS_ISTALE. */ STATIC void xfs_inode_item_unpin( @@ -459,28 +466,12 @@ xfs_inode_item_unpin( struct xfs_inode *ip = INODE_ITEM(lip)->ili_inode; trace_xfs_inode_unpin(ip, _RET_IP_); + ASSERT(lip->li_buf || xfs_iflags_test(ip, XFS_ISTALE)); ASSERT(atomic_read(&ip->i_pincount) > 0); if (atomic_dec_and_test(&ip->i_pincount)) wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT); } -/* - * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer - * have been failed during writeback - * - * This informs the AIL that the inode is already flush locked on the next push, - * and acquires a hold on the buffer to ensure that it isn't reclaimed before - * dirty data makes it to disk. - */ -STATIC void -xfs_inode_item_error( - struct xfs_log_item *lip, - struct xfs_buf *bp) -{ - ASSERT(xfs_isiflocked(INODE_ITEM(lip)->ili_inode)); - xfs_set_li_failed(lip, bp); -} - STATIC uint xfs_inode_item_push( struct xfs_log_item *lip, @@ -494,55 +485,44 @@ xfs_inode_item_push( uint rval = XFS_ITEM_SUCCESS; int error; - if (xfs_ipincount(ip) > 0) + ASSERT(iip->ili_item.li_buf); + + if (xfs_ipincount(ip) > 0 || xfs_buf_ispinned(bp) || + (ip->i_flags & XFS_ISTALE)) return XFS_ITEM_PINNED; - if (!xfs_ilock_nowait(ip, XFS_ILOCK_SHARED)) - return XFS_ITEM_LOCKED; + /* If the inode is already flush locked, we're already flushing. */ + if (xfs_isiflocked(ip)) + return XFS_ITEM_FLUSHING; - /* - * Re-check the pincount now that we stabilized the value by - * taking the ilock. - */ - if (xfs_ipincount(ip) > 0) { - rval = XFS_ITEM_PINNED; - goto out_unlock; - } + if (!xfs_buf_trylock(bp)) + return XFS_ITEM_LOCKED; - /* - * Stale inode items should force out the iclog. - */ - if (ip->i_flags & XFS_ISTALE) { - rval = XFS_ITEM_PINNED; - goto out_unlock; - } + spin_unlock(&lip->li_ailp->ail_lock); /* - * Someone else is already flushing the inode. Nothing we can do - * here but wait for the flush to finish and remove the item from - * the AIL. + * We need to hold a reference for flushing the cluster buffer as it may + * fail the buffer without IO submission. In which case, we better get a + * reference for that completion because otherwise we don't get a + * reference for IO until we queue the buffer for delwri submission. 
*/ - if (!xfs_iflock_nowait(ip)) { - rval = XFS_ITEM_FLUSHING; - goto out_unlock; - } - - ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); - ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); - - spin_unlock(&lip->li_ailp->ail_lock); - - error = xfs_iflush(ip, &bp); + xfs_buf_hold(bp); + error = xfs_iflush_cluster(bp); if (!error) { if (!xfs_buf_delwri_queue(bp, buffer_list)) rval = XFS_ITEM_FLUSHING; xfs_buf_relse(bp); - } else if (error == -EAGAIN) + } else { + /* + * Release the buffer if we were unable to flush anything. On + * any other error, the buffer has already been released. + */ + if (error == -EAGAIN) + xfs_buf_relse(bp); rval = XFS_ITEM_LOCKED; + } spin_lock(&lip->li_ailp->ail_lock); -out_unlock: - xfs_iunlock(ip, XFS_ILOCK_SHARED); return rval; } @@ -621,7 +601,6 @@ static const struct xfs_item_ops xfs_inode_item_ops = { .iop_committed = xfs_inode_item_committed, .iop_push = xfs_inode_item_push, .iop_committing = xfs_inode_item_committing, - .iop_error = xfs_inode_item_error }; @@ -636,9 +615,11 @@ xfs_inode_item_init( struct xfs_inode_log_item *iip; ASSERT(ip->i_itemp == NULL); - iip = ip->i_itemp = kmem_zone_zalloc(xfs_ili_zone, 0); + iip = ip->i_itemp = kmem_cache_zalloc(xfs_ili_zone, + GFP_KERNEL | __GFP_NOFAIL); iip->ili_inode = ip; + spin_lock_init(&iip->ili_lock); xfs_log_item_init(mp, &iip->ili_item, XFS_LI_INODE, &xfs_inode_item_ops); } @@ -648,110 +629,129 @@ xfs_inode_item_init( */ void xfs_inode_item_destroy( - xfs_inode_t *ip) + struct xfs_inode *ip) { - kmem_free(ip->i_itemp->ili_item.li_lv_shadow); - kmem_cache_free(xfs_ili_zone, ip->i_itemp); + struct xfs_inode_log_item *iip = ip->i_itemp; + + ASSERT(iip->ili_item.li_buf == NULL); + + ip->i_itemp = NULL; + kmem_free(iip->ili_item.li_lv_shadow); + kmem_cache_free(xfs_ili_zone, iip); } /* - * This is the inode flushing I/O completion routine. It is called - * from interrupt level when the buffer containing the inode is - * flushed to disk. It is responsible for removing the inode item - * from the AIL if it has not been re-logged, and unlocking the inode's - * flush lock. - * - * To reduce AIL lock traffic as much as possible, we scan the buffer log item - * list for other inodes that will run this function. We remove them from the - * buffer list so we can process all the inode IO completions in one AIL lock - * traversal. + * We only want to pull the item from the AIL if it is actually there + * and its location in the log has not changed since we started the + * flush. Thus, we only bother if the inode's lsn has not changed. */ -void -xfs_iflush_done( - struct xfs_buf *bp, - struct xfs_log_item *lip) +static void +xfs_iflush_ail_updates( + struct xfs_ail *ailp, + struct list_head *list) { - struct xfs_inode_log_item *iip; - struct xfs_log_item *blip, *n; - struct xfs_ail *ailp = lip->li_ailp; - int need_ail = 0; - LIST_HEAD(tmp); + struct xfs_log_item *lip; + xfs_lsn_t tail_lsn = 0; - /* - * Scan the buffer IO completions for other inodes being completed and - * attach them to the current inode log item. 
- */ + /* this is an opencoded batch version of xfs_trans_ail_delete */ + spin_lock(&ailp->ail_lock); + list_for_each_entry(lip, list, li_bio_list) { + xfs_lsn_t lsn; - list_add_tail(&lip->li_bio_list, &tmp); - - list_for_each_entry_safe(blip, n, &bp->b_li_list, li_bio_list) { - if (lip->li_cb != xfs_iflush_done) + clear_bit(XFS_LI_FAILED, &lip->li_flags); + if (INODE_ITEM(lip)->ili_flush_lsn != lip->li_lsn) continue; - list_move_tail(&blip->li_bio_list, &tmp); + lsn = xfs_ail_delete_one(ailp, lip); + if (!tail_lsn && lsn) + tail_lsn = lsn; + } + xfs_ail_update_finish(ailp, tail_lsn); +} + +/* + * Walk the list of inodes that have completed their IOs. If they are clean + * remove them from the list and dissociate them from the buffer. Buffers that + * are still dirty remain linked to the buffer and on the list. Caller must + * handle them appropriately. + */ +static void +xfs_iflush_finish( + struct xfs_buf *bp, + struct list_head *list) +{ + struct xfs_log_item *lip, *n; + + list_for_each_entry_safe(lip, n, list, li_bio_list) { + struct xfs_inode_log_item *iip = INODE_ITEM(lip); + bool drop_buffer = false; + + spin_lock(&iip->ili_lock); + /* - * while we have the item, do the unlocked check for needing - * the AIL lock. + * Remove the reference to the cluster buffer if the inode is + * clean in memory and drop the buffer reference once we've + * dropped the locks we hold. */ - iip = INODE_ITEM(blip); - if ((iip->ili_logged && blip->li_lsn == iip->ili_flush_lsn) || - test_bit(XFS_LI_FAILED, &blip->li_flags)) - need_ail++; + ASSERT(iip->ili_item.li_buf == bp); + if (!iip->ili_fields) { + iip->ili_item.li_buf = NULL; + list_del_init(&lip->li_bio_list); + drop_buffer = true; + } + iip->ili_last_fields = 0; + iip->ili_flush_lsn = 0; + spin_unlock(&iip->ili_lock); + xfs_ifunlock(iip->ili_inode); + if (drop_buffer) + xfs_buf_rele(bp); } +} - /* make sure we capture the state of the initial inode. */ - iip = INODE_ITEM(lip); - if ((iip->ili_logged && lip->li_lsn == iip->ili_flush_lsn) || - test_bit(XFS_LI_FAILED, &lip->li_flags)) - need_ail++; +/* + * Inode buffer IO completion routine. It is responsible for removing inodes + * attached to the buffer from the AIL if they have not been re-logged, as well + * as completing the flush and unlocking the inode. + */ +void +xfs_iflush_done( + struct xfs_buf *bp) +{ + struct xfs_log_item *lip, *n; + LIST_HEAD(flushed_inodes); + LIST_HEAD(ail_updates); /* - * We only want to pull the item from the AIL if it is - * actually there and its location in the log has not - * changed since we started the flush. Thus, we only bother - * if the ili_logged flag is set and the inode's lsn has not - * changed. First we check the lsn outside - * the lock since it's cheaper, and then we recheck while - * holding the lock before removing the inode from the AIL. + * Pull the attached inodes from the buffer one at a time and take the + * appropriate action on them. */ - if (need_ail) { - xfs_lsn_t tail_lsn = 0; - - /* this is an opencoded batch version of xfs_trans_ail_delete */ - spin_lock(&ailp->ail_lock); - list_for_each_entry(blip, &tmp, li_bio_list) { - if (INODE_ITEM(blip)->ili_logged && - blip->li_lsn == INODE_ITEM(blip)->ili_flush_lsn) { - /* - * xfs_ail_update_finish() only cares about the - * lsn of the first tail item removed, any - * others will be at the same or higher lsn so - * we just ignore them. 
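For context, xfs_iflush_finish() above clears ili_last_fields under the new ili_lock; the matching flush-side handoff is not part of the hunks shown here, so the following is only an assumed sketch of how the dirty field mask is expected to be transferred before the cluster buffer is submitted:

        /*
         * Assumed flush-side handoff (not in this hunk): move the dirty field
         * mask to ili_last_fields under ili_lock so that completion can tell
         * whether the inode was redirtied while the buffer was under I/O.
         */
        spin_lock(&iip->ili_lock);
        iip->ili_last_fields = iip->ili_fields;
        iip->ili_fields = 0;
        iip->ili_fsync_fields = 0;
        spin_unlock(&iip->ili_lock);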
- */ - xfs_lsn_t lsn = xfs_ail_delete_one(ailp, blip); - if (!tail_lsn && lsn) - tail_lsn = lsn; - } else { - xfs_clear_li_failed(blip); - } + list_for_each_entry_safe(lip, n, &bp->b_li_list, li_bio_list) { + struct xfs_inode_log_item *iip = INODE_ITEM(lip); + + if (xfs_iflags_test(iip->ili_inode, XFS_ISTALE)) { + xfs_iflush_abort(iip->ili_inode); + continue; } - xfs_ail_update_finish(ailp, tail_lsn); + if (!iip->ili_last_fields) + continue; + + /* Do an unlocked check for needing the AIL lock. */ + if (iip->ili_flush_lsn == lip->li_lsn || + test_bit(XFS_LI_FAILED, &lip->li_flags)) + list_move_tail(&lip->li_bio_list, &ail_updates); + else + list_move_tail(&lip->li_bio_list, &flushed_inodes); } - /* - * clean up and unlock the flush lock now we are done. We can clear the - * ili_last_fields bits now that we know that the data corresponding to - * them is safely on disk. - */ - list_for_each_entry_safe(blip, n, &tmp, li_bio_list) { - list_del_init(&blip->li_bio_list); - iip = INODE_ITEM(blip); - iip->ili_logged = 0; - iip->ili_last_fields = 0; - xfs_ifunlock(iip->ili_inode); + if (!list_empty(&ail_updates)) { + xfs_iflush_ail_updates(bp->b_mount->m_ail, &ail_updates); + list_splice_tail(&ail_updates, &flushed_inodes); } - list_del(&tmp); + + xfs_iflush_finish(bp, &flushed_inodes); + if (!list_empty(&flushed_inodes)) + list_splice_tail(&flushed_inodes, &bp->b_li_list); } /* @@ -762,37 +762,37 @@ xfs_iflush_done( */ void xfs_iflush_abort( - struct xfs_inode *ip) + struct xfs_inode *ip) { - struct xfs_inode_log_item *iip = ip->i_itemp; + struct xfs_inode_log_item *iip = ip->i_itemp; + struct xfs_buf *bp = NULL; if (iip) { - xfs_trans_ail_delete(&iip->ili_item, 0); - iip->ili_logged = 0; /* - * Clear the ili_last_fields bits now that we know that the - * data corresponding to them is safely on disk. + * Clear the failed bit before removing the item from the AIL so + * xfs_trans_ail_delete() doesn't try to clear and release the + * buffer attached to the log item before we are done with it. */ - iip->ili_last_fields = 0; + clear_bit(XFS_LI_FAILED, &iip->ili_item.li_flags); + xfs_trans_ail_delete(&iip->ili_item, 0); + /* * Clear the inode logging fields so no more flushes are * attempted. */ + spin_lock(&iip->ili_lock); + iip->ili_last_fields = 0; iip->ili_fields = 0; iip->ili_fsync_fields = 0; + iip->ili_flush_lsn = 0; + bp = iip->ili_item.li_buf; + iip->ili_item.li_buf = NULL; + list_del_init(&iip->ili_item.li_bio_list); + spin_unlock(&iip->ili_lock); } - /* - * Release the inode's flush lock since we're done with it. - */ xfs_ifunlock(ip); -} - -void -xfs_istale_done( - struct xfs_buf *bp, - struct xfs_log_item *lip) -{ - xfs_iflush_abort(INODE_ITEM(lip)->ili_inode); + if (bp) + xfs_buf_rele(bp); } /* diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h index 60b34bb66e8e..048b5e7dee90 100644 --- a/fs/xfs/xfs_inode_item.h +++ b/fs/xfs/xfs_inode_item.h @@ -16,24 +16,34 @@ struct xfs_mount; struct xfs_inode_log_item { struct xfs_log_item ili_item; /* common portion */ struct xfs_inode *ili_inode; /* inode ptr */ - xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ - xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ - unsigned short ili_lock_flags; /* lock flags */ - unsigned short ili_logged; /* flushed logged data */ + unsigned short ili_lock_flags; /* inode lock flags */ + /* + * The ili_lock protects the interactions between the dirty state and + * the flush state of the inode log item. 
This allows us to do atomic + * modifications of multiple state fields without having to hold a + * specific inode lock to serialise them. + * + * We need atomic changes between inode dirtying, inode flushing and + * inode completion, but these all hold different combinations of + * ILOCK and iflock and hence we need some other method of serialising + * updates to the flush state. + */ + spinlock_t ili_lock; /* flush state lock */ unsigned int ili_last_fields; /* fields when flushed */ unsigned int ili_fields; /* fields to be logged */ unsigned int ili_fsync_fields; /* logged since last fsync */ + xfs_lsn_t ili_flush_lsn; /* lsn at last flush */ + xfs_lsn_t ili_last_lsn; /* lsn at last transaction */ }; -static inline int xfs_inode_clean(xfs_inode_t *ip) +static inline int xfs_inode_clean(struct xfs_inode *ip) { return !ip->i_itemp || !(ip->i_itemp->ili_fields & XFS_ILOG_ALL); } extern void xfs_inode_item_init(struct xfs_inode *, struct xfs_mount *); extern void xfs_inode_item_destroy(struct xfs_inode *); -extern void xfs_iflush_done(struct xfs_buf *, struct xfs_log_item *); -extern void xfs_istale_done(struct xfs_buf *, struct xfs_log_item *); +extern void xfs_iflush_done(struct xfs_buf *); extern void xfs_iflush_abort(struct xfs_inode *); extern int xfs_inode_item_format_convert(xfs_log_iovec_t *, struct xfs_inode_log_format *); diff --git a/fs/xfs/xfs_inode_item_recover.c b/fs/xfs/xfs_inode_item_recover.c index dc3e26ff16c9..5e0d291835b3 100644 --- a/fs/xfs/xfs_inode_item_recover.c +++ b/fs/xfs/xfs_inode_item_recover.c @@ -376,7 +376,7 @@ out_owner_change: xfs_dinode_calc_crc(log->l_mp, dip); ASSERT(bp->b_mount == mp); - bp->b_iodone = xlog_recover_iodone; + bp->b_flags |= _XBF_LOGRECOVERY; xfs_buf_delwri_queue(bp, buffer_list); out_release: diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index a190212ca85d..6f22a66777cd 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1075,13 +1075,18 @@ xfs_merge_ioc_xflags( xflags |= FS_XFLAG_NODUMP; else xflags &= ~FS_XFLAG_NODUMP; + if (flags & FS_DAX_FL) + xflags |= FS_XFLAG_DAX; + else + xflags &= ~FS_XFLAG_DAX; return xflags; } STATIC unsigned int xfs_di2lxflags( - uint16_t di_flags) + uint16_t di_flags, + uint64_t di_flags2) { unsigned int flags = 0; @@ -1095,6 +1100,9 @@ xfs_di2lxflags( flags |= FS_NOATIME_FL; if (di_flags & XFS_DIFLAG_NODUMP) flags |= FS_NODUMP_FL; + if (di_flags2 & XFS_DIFLAG2_DAX) { + flags |= FS_DAX_FL; + } return flags; } @@ -1565,7 +1573,7 @@ xfs_ioc_getxflags( { unsigned int flags; - flags = xfs_di2lxflags(ip->i_d.di_flags); + flags = xfs_di2lxflags(ip->i_d.di_flags, ip->i_d.di_flags2); if (copy_to_user(arg, &flags, sizeof(flags))) return -EFAULT; return 0; @@ -1588,7 +1596,7 @@ xfs_ioc_setxflags( if (flags & ~(FS_IMMUTABLE_FL | FS_APPEND_FL | \ FS_NOATIME_FL | FS_NODUMP_FL | \ - FS_SYNC_FL)) + FS_SYNC_FL | FS_DAX_FL)) return -EOPNOTSUPP; fa.fsx_xflags = xfs_merge_ioc_xflags(flags, xfs_ip2xflags(ip)); diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index b9a8c3798e08..3abb8b9d6f4c 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c @@ -293,11 +293,11 @@ out_trans_cancel: STATIC bool xfs_quota_need_throttle( - struct xfs_inode *ip, - int type, - xfs_fsblock_t alloc_blocks) + struct xfs_inode *ip, + xfs_dqtype_t type, + xfs_fsblock_t alloc_blocks) { - struct xfs_dquot *dq = xfs_inode_dquot(ip, type); + struct xfs_dquot *dq = xfs_inode_dquot(ip, type); if (!dq || !xfs_this_quota_on(ip->i_mount, type)) return false; @@ -307,7 +307,7 @@ xfs_quota_need_throttle( return false; /* under the lo 
watermark, no throttle */ - if (dq->q_res_bcount + alloc_blocks < dq->q_prealloc_lo_wmark) + if (dq->q_blk.reserved + alloc_blocks < dq->q_prealloc_lo_wmark) return false; return true; @@ -315,24 +315,24 @@ xfs_quota_need_throttle( STATIC void xfs_quota_calc_throttle( - struct xfs_inode *ip, - int type, - xfs_fsblock_t *qblocks, - int *qshift, - int64_t *qfreesp) + struct xfs_inode *ip, + xfs_dqtype_t type, + xfs_fsblock_t *qblocks, + int *qshift, + int64_t *qfreesp) { - int64_t freesp; - int shift = 0; - struct xfs_dquot *dq = xfs_inode_dquot(ip, type); + struct xfs_dquot *dq = xfs_inode_dquot(ip, type); + int64_t freesp; + int shift = 0; /* no dq, or over hi wmark, squash the prealloc completely */ - if (!dq || dq->q_res_bcount >= dq->q_prealloc_hi_wmark) { + if (!dq || dq->q_blk.reserved >= dq->q_prealloc_hi_wmark) { *qblocks = 0; *qfreesp = 0; return; } - freesp = dq->q_prealloc_hi_wmark - dq->q_res_bcount; + freesp = dq->q_prealloc_hi_wmark - dq->q_blk.reserved; if (freesp < dq->q_low_space[XFS_QLOWSP_5_PCNT]) { shift = 2; if (freesp < dq->q_low_space[XFS_QLOWSP_3_PCNT]) @@ -450,14 +450,14 @@ xfs_iomap_prealloc_size( * Check each quota to cap the prealloc size, provide a shift value to * throttle with and adjust amount of available space. */ - if (xfs_quota_need_throttle(ip, XFS_DQ_USER, alloc_blocks)) - xfs_quota_calc_throttle(ip, XFS_DQ_USER, &qblocks, &qshift, + if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks)) + xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift, &freesp); - if (xfs_quota_need_throttle(ip, XFS_DQ_GROUP, alloc_blocks)) - xfs_quota_calc_throttle(ip, XFS_DQ_GROUP, &qblocks, &qshift, + if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks)) + xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift, &freesp); - if (xfs_quota_need_throttle(ip, XFS_DQ_PROJ, alloc_blocks)) - xfs_quota_calc_throttle(ip, XFS_DQ_PROJ, &qblocks, &qshift, + if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks)) + xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift, &freesp); /* @@ -865,7 +865,7 @@ xfs_buffered_write_iomap_begin( } /* - * Search the data fork fork first to look up our source mapping. We + * Search the data fork first to look up our source mapping. We * always need the data fork map, as we have to return it to the * iomap code so that the higher level write code can read data in to * perform read-modify-write cycles for unaligned writes. diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 9f70d2f68e05..ab737fed7b12 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h @@ -102,12 +102,8 @@ typedef __u32 xfs_nlink_t; #define xfs_cowb_secs xfs_params.cowb_timer.val #define current_cpu() (raw_smp_processor_id()) -#define current_pid() (current->pid) -#define current_test_flags(f) (current->flags & (f)) #define current_set_flags_nested(sp, f) \ (*(sp) = current->flags, current->flags |= (f)) -#define current_clear_flags_nested(sp, f) \ - (*(sp) = current->flags, current->flags &= ~(f)) #define current_restore_flags_nested(sp, f) \ (current->flags = ((current->flags & ~(f)) | (*(sp) & (f)))) diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 00fda2e8e738..ad0c69ee8947 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -433,7 +433,7 @@ xfs_log_reserve( XFS_STATS_INC(mp, xs_try_logspace); ASSERT(*ticp == NULL); - tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, 0); + tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent); *ticp = tic; xlog_grant_push_ail(log, tic->t_cnt ? 
tic->t_unit_res * tic->t_cnt @@ -3408,15 +3408,12 @@ xlog_ticket_alloc( int unit_bytes, int cnt, char client, - bool permanent, - xfs_km_flags_t alloc_flags) + bool permanent) { struct xlog_ticket *tic; int unit_res; - tic = kmem_zone_zalloc(xfs_log_ticket_zone, alloc_flags); - if (!tic) - return NULL; + tic = kmem_cache_zalloc(xfs_log_ticket_zone, GFP_NOFS | __GFP_NOFAIL); unit_res = xfs_log_calc_unit_res(log->l_mp, unit_bytes); diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 9ed90368ab31..b0ef071b3cb5 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c @@ -37,8 +37,7 @@ xlog_cil_ticket_alloc( { struct xlog_ticket *tic; - tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0, - KM_NOFS); + tic = xlog_ticket_alloc(log, 0, 1, XFS_TRANSACTION, 0); /* * set the current reservation to zero so we know to steal the basic @@ -240,7 +239,7 @@ xfs_cil_prepare_item( * this CIL context and so we need to pin it. If we are replacing the * old_lv, then remove the space it accounts for and make it the shadow * buffer for later freeing. In both cases we are now switching to the - * shadow buffer, so update the the pointer to it appropriately. + * shadow buffer, so update the pointer to it appropriately. */ if (!old_lv) { if (lv->lv_item->li_ops->iop_pin) diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h index 75a62870b63a..1c6fdbf3d506 100644 --- a/fs/xfs/xfs_log_priv.h +++ b/fs/xfs/xfs_log_priv.h @@ -464,9 +464,7 @@ xlog_ticket_alloc( int unit_bytes, int count, char client, - bool permanent, - xfs_km_flags_t alloc_flags); - + bool permanent); static inline void xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes) diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c index ec015df55b77..e2ec91b2d0f4 100644 --- a/fs/xfs/xfs_log_recover.c +++ b/fs/xfs/xfs_log_recover.c @@ -287,9 +287,8 @@ xlog_recover_iodone( if (bp->b_log_item) xfs_buf_item_relse(bp); ASSERT(bp->b_log_item == NULL); - - bp->b_iodone = NULL; - xfs_buf_ioend(bp); + bp->b_flags &= ~_XBF_LOGRECOVERY; + xfs_buf_ioend_finish(bp); } /* @@ -1101,7 +1100,7 @@ xlog_verify_head( * * Note that xlog_find_tail() clears the blocks at the new head * (i.e., the records with invalid CRC) if the cycle number - * matches the the current cycle. + * matches the current cycle. */ found = xlog_rseek_logrec_hdr(log, first_bad, *tail_blk, 1, buffer, rhead_blk, rhead, wrapped); diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index d5dcf9869860..c8ae49a1e99c 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c @@ -148,7 +148,6 @@ xfs_free_perag( ASSERT(atomic_read(&pag->pag_ref) == 0); xfs_iunlink_destroy(pag); xfs_buf_hash_destroy(pag); - mutex_destroy(&pag->pag_ici_reclaim_lock); call_rcu(&pag->rcu_head, __xfs_free_perag); } } @@ -200,7 +199,6 @@ xfs_initialize_perag( pag->pag_agno = index; pag->pag_mount = mp; spin_lock_init(&pag->pag_ici_lock); - mutex_init(&pag->pag_ici_reclaim_lock); INIT_RADIX_TREE(&pag->pag_ici_root, GFP_ATOMIC); if (xfs_buf_hash_init(pag)) goto out_free_pag; @@ -242,7 +240,6 @@ xfs_initialize_perag( out_hash_destroy: xfs_buf_hash_destroy(pag); out_free_pag: - mutex_destroy(&pag->pag_ici_reclaim_lock); kmem_free(pag); out_unwind_new_pags: /* unwind any prior newly initialized pags */ @@ -252,7 +249,6 @@ out_unwind_new_pags: break; xfs_buf_hash_destroy(pag); xfs_iunlink_destroy(pag); - mutex_destroy(&pag->pag_ici_reclaim_lock); kmem_free(pag); } return error; @@ -1015,7 +1011,7 @@ xfs_mountfs( * quota inodes. 
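Worth noting for the xfs_ioctl.c hunk above: once FS_DAX_FL is reported by xfs_di2lxflags() and accepted by the setxflags path, the per-inode DAX hint can be toggled with the standard flags ioctls. Illustrative userspace snippet, assuming a uapi linux/fs.h that defines FS_DAX_FL; whether I/O actually switches to DAX still depends on the device and mount options:

        #include <sys/ioctl.h>
        #include <linux/fs.h>

        int set_dax_hint(int fd)
        {
                int attr;

                if (ioctl(fd, FS_IOC_GETFLAGS, &attr) < 0)
                        return -1;
                attr |= FS_DAX_FL;              /* request DAX on this inode */
                return ioctl(fd, FS_IOC_SETFLAGS, &attr);
        }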
*/ cancel_delayed_work_sync(&mp->m_reclaim_work); - xfs_reclaim_inodes(mp, SYNC_WAIT); + xfs_reclaim_inodes(mp); xfs_health_unmount(mp); out_log_dealloc: mp->m_flags |= XFS_MOUNT_UNMOUNTING; @@ -1092,13 +1088,12 @@ xfs_unmountfs( xfs_ail_push_all_sync(mp->m_ail); /* - * And reclaim all inodes. At this point there should be no dirty - * inodes and none should be pinned or locked, but use synchronous - * reclaim just to be sure. We can stop background inode reclaim - * here as well if it is still running. + * Reclaim all inodes. At this point there should be no dirty inodes and + * none should be pinned or locked. Stop background inode reclaim here + * if it is still running. */ cancel_delayed_work_sync(&mp->m_reclaim_work); - xfs_reclaim_inodes(mp, SYNC_WAIT); + xfs_reclaim_inodes(mp); xfs_health_unmount(mp); xfs_qm_unmount(mp); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 3725d25ad97e..a72cfcaa4ad1 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -354,7 +354,6 @@ typedef struct xfs_perag { spinlock_t pag_ici_lock; /* incore inode cache lock */ struct radix_tree_root pag_ici_root; /* incore inode cache root */ int pag_ici_reclaimable; /* reclaimable inodes */ - struct mutex pag_ici_reclaim_lock; /* serialisation point */ unsigned long pag_ici_reclaim_cursor; /* reclaim restart point */ /* buffer cache index */ diff --git a/fs/xfs/xfs_pwork.c b/fs/xfs/xfs_pwork.c index 4bcc3e61056c..b03333f1c84a 100644 --- a/fs/xfs/xfs_pwork.c +++ b/fs/xfs/xfs_pwork.c @@ -132,5 +132,5 @@ xfs_pwork_guess_datadev_parallelism( * For now we'll go with the most conservative setting possible, * which is two threads for an SSD and 1 thread everywhere else. */ - return blk_queue_nonrot(btp->bt_bdev->bd_queue) ? 2 : 1; + return blk_queue_nonrot(btp->bt_bdev->bd_disk->queue) ? 
2 : 1; } diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index d6cd83317344..be67570badf8 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -47,7 +47,7 @@ STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp); STATIC int xfs_qm_dquot_walk( struct xfs_mount *mp, - int type, + xfs_dqtype_t type, int (*execute)(struct xfs_dquot *dqp, void *data), void *data) { @@ -79,7 +79,7 @@ restart: for (i = 0; i < nr_found; i++) { struct xfs_dquot *dqp = batch[i]; - next_index = be32_to_cpu(dqp->q_core.d_id) + 1; + next_index = dqp->q_id + 1; error = execute(batch[i], data); if (error == -EAGAIN) { @@ -124,10 +124,10 @@ xfs_qm_dqpurge( int error = -EAGAIN; xfs_dqlock(dqp); - if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) + if ((dqp->q_flags & XFS_DQFLAG_FREEING) || dqp->q_nrefs != 0) goto out_unlock; - dqp->dq_flags |= XFS_DQ_FREEING; + dqp->q_flags |= XFS_DQFLAG_FREEING; xfs_dqflock(dqp); @@ -148,6 +148,7 @@ xfs_qm_dqpurge( error = xfs_bwrite(bp); xfs_buf_relse(bp); } else if (error == -EAGAIN) { + dqp->q_flags &= ~XFS_DQFLAG_FREEING; goto out_unlock; } xfs_dqflock(dqp); @@ -160,8 +161,7 @@ xfs_qm_dqpurge( xfs_dqfunlock(dqp); xfs_dqunlock(dqp); - radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags), - be32_to_cpu(dqp->q_core.d_id)); + radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id); qi->qi_dquots--; /* @@ -189,11 +189,11 @@ xfs_qm_dqpurge_all( uint flags) { if (flags & XFS_QMOPT_UQUOTA) - xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL); + xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_dqpurge, NULL); if (flags & XFS_QMOPT_GQUOTA) - xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL); + xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_dqpurge, NULL); if (flags & XFS_QMOPT_PQUOTA) - xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL); + xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_dqpurge, NULL); } /* @@ -250,7 +250,7 @@ STATIC int xfs_qm_dqattach_one( struct xfs_inode *ip, xfs_dqid_t id, - uint type, + xfs_dqtype_t type, bool doalloc, struct xfs_dquot **IO_idqpp) { @@ -331,7 +331,7 @@ xfs_qm_dqattach_locked( if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) { error = xfs_qm_dqattach_one(ip, i_uid_read(VFS_I(ip)), - XFS_DQ_USER, doalloc, &ip->i_udquot); + XFS_DQTYPE_USER, doalloc, &ip->i_udquot); if (error) goto done; ASSERT(ip->i_udquot); @@ -339,14 +339,14 @@ xfs_qm_dqattach_locked( if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) { error = xfs_qm_dqattach_one(ip, i_gid_read(VFS_I(ip)), - XFS_DQ_GROUP, doalloc, &ip->i_gdquot); + XFS_DQTYPE_GROUP, doalloc, &ip->i_gdquot); if (error) goto done; ASSERT(ip->i_gdquot); } if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) { - error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQ_PROJ, + error = xfs_qm_dqattach_one(ip, ip->i_d.di_projid, XFS_DQTYPE_PROJ, doalloc, &ip->i_pdquot); if (error) goto done; @@ -473,7 +473,7 @@ xfs_qm_dquot_isolate( /* * Prevent lookups now that we are past the point of no return. 
*/ - dqp->dq_flags |= XFS_DQ_FREEING; + dqp->q_flags |= XFS_DQFLAG_FREEING; xfs_dqunlock(dqp); ASSERT(dqp->q_nrefs == 0); @@ -545,31 +545,29 @@ xfs_qm_shrink_count( STATIC void xfs_qm_set_defquota( struct xfs_mount *mp, - uint type, + xfs_dqtype_t type, struct xfs_quotainfo *qinf) { struct xfs_dquot *dqp; struct xfs_def_quota *defq; - struct xfs_disk_dquot *ddqp; int error; error = xfs_qm_dqget_uncached(mp, 0, type, &dqp); if (error) return; - ddqp = &dqp->q_core; defq = xfs_get_defquota(qinf, xfs_dquot_type(dqp)); /* * Timers and warnings have been already set, let's just set the * default limits for this quota type */ - defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit); - defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit); - defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit); - defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit); - defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit); - defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit); + defq->blk.hard = dqp->q_blk.hardlimit; + defq->blk.soft = dqp->q_blk.softlimit; + defq->ino.hard = dqp->q_ino.hardlimit; + defq->ino.soft = dqp->q_ino.softlimit; + defq->rtb.hard = dqp->q_rtb.hardlimit; + defq->rtb.soft = dqp->q_rtb.softlimit; xfs_qm_dqdestroy(dqp); } @@ -577,22 +575,21 @@ xfs_qm_set_defquota( static void xfs_qm_init_timelimits( struct xfs_mount *mp, - uint type) + xfs_dqtype_t type) { struct xfs_quotainfo *qinf = mp->m_quotainfo; struct xfs_def_quota *defq; - struct xfs_disk_dquot *ddqp; struct xfs_dquot *dqp; int error; defq = xfs_get_defquota(qinf, type); - defq->btimelimit = XFS_QM_BTIMELIMIT; - defq->itimelimit = XFS_QM_ITIMELIMIT; - defq->rtbtimelimit = XFS_QM_RTBTIMELIMIT; - defq->bwarnlimit = XFS_QM_BWARNLIMIT; - defq->iwarnlimit = XFS_QM_IWARNLIMIT; - defq->rtbwarnlimit = XFS_QM_RTBWARNLIMIT; + defq->blk.time = XFS_QM_BTIMELIMIT; + defq->ino.time = XFS_QM_ITIMELIMIT; + defq->rtb.time = XFS_QM_RTBTIMELIMIT; + defq->blk.warn = XFS_QM_BWARNLIMIT; + defq->ino.warn = XFS_QM_IWARNLIMIT; + defq->rtb.warn = XFS_QM_RTBWARNLIMIT; /* * We try to get the limits from the superuser's limits fields. @@ -605,25 +602,23 @@ xfs_qm_init_timelimits( if (error) return; - ddqp = &dqp->q_core; - /* * The warnings and timers set the grace period given to * a user or group before he or she can not perform any * more writing. If it is zero, a default is used. 
*/ - if (ddqp->d_btimer) - defq->btimelimit = be32_to_cpu(ddqp->d_btimer); - if (ddqp->d_itimer) - defq->itimelimit = be32_to_cpu(ddqp->d_itimer); - if (ddqp->d_rtbtimer) - defq->rtbtimelimit = be32_to_cpu(ddqp->d_rtbtimer); - if (ddqp->d_bwarns) - defq->bwarnlimit = be16_to_cpu(ddqp->d_bwarns); - if (ddqp->d_iwarns) - defq->iwarnlimit = be16_to_cpu(ddqp->d_iwarns); - if (ddqp->d_rtbwarns) - defq->rtbwarnlimit = be16_to_cpu(ddqp->d_rtbwarns); + if (dqp->q_blk.timer) + defq->blk.time = dqp->q_blk.timer; + if (dqp->q_ino.timer) + defq->ino.time = dqp->q_ino.timer; + if (dqp->q_rtb.timer) + defq->rtb.time = dqp->q_rtb.timer; + if (dqp->q_blk.warnings) + defq->blk.warn = dqp->q_blk.warnings; + if (dqp->q_ino.warnings) + defq->ino.warn = dqp->q_ino.warnings; + if (dqp->q_rtb.warnings) + defq->rtb.warn = dqp->q_rtb.warnings; xfs_qm_dqdestroy(dqp); } @@ -669,16 +664,16 @@ xfs_qm_init_quotainfo( mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD); - xfs_qm_init_timelimits(mp, XFS_DQ_USER); - xfs_qm_init_timelimits(mp, XFS_DQ_GROUP); - xfs_qm_init_timelimits(mp, XFS_DQ_PROJ); + xfs_qm_init_timelimits(mp, XFS_DQTYPE_USER); + xfs_qm_init_timelimits(mp, XFS_DQTYPE_GROUP); + xfs_qm_init_timelimits(mp, XFS_DQTYPE_PROJ); if (XFS_IS_UQUOTA_RUNNING(mp)) - xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf); + xfs_qm_set_defquota(mp, XFS_DQTYPE_USER, qinf); if (XFS_IS_GQUOTA_RUNNING(mp)) - xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf); + xfs_qm_set_defquota(mp, XFS_DQTYPE_GROUP, qinf); if (XFS_IS_PQUOTA_RUNNING(mp)) - xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf); + xfs_qm_set_defquota(mp, XFS_DQTYPE_PROJ, qinf); qinf->qi_shrinker.count_objects = xfs_qm_shrink_count; qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan; @@ -828,14 +823,13 @@ xfs_qm_qino_alloc( STATIC void xfs_qm_reset_dqcounts( - xfs_mount_t *mp, - xfs_buf_t *bp, - xfs_dqid_t id, - uint type) + struct xfs_mount *mp, + struct xfs_buf *bp, + xfs_dqid_t id, + xfs_dqtype_t type) { struct xfs_dqblk *dqb; int j; - xfs_failaddr_t fa; trace_xfs_reset_dqcounts(bp, _RET_IP_); @@ -860,15 +854,15 @@ xfs_qm_reset_dqcounts( * find uninitialised dquot blks. See comment in * xfs_dquot_verify. */ - fa = xfs_dqblk_verify(mp, &dqb[j], id + j, type); - if (fa) + if (xfs_dqblk_verify(mp, &dqb[j], id + j) || + (dqb[j].dd_diskdq.d_type & XFS_DQTYPE_REC_MASK) != type) xfs_dqblk_repair(mp, &dqb[j], id + j, type); /* * Reset type in case we are reusing group quota file for * project quotas or vice versa */ - ddq->d_flags = type; + ddq->d_type = type; ddq->d_bcount = 0; ddq->d_icount = 0; ddq->d_rtbcount = 0; @@ -901,17 +895,13 @@ xfs_qm_reset_dqcounts_all( xfs_dqid_t firstid, xfs_fsblock_t bno, xfs_filblks_t blkcnt, - uint flags, + xfs_dqtype_t type, struct list_head *buffer_list) { struct xfs_buf *bp; - int error; - int type; + int error = 0; ASSERT(blkcnt > 0); - type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER : - (flags & XFS_QMOPT_PQUOTA ? 
XFS_DQ_PROJ : XFS_DQ_GROUP); - error = 0; /* * Blkcnt arg can be a very big number, and might even be @@ -971,7 +961,7 @@ STATIC int xfs_qm_reset_dqcounts_buf( struct xfs_mount *mp, struct xfs_inode *qip, - uint flags, + xfs_dqtype_t type, struct list_head *buffer_list) { struct xfs_bmbt_irec *map; @@ -1047,7 +1037,7 @@ xfs_qm_reset_dqcounts_buf( error = xfs_qm_reset_dqcounts_all(mp, firstid, map[i].br_startblock, map[i].br_blockcount, - flags, buffer_list); + type, buffer_list); if (error) goto out; } @@ -1069,7 +1059,7 @@ out: STATIC int xfs_qm_quotacheck_dqadjust( struct xfs_inode *ip, - uint type, + xfs_dqtype_t type, xfs_qcnt_t nblks, xfs_qcnt_t rtblks) { @@ -1095,15 +1085,15 @@ xfs_qm_quotacheck_dqadjust( * Adjust the inode count and the block count to reflect this inode's * resource usage. */ - be64_add_cpu(&dqp->q_core.d_icount, 1); - dqp->q_res_icount++; + dqp->q_ino.count++; + dqp->q_ino.reserved++; if (nblks) { - be64_add_cpu(&dqp->q_core.d_bcount, nblks); - dqp->q_res_bcount += nblks; + dqp->q_blk.count += nblks; + dqp->q_blk.reserved += nblks; } if (rtblks) { - be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks); - dqp->q_res_rtbcount += rtblks; + dqp->q_rtb.count += rtblks; + dqp->q_rtb.reserved += rtblks; } /* @@ -1111,12 +1101,12 @@ xfs_qm_quotacheck_dqadjust( * * There are no timers for the default values set in the root dquot. */ - if (dqp->q_core.d_id) { - xfs_qm_adjust_dqlimits(mp, dqp); - xfs_qm_adjust_dqtimers(mp, dqp); + if (dqp->q_id) { + xfs_qm_adjust_dqlimits(dqp); + xfs_qm_adjust_dqtimers(dqp); } - dqp->dq_flags |= XFS_DQ_DIRTY; + dqp->q_flags |= XFS_DQFLAG_DIRTY; xfs_qm_dqput(dqp); return 0; } @@ -1186,21 +1176,21 @@ xfs_qm_dqusage_adjust( * and quotaoffs don't race. (Quotachecks happen at mount time only). */ if (XFS_IS_UQUOTA_ON(mp)) { - error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_USER, nblks, + error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_USER, nblks, rtblks); if (error) goto error0; } if (XFS_IS_GQUOTA_ON(mp)) { - error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_GROUP, nblks, + error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_GROUP, nblks, rtblks); if (error) goto error0; } if (XFS_IS_PQUOTA_ON(mp)) { - error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQ_PROJ, nblks, + error = xfs_qm_quotacheck_dqadjust(ip, XFS_DQTYPE_PROJ, nblks, rtblks); if (error) goto error0; @@ -1222,7 +1212,7 @@ xfs_qm_flush_one( int error = 0; xfs_dqlock(dqp); - if (dqp->dq_flags & XFS_DQ_FREEING) + if (dqp->q_flags & XFS_DQFLAG_FREEING) goto out_unlock; if (!XFS_DQ_IS_DIRTY(dqp)) goto out_unlock; @@ -1291,7 +1281,7 @@ xfs_qm_quotacheck( * We don't log our changes till later. */ if (uip) { - error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_QMOPT_UQUOTA, + error = xfs_qm_reset_dqcounts_buf(mp, uip, XFS_DQTYPE_USER, &buffer_list); if (error) goto error_return; @@ -1299,7 +1289,7 @@ xfs_qm_quotacheck( } if (gip) { - error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_QMOPT_GQUOTA, + error = xfs_qm_reset_dqcounts_buf(mp, gip, XFS_DQTYPE_GROUP, &buffer_list); if (error) goto error_return; @@ -1307,7 +1297,7 @@ xfs_qm_quotacheck( } if (pip) { - error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_QMOPT_PQUOTA, + error = xfs_qm_reset_dqcounts_buf(mp, pip, XFS_DQTYPE_PROJ, &buffer_list); if (error) goto error_return; @@ -1324,17 +1314,17 @@ xfs_qm_quotacheck( * down to disk buffers if everything was updated successfully. 
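The xfs_qm.c hunks above move the quota walkers and default-limit code from the XFS_DQ_* flag namespace to xfs_dqtype_t, with the defaults grouped per resource into blk/ino/rtb (the struct xfs_quota_limits definition appears in the xfs_qm.h hunk further down). A small sketch of a default-limit lookup after this change, under those assumptions:

        /* Illustrative only: fetch the default block grace period for user quotas. */
        struct xfs_def_quota    *defq;
        time64_t                btime;

        defq = xfs_get_defquota(mp->m_quotainfo, XFS_DQTYPE_USER);
        btime = defq->blk.time;         /* was defq->btimelimit before this patch */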
*/ if (XFS_IS_UQUOTA_ON(mp)) { - error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one, + error = xfs_qm_dquot_walk(mp, XFS_DQTYPE_USER, xfs_qm_flush_one, &buffer_list); } if (XFS_IS_GQUOTA_ON(mp)) { - error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one, + error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_GROUP, xfs_qm_flush_one, &buffer_list); if (!error) error = error2; } if (XFS_IS_PQUOTA_ON(mp)) { - error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one, + error2 = xfs_qm_dquot_walk(mp, XFS_DQTYPE_PROJ, xfs_qm_flush_one, &buffer_list); if (!error) error = error2; @@ -1597,8 +1587,7 @@ xfs_qm_dqfree_one( struct xfs_quotainfo *qi = mp->m_quotainfo; mutex_lock(&qi->qi_tree_lock); - radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags), - be32_to_cpu(dqp->q_core.d_id)); + radix_tree_delete(xfs_dquot_tree(qi, xfs_dquot_type(dqp)), dqp->q_id); qi->qi_dquots--; mutex_unlock(&qi->qi_tree_lock); @@ -1673,7 +1662,7 @@ xfs_qm_vop_dqalloc( */ xfs_iunlock(ip, lockflags); error = xfs_qm_dqget(mp, from_kuid(user_ns, uid), - XFS_DQ_USER, true, &uq); + XFS_DQTYPE_USER, true, &uq); if (error) { ASSERT(error != -ENOENT); return error; @@ -1697,7 +1686,7 @@ xfs_qm_vop_dqalloc( if (!gid_eq(inode->i_gid, gid)) { xfs_iunlock(ip, lockflags); error = xfs_qm_dqget(mp, from_kgid(user_ns, gid), - XFS_DQ_GROUP, true, &gq); + XFS_DQTYPE_GROUP, true, &gq); if (error) { ASSERT(error != -ENOENT); goto error_rele; @@ -1713,8 +1702,8 @@ xfs_qm_vop_dqalloc( if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) { if (ip->i_d.di_projid != prid) { xfs_iunlock(ip, lockflags); - error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, XFS_DQ_PROJ, - true, &pq); + error = xfs_qm_dqget(mp, (xfs_dqid_t)prid, + XFS_DQTYPE_PROJ, true, &pq); if (error) { ASSERT(error != -ENOENT); goto error_rele; @@ -1822,7 +1811,7 @@ xfs_qm_vop_chown_reserve( XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS; if (XFS_IS_UQUOTA_ON(mp) && udqp && - i_uid_read(VFS_I(ip)) != be32_to_cpu(udqp->q_core.d_id)) { + i_uid_read(VFS_I(ip)) != udqp->q_id) { udq_delblks = udqp; /* * If there are delayed allocation blocks, then we have to @@ -1835,7 +1824,7 @@ xfs_qm_vop_chown_reserve( } } if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp && - i_gid_read(VFS_I(ip)) != be32_to_cpu(gdqp->q_core.d_id)) { + i_gid_read(VFS_I(ip)) != gdqp->q_id) { gdq_delblks = gdqp; if (delblks) { ASSERT(ip->i_gdquot); @@ -1844,7 +1833,7 @@ xfs_qm_vop_chown_reserve( } if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp && - ip->i_d.di_projid != be32_to_cpu(pdqp->q_core.d_id)) { + ip->i_d.di_projid != pdqp->q_id) { pdq_delblks = pdqp; if (delblks) { ASSERT(ip->i_pdquot); @@ -1928,21 +1917,21 @@ xfs_qm_vop_create_dqattach( if (udqp && XFS_IS_UQUOTA_ON(mp)) { ASSERT(ip->i_udquot == NULL); - ASSERT(i_uid_read(VFS_I(ip)) == be32_to_cpu(udqp->q_core.d_id)); + ASSERT(i_uid_read(VFS_I(ip)) == udqp->q_id); ip->i_udquot = xfs_qm_dqhold(udqp); xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1); } if (gdqp && XFS_IS_GQUOTA_ON(mp)) { ASSERT(ip->i_gdquot == NULL); - ASSERT(i_gid_read(VFS_I(ip)) == be32_to_cpu(gdqp->q_core.d_id)); + ASSERT(i_gid_read(VFS_I(ip)) == gdqp->q_id); ip->i_gdquot = xfs_qm_dqhold(gdqp); xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1); } if (pdqp && XFS_IS_PQUOTA_ON(mp)) { ASSERT(ip->i_pdquot == NULL); - ASSERT(ip->i_d.di_projid == be32_to_cpu(pdqp->q_core.d_id)); + ASSERT(ip->i_d.di_projid == pdqp->q_id); ip->i_pdquot = xfs_qm_dqhold(pdqp); xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1); diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h index 7b0e771fcbce..9c078c35d924 100644 
--- a/fs/xfs/xfs_qm.h +++ b/fs/xfs/xfs_qm.h @@ -20,41 +20,28 @@ extern struct kmem_zone *xfs_qm_dqtrxzone; #define XFS_DQITER_MAP_SIZE 10 #define XFS_IS_DQUOT_UNINITIALIZED(dqp) ( \ - !dqp->q_core.d_blk_hardlimit && \ - !dqp->q_core.d_blk_softlimit && \ - !dqp->q_core.d_rtb_hardlimit && \ - !dqp->q_core.d_rtb_softlimit && \ - !dqp->q_core.d_ino_hardlimit && \ - !dqp->q_core.d_ino_softlimit && \ - !dqp->q_core.d_bcount && \ - !dqp->q_core.d_rtbcount && \ - !dqp->q_core.d_icount) - -/* - * This defines the unit of allocation of dquots. - * Currently, it is just one file system block, and a 4K blk contains 30 - * (136 * 30 = 4080) dquots. It's probably not worth trying to make - * this more dynamic. - * XXXsup However, if this number is changed, we have to make sure that we don't - * implicitly assume that we do allocations in chunks of a single filesystem - * block in the dquot/xqm code. - */ -#define XFS_DQUOT_CLUSTER_SIZE_FSB (xfs_filblks_t)1 + !dqp->q_blk.hardlimit && \ + !dqp->q_blk.softlimit && \ + !dqp->q_rtb.hardlimit && \ + !dqp->q_rtb.softlimit && \ + !dqp->q_ino.hardlimit && \ + !dqp->q_ino.softlimit && \ + !dqp->q_blk.count && \ + !dqp->q_rtb.count && \ + !dqp->q_ino.count) + +struct xfs_quota_limits { + xfs_qcnt_t hard; /* default hard limit */ + xfs_qcnt_t soft; /* default soft limit */ + time64_t time; /* limit for timers */ + xfs_qwarncnt_t warn; /* limit for warnings */ +}; /* Defaults for each quota type: time limits, warn limits, usage limits */ struct xfs_def_quota { - time64_t btimelimit; /* limit for blks timer */ - time64_t itimelimit; /* limit for inodes timer */ - time64_t rtbtimelimit; /* limit for rt blks timer */ - xfs_qwarncnt_t bwarnlimit; /* limit for blks warnings */ - xfs_qwarncnt_t iwarnlimit; /* limit for inodes warnings */ - xfs_qwarncnt_t rtbwarnlimit; /* limit for rt blks warnings */ - xfs_qcnt_t bhardlimit; /* default data blk hard limit */ - xfs_qcnt_t bsoftlimit; /* default data blk soft limit */ - xfs_qcnt_t ihardlimit; /* default inode count hard limit */ - xfs_qcnt_t isoftlimit; /* default inode count soft limit */ - xfs_qcnt_t rtbhardlimit; /* default realtime blk hard limit */ - xfs_qcnt_t rtbsoftlimit; /* default realtime blk soft limit */ + struct xfs_quota_limits blk; + struct xfs_quota_limits ino; + struct xfs_quota_limits rtb; }; /* @@ -83,14 +70,14 @@ struct xfs_quotainfo { static inline struct radix_tree_root * xfs_dquot_tree( struct xfs_quotainfo *qi, - int type) + xfs_dqtype_t type) { switch (type) { - case XFS_DQ_USER: + case XFS_DQTYPE_USER: return &qi->qi_uquota_tree; - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: return &qi->qi_gquota_tree; - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: return &qi->qi_pquota_tree; default: ASSERT(0); @@ -99,14 +86,14 @@ xfs_dquot_tree( } static inline struct xfs_inode * -xfs_quota_inode(xfs_mount_t *mp, uint dq_flags) +xfs_quota_inode(struct xfs_mount *mp, xfs_dqtype_t type) { - switch (dq_flags & XFS_DQ_ALLTYPES) { - case XFS_DQ_USER: + switch (type) { + case XFS_DQTYPE_USER: return mp->m_quotainfo->qi_uquotaip; - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: return mp->m_quotainfo->qi_gquotaip; - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: return mp->m_quotainfo->qi_pquotaip; default: ASSERT(0); @@ -114,17 +101,6 @@ xfs_quota_inode(xfs_mount_t *mp, uint dq_flags) return NULL; } -static inline int -xfs_dquot_type(struct xfs_dquot *dqp) -{ - if (XFS_QM_ISUDQ(dqp)) - return XFS_DQ_USER; - if (XFS_QM_ISGDQ(dqp)) - return XFS_DQ_GROUP; - ASSERT(XFS_QM_ISPDQ(dqp)); - return XFS_DQ_PROJ; -} - extern void 
xfs_trans_mod_dquot(struct xfs_trans *tp, struct xfs_dquot *dqp, uint field, int64_t delta); extern void xfs_trans_dqjoin(struct xfs_trans *, struct xfs_dquot *); @@ -166,24 +142,30 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint); /* quota ops */ extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint); -extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t, - uint, struct qc_dqblk *); -extern int xfs_qm_scall_getquota_next(struct xfs_mount *, - xfs_dqid_t *, uint, struct qc_dqblk *); -extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint, - struct qc_dqblk *); +extern int xfs_qm_scall_getquota(struct xfs_mount *mp, + xfs_dqid_t id, + xfs_dqtype_t type, + struct qc_dqblk *dst); +extern int xfs_qm_scall_getquota_next(struct xfs_mount *mp, + xfs_dqid_t *id, + xfs_dqtype_t type, + struct qc_dqblk *dst); +extern int xfs_qm_scall_setqlim(struct xfs_mount *mp, + xfs_dqid_t id, + xfs_dqtype_t type, + struct qc_dqblk *newlim); extern int xfs_qm_scall_quotaon(struct xfs_mount *, uint); extern int xfs_qm_scall_quotaoff(struct xfs_mount *, uint); static inline struct xfs_def_quota * -xfs_get_defquota(struct xfs_quotainfo *qi, int type) +xfs_get_defquota(struct xfs_quotainfo *qi, xfs_dqtype_t type) { switch (type) { - case XFS_DQ_USER: + case XFS_DQTYPE_USER: return &qi->qi_usr_default; - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: return &qi->qi_grp_default; - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: return &qi->qi_prj_default; default: ASSERT(0); diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c index fc2fa418919f..639398091ad6 100644 --- a/fs/xfs/xfs_qm_bhv.c +++ b/fs/xfs/xfs_qm_bhv.c @@ -23,24 +23,24 @@ xfs_fill_statvfs_from_dquot( { uint64_t limit; - limit = dqp->q_core.d_blk_softlimit ? - be64_to_cpu(dqp->q_core.d_blk_softlimit) : - be64_to_cpu(dqp->q_core.d_blk_hardlimit); + limit = dqp->q_blk.softlimit ? + dqp->q_blk.softlimit : + dqp->q_blk.hardlimit; if (limit && statp->f_blocks > limit) { statp->f_blocks = limit; statp->f_bfree = statp->f_bavail = - (statp->f_blocks > dqp->q_res_bcount) ? - (statp->f_blocks - dqp->q_res_bcount) : 0; + (statp->f_blocks > dqp->q_blk.reserved) ? + (statp->f_blocks - dqp->q_blk.reserved) : 0; } - limit = dqp->q_core.d_ino_softlimit ? - be64_to_cpu(dqp->q_core.d_ino_softlimit) : - be64_to_cpu(dqp->q_core.d_ino_hardlimit); + limit = dqp->q_ino.softlimit ? + dqp->q_ino.softlimit : + dqp->q_ino.hardlimit; if (limit && statp->f_files > limit) { statp->f_files = limit; statp->f_ffree = - (statp->f_files > dqp->q_res_icount) ? - (statp->f_files - dqp->q_res_icount) : 0; + (statp->f_files > dqp->q_ino.reserved) ? 
+ (statp->f_files - dqp->q_ino.reserved) : 0; } } @@ -60,7 +60,7 @@ xfs_qm_statvfs( struct xfs_mount *mp = ip->i_mount; struct xfs_dquot *dqp; - if (!xfs_qm_dqget(mp, ip->i_d.di_projid, XFS_DQ_PROJ, false, &dqp)) { + if (!xfs_qm_dqget(mp, ip->i_d.di_projid, XFS_DQTYPE_PROJ, false, &dqp)) { xfs_fill_statvfs_from_dquot(statp, dqp); xfs_qm_dqput(dqp); } diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c index 7effd7a28136..1c542b4a5220 100644 --- a/fs/xfs/xfs_qm_syscalls.c +++ b/fs/xfs/xfs_qm_syscalls.c @@ -322,23 +322,23 @@ xfs_qm_scall_trunc_qfiles( int error = -EINVAL; if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0 || - (flags & ~XFS_DQ_ALLTYPES)) { + (flags & ~XFS_QMOPT_QUOTALL)) { xfs_debug(mp, "%s: flags=%x m_qflags=%x", __func__, flags, mp->m_qflags); return -EINVAL; } - if (flags & XFS_DQ_USER) { + if (flags & XFS_QMOPT_UQUOTA) { error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); if (error) return error; } - if (flags & XFS_DQ_GROUP) { + if (flags & XFS_QMOPT_GQUOTA) { error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); if (error) return error; } - if (flags & XFS_DQ_PROJ) + if (flags & XFS_QMOPT_PQUOTA) error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_pquotino); return error; @@ -437,20 +437,73 @@ xfs_qm_scall_quotaon( (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK) /* + * Adjust limits of this quota, and the defaults if passed in. Returns true + * if the new limits made sense and were applied, false otherwise. + */ +static inline bool +xfs_setqlim_limits( + struct xfs_mount *mp, + struct xfs_dquot_res *res, + struct xfs_quota_limits *qlim, + xfs_qcnt_t hard, + xfs_qcnt_t soft, + const char *tag) +{ + /* The hard limit can't be less than the soft limit. */ + if (hard != 0 && hard < soft) { + xfs_debug(mp, "%shard %lld < %ssoft %lld", tag, hard, tag, + soft); + return false; + } + + res->hardlimit = hard; + res->softlimit = soft; + if (qlim) { + qlim->hard = hard; + qlim->soft = soft; + } + + return true; +} + +static inline void +xfs_setqlim_warns( + struct xfs_dquot_res *res, + struct xfs_quota_limits *qlim, + int warns) +{ + res->warnings = warns; + if (qlim) + qlim->warn = warns; +} + +static inline void +xfs_setqlim_timer( + struct xfs_dquot_res *res, + struct xfs_quota_limits *qlim, + s64 timer) +{ + res->timer = timer; + if (qlim) + qlim->time = timer; +} + +/* * Adjust quota limits, and start/stop timers accordingly. */ int xfs_qm_scall_setqlim( struct xfs_mount *mp, xfs_dqid_t id, - uint type, + xfs_dqtype_t type, struct qc_dqblk *newlim) { struct xfs_quotainfo *q = mp->m_quotainfo; - struct xfs_disk_dquot *ddq; struct xfs_dquot *dqp; struct xfs_trans *tp; struct xfs_def_quota *defq; + struct xfs_dquot_res *res; + struct xfs_quota_limits *qlim; int error; xfs_qcnt_t hard, soft; @@ -488,105 +541,72 @@ xfs_qm_scall_setqlim( xfs_dqlock(dqp); xfs_trans_dqjoin(tp, dqp); - ddq = &dqp->q_core; /* + * Update quota limits, warnings, and timers, and the defaults + * if we're touching id == 0. + * * Make sure that hardlimits are >= soft limits before changing. + * + * Update warnings counter(s) if requested. + * + * Timelimits for the super user set the relative time the other users + * can be over quota for this file system. If it is zero a default is + * used. Ditto for the default soft and hard limit values (already + * done, above), and for warnings. + * + * For other IDs, userspace can bump out the grace period if over + * the soft limit. */ + + /* Blocks on the data device. */ hard = (newlim->d_fieldmask & QC_SPC_HARD) ? 
(xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) : - be64_to_cpu(ddq->d_blk_hardlimit); + dqp->q_blk.hardlimit; soft = (newlim->d_fieldmask & QC_SPC_SOFT) ? (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) : - be64_to_cpu(ddq->d_blk_softlimit); - if (hard == 0 || hard >= soft) { - ddq->d_blk_hardlimit = cpu_to_be64(hard); - ddq->d_blk_softlimit = cpu_to_be64(soft); + dqp->q_blk.softlimit; + res = &dqp->q_blk; + qlim = id == 0 ? &defq->blk : NULL; + + if (xfs_setqlim_limits(mp, res, qlim, hard, soft, "blk")) xfs_dquot_set_prealloc_limits(dqp); - if (id == 0) { - defq->bhardlimit = hard; - defq->bsoftlimit = soft; - } - } else { - xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft); - } + if (newlim->d_fieldmask & QC_SPC_WARNS) + xfs_setqlim_warns(res, qlim, newlim->d_spc_warns); + if (newlim->d_fieldmask & QC_SPC_TIMER) + xfs_setqlim_timer(res, qlim, newlim->d_spc_timer); + + /* Blocks on the realtime device. */ hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ? (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) : - be64_to_cpu(ddq->d_rtb_hardlimit); + dqp->q_rtb.hardlimit; soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ? (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) : - be64_to_cpu(ddq->d_rtb_softlimit); - if (hard == 0 || hard >= soft) { - ddq->d_rtb_hardlimit = cpu_to_be64(hard); - ddq->d_rtb_softlimit = cpu_to_be64(soft); - if (id == 0) { - defq->rtbhardlimit = hard; - defq->rtbsoftlimit = soft; - } - } else { - xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft); - } + dqp->q_rtb.softlimit; + res = &dqp->q_rtb; + qlim = id == 0 ? &defq->rtb : NULL; + xfs_setqlim_limits(mp, res, qlim, hard, soft, "rtb"); + if (newlim->d_fieldmask & QC_RT_SPC_WARNS) + xfs_setqlim_warns(res, qlim, newlim->d_rt_spc_warns); + if (newlim->d_fieldmask & QC_RT_SPC_TIMER) + xfs_setqlim_timer(res, qlim, newlim->d_rt_spc_timer); + + /* Inodes */ hard = (newlim->d_fieldmask & QC_INO_HARD) ? (xfs_qcnt_t) newlim->d_ino_hardlimit : - be64_to_cpu(ddq->d_ino_hardlimit); + dqp->q_ino.hardlimit; soft = (newlim->d_fieldmask & QC_INO_SOFT) ? (xfs_qcnt_t) newlim->d_ino_softlimit : - be64_to_cpu(ddq->d_ino_softlimit); - if (hard == 0 || hard >= soft) { - ddq->d_ino_hardlimit = cpu_to_be64(hard); - ddq->d_ino_softlimit = cpu_to_be64(soft); - if (id == 0) { - defq->ihardlimit = hard; - defq->isoftlimit = soft; - } - } else { - xfs_debug(mp, "ihard %Ld < isoft %Ld", hard, soft); - } + dqp->q_ino.softlimit; + res = &dqp->q_ino; + qlim = id == 0 ? &defq->ino : NULL; - /* - * Update warnings counter(s) if requested - */ - if (newlim->d_fieldmask & QC_SPC_WARNS) - ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns); + xfs_setqlim_limits(mp, res, qlim, hard, soft, "ino"); if (newlim->d_fieldmask & QC_INO_WARNS) - ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns); - if (newlim->d_fieldmask & QC_RT_SPC_WARNS) - ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns); - - if (id == 0) { - if (newlim->d_fieldmask & QC_SPC_WARNS) - defq->bwarnlimit = newlim->d_spc_warns; - if (newlim->d_fieldmask & QC_INO_WARNS) - defq->iwarnlimit = newlim->d_ino_warns; - if (newlim->d_fieldmask & QC_RT_SPC_WARNS) - defq->rtbwarnlimit = newlim->d_rt_spc_warns; - } - - /* - * Timelimits for the super user set the relative time the other users - * can be over quota for this file system. If it is zero a default is - * used. Ditto for the default soft and hard limit values (already - * done, above), and for warnings. - * - * For other IDs, userspace can bump out the grace period if over - * the soft limit. 
- */ - if (newlim->d_fieldmask & QC_SPC_TIMER) - ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer); + xfs_setqlim_warns(res, qlim, newlim->d_ino_warns); if (newlim->d_fieldmask & QC_INO_TIMER) - ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer); - if (newlim->d_fieldmask & QC_RT_SPC_TIMER) - ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer); - - if (id == 0) { - if (newlim->d_fieldmask & QC_SPC_TIMER) - defq->btimelimit = newlim->d_spc_timer; - if (newlim->d_fieldmask & QC_INO_TIMER) - defq->itimelimit = newlim->d_ino_timer; - if (newlim->d_fieldmask & QC_RT_SPC_TIMER) - defq->rtbtimelimit = newlim->d_rt_spc_timer; - } + xfs_setqlim_timer(res, qlim, newlim->d_ino_timer); if (id != 0) { /* @@ -596,9 +616,9 @@ xfs_qm_scall_setqlim( * is on or off. We don't really want to bother with iterating * over all ondisk dquots and turning the timers on/off. */ - xfs_qm_adjust_dqtimers(mp, dqp); + xfs_qm_adjust_dqtimers(dqp); } - dqp->dq_flags |= XFS_DQ_DIRTY; + dqp->q_flags |= XFS_DQFLAG_DIRTY; xfs_trans_log_dquot(tp, dqp); error = xfs_trans_commit(tp); @@ -614,58 +634,46 @@ out_unlock: static void xfs_qm_scall_getquota_fill_qc( struct xfs_mount *mp, - uint type, + xfs_dqtype_t type, const struct xfs_dquot *dqp, struct qc_dqblk *dst) { memset(dst, 0, sizeof(*dst)); - dst->d_spc_hardlimit = - XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); - dst->d_spc_softlimit = - XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); - dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); - dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); - dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount); - dst->d_ino_count = dqp->q_res_icount; - dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer); - dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer); - dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns); - dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns); - dst->d_rt_spc_hardlimit = - XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); - dst->d_rt_spc_softlimit = - XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); - dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount); - dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer); - dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns); + dst->d_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_blk.hardlimit); + dst->d_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_blk.softlimit); + dst->d_ino_hardlimit = dqp->q_ino.hardlimit; + dst->d_ino_softlimit = dqp->q_ino.softlimit; + dst->d_space = XFS_FSB_TO_B(mp, dqp->q_blk.reserved); + dst->d_ino_count = dqp->q_ino.reserved; + dst->d_spc_timer = dqp->q_blk.timer; + dst->d_ino_timer = dqp->q_ino.timer; + dst->d_ino_warns = dqp->q_ino.warnings; + dst->d_spc_warns = dqp->q_blk.warnings; + dst->d_rt_spc_hardlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.hardlimit); + dst->d_rt_spc_softlimit = XFS_FSB_TO_B(mp, dqp->q_rtb.softlimit); + dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_rtb.reserved); + dst->d_rt_spc_timer = dqp->q_rtb.timer; + dst->d_rt_spc_warns = dqp->q_rtb.warnings; /* * Internally, we don't reset all the timers when quota enforcement * gets turned off. No need to confuse the user level code, * so return zeroes in that case. 
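A concrete example of the validation the new xfs_setqlim_limits() helper performs, with illustrative numbers:

        /*
         * Setting hard = 100 blocks and soft = 200 blocks is rejected: the
         * helper logs "blkhard 100 < blksoft 200" via xfs_debug() and returns
         * false, so neither the dquot nor the id == 0 defaults are changed.
         * hard == 0 means "no hard limit" and is always accepted, as is any
         * hard >= soft; on success the limits are applied to the dquot and,
         * when qlim is non-NULL (id == 0), to the filesystem-wide defaults.
         */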
*/ - if ((!XFS_IS_UQUOTA_ENFORCED(mp) && - dqp->q_core.d_flags == XFS_DQ_USER) || - (!XFS_IS_GQUOTA_ENFORCED(mp) && - dqp->q_core.d_flags == XFS_DQ_GROUP) || - (!XFS_IS_PQUOTA_ENFORCED(mp) && - dqp->q_core.d_flags == XFS_DQ_PROJ)) { + if (!xfs_dquot_is_enforced(dqp)) { dst->d_spc_timer = 0; dst->d_ino_timer = 0; dst->d_rt_spc_timer = 0; } #ifdef DEBUG - if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) || - (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) || - (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) && - dqp->q_core.d_id != 0) { + if (xfs_dquot_is_enforced(dqp) && dqp->q_id != 0) { if ((dst->d_space > dst->d_spc_softlimit) && (dst->d_spc_softlimit > 0)) { ASSERT(dst->d_spc_timer != 0); } - if ((dst->d_ino_count > dst->d_ino_softlimit) && - (dst->d_ino_softlimit > 0)) { + if ((dst->d_ino_count > dqp->q_ino.softlimit) && + (dqp->q_ino.softlimit > 0)) { ASSERT(dst->d_ino_timer != 0); } } @@ -677,7 +685,7 @@ int xfs_qm_scall_getquota( struct xfs_mount *mp, xfs_dqid_t id, - uint type, + xfs_dqtype_t type, struct qc_dqblk *dst) { struct xfs_dquot *dqp; @@ -715,7 +723,7 @@ int xfs_qm_scall_getquota_next( struct xfs_mount *mp, xfs_dqid_t *id, - uint type, + xfs_dqtype_t type, struct qc_dqblk *dst) { struct xfs_dquot *dqp; @@ -726,7 +734,7 @@ xfs_qm_scall_getquota_next( return error; /* Fill in the ID we actually read from disk */ - *id = be32_to_cpu(dqp->q_core.d_id); + *id = dqp->q_id; xfs_qm_scall_getquota_fill_qc(mp, type, dqp, dst); diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h index aa8fc1f55fbd..06b22e35fc90 100644 --- a/fs/xfs/xfs_quota.h +++ b/fs/xfs/xfs_quota.h @@ -13,6 +13,7 @@ */ struct xfs_trans; +struct xfs_buf; /* * This check is done typically without holding the inode lock; @@ -38,14 +39,14 @@ struct xfs_trans; static inline uint xfs_quota_chkd_flag( - uint dqtype) + xfs_dqtype_t type) { - switch (dqtype) { - case XFS_DQ_USER: + switch (type) { + case XFS_DQTYPE_USER: return XFS_UQUOTA_CHKD; - case XFS_DQ_GROUP: + case XFS_DQTYPE_GROUP: return XFS_GQUOTA_CHKD; - case XFS_DQ_PROJ: + case XFS_DQTYPE_PROJ: return XFS_PQUOTA_CHKD; default: return 0; @@ -107,6 +108,8 @@ extern void xfs_qm_mount_quotas(struct xfs_mount *); extern void xfs_qm_unmount(struct xfs_mount *); extern void xfs_qm_unmount_quotas(struct xfs_mount *); +void xfs_dquot_done(struct xfs_buf *); + #else static inline int xfs_qm_vop_dqalloc(struct xfs_inode *ip, kuid_t kuid, kgid_t kgid, @@ -148,6 +151,12 @@ static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp, #define xfs_qm_mount_quotas(mp) #define xfs_qm_unmount(mp) #define xfs_qm_unmount_quotas(mp) + +static inline void xfs_dquot_done(struct xfs_buf *bp) +{ + return; +} + #endif /* CONFIG_XFS_QUOTA */ #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \ diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c index bf809b77a316..d27c0e852c0b 100644 --- a/fs/xfs/xfs_quotaops.c +++ b/fs/xfs/xfs_quotaops.c @@ -37,12 +37,12 @@ xfs_qm_fill_state( tstate->flags |= QCI_SYSFILE; tstate->blocks = ip->i_d.di_nblocks; tstate->nextents = ip->i_df.if_nextents; - tstate->spc_timelimit = (u32)defq->btimelimit; - tstate->ino_timelimit = (u32)defq->itimelimit; - tstate->rt_spc_timelimit = (u32)defq->rtbtimelimit; - tstate->spc_warnlimit = defq->bwarnlimit; - tstate->ino_warnlimit = defq->iwarnlimit; - tstate->rt_spc_warnlimit = defq->rtbwarnlimit; + tstate->spc_timelimit = (u32)defq->blk.time; + tstate->ino_timelimit = (u32)defq->ino.time; + tstate->rt_spc_timelimit = (u32)defq->rtb.time; + tstate->spc_warnlimit = 
defq->blk.warn; + tstate->ino_warnlimit = defq->ino.warn; + tstate->rt_spc_warnlimit = defq->rtb.warn; if (tempqip) xfs_irele(ip); } @@ -85,16 +85,16 @@ xfs_fs_get_quota_state( return 0; } -STATIC int +STATIC xfs_dqtype_t xfs_quota_type(int type) { switch (type) { case USRQUOTA: - return XFS_DQ_USER; + return XFS_DQTYPE_USER; case GRPQUOTA: - return XFS_DQ_GROUP; + return XFS_DQTYPE_GROUP; default: - return XFS_DQ_PROJ; + return XFS_DQTYPE_PROJ; } } @@ -205,11 +205,11 @@ xfs_fs_rm_xquota( return -EINVAL; if (uflags & FS_USER_QUOTA) - flags |= XFS_DQ_USER; + flags |= XFS_QMOPT_UQUOTA; if (uflags & FS_GROUP_QUOTA) - flags |= XFS_DQ_GROUP; + flags |= XFS_QMOPT_GQUOTA; if (uflags & FS_PROJ_QUOTA) - flags |= XFS_DQ_PROJ; + flags |= XFS_QMOPT_PQUOTA; return xfs_qm_scall_trunc_qfiles(mp, flags); } diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c index c81639891e29..ca93b6488377 100644 --- a/fs/xfs/xfs_refcount_item.c +++ b/fs/xfs/xfs_refcount_item.c @@ -143,7 +143,8 @@ xfs_cui_init( cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents), 0); else - cuip = kmem_zone_zalloc(xfs_cui_zone, 0); + cuip = kmem_cache_zalloc(xfs_cui_zone, + GFP_KERNEL | __GFP_NOFAIL); xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops); cuip->cui_format.cui_nextents = nextents; @@ -220,7 +221,7 @@ xfs_trans_get_cud( { struct xfs_cud_log_item *cudp; - cudp = kmem_zone_zalloc(xfs_cud_zone, 0); + cudp = kmem_cache_zalloc(xfs_cud_zone, GFP_KERNEL | __GFP_NOFAIL); xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD, &xfs_cud_item_ops); cudp->cud_cuip = cuip; @@ -484,7 +485,7 @@ xfs_cui_item_recover( * transaction. Normally, any work that needs to be deferred * gets attached to the same defer_ops that scheduled the * refcount update. However, we're in log recovery here, so we - * we use the passed in defer_ops and to finish up any work that + * use the passed in defer_ops and to finish up any work that * doesn't fit. We need to reserve enough blocks to handle a * full btree split on either end of the refcount range. */ diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index 107bf2a2f344..16098dc42add 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c @@ -179,7 +179,7 @@ xfs_reflink_trim_around_shared( int error = 0; /* Holes, unwritten, and delalloc extents cannot be shared */ - if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_real_extent(irec)) { + if (!xfs_is_cow_inode(ip) || !xfs_bmap_is_written_extent(irec)) { *shared = false; return 0; } @@ -655,7 +655,7 @@ xfs_reflink_end_cow_extent( * preallocations can leak into the range we are called upon, and we * need to skip them. */ - if (!xfs_bmap_is_real_extent(&got)) { + if (!xfs_bmap_is_written_extent(&got)) { *end_fsb = del.br_startoff; goto out_cancel; } @@ -721,7 +721,7 @@ xfs_reflink_end_cow( * repeatedly cycles the ILOCK to allocate one transaction per remapped * extent. * - * If we're being called by writeback then the the pages will still + * If we're being called by writeback then the pages will still * have PageWriteback set, which prevents races with reflink remapping * and truncate. Reflink remapping prevents races with writeback by * taking the iolock and mmaplock before flushing the pages and @@ -984,40 +984,28 @@ xfs_reflink_ag_has_free_space( } /* - * Unmap a range of blocks from a file, then map other blocks into the hole. - * The range to unmap is (destoff : destoff + srcioff + irec->br_blockcount). - * The extent irec is mapped into dest at irec->br_startoff. + * Remap the given extent into the file. 
The dmap blockcount will be set to + * the number of blocks that were actually remapped. */ STATIC int xfs_reflink_remap_extent( struct xfs_inode *ip, - struct xfs_bmbt_irec *irec, - xfs_fileoff_t destoff, + struct xfs_bmbt_irec *dmap, xfs_off_t new_isize) { + struct xfs_bmbt_irec smap; struct xfs_mount *mp = ip->i_mount; - bool real_extent = xfs_bmap_is_real_extent(irec); struct xfs_trans *tp; - unsigned int resblks; - struct xfs_bmbt_irec uirec; - xfs_filblks_t rlen; - xfs_filblks_t unmap_len; xfs_off_t newlen; + int64_t qres, qdelta; + unsigned int resblks; + bool smap_real; + bool dmap_written = xfs_bmap_is_written_extent(dmap); + int nimaps; int error; - unmap_len = irec->br_startoff + irec->br_blockcount - destoff; - trace_xfs_reflink_punch_range(ip, destoff, unmap_len); - - /* No reflinking if we're low on space */ - if (real_extent) { - error = xfs_reflink_ag_has_free_space(mp, - XFS_FSB_TO_AGNO(mp, irec->br_startblock)); - if (error) - goto out; - } - /* Start a rolling transaction to switch the mappings */ - resblks = XFS_EXTENTADD_SPACE_RES(ip->i_mount, XFS_DATA_FORK); + resblks = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp); if (error) goto out; @@ -1025,87 +1013,147 @@ xfs_reflink_remap_extent( xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); - /* If we're not just clearing space, then do we have enough quota? */ - if (real_extent) { - error = xfs_trans_reserve_quota_nblks(tp, ip, - irec->br_blockcount, 0, XFS_QMOPT_RES_REGBLKS); + /* + * Read what's currently mapped in the destination file into smap. + * If smap isn't a hole, we will have to remove it before we can add + * dmap to the destination file. + */ + nimaps = 1; + error = xfs_bmapi_read(ip, dmap->br_startoff, dmap->br_blockcount, + &smap, &nimaps, 0); + if (error) + goto out_cancel; + ASSERT(nimaps == 1 && smap.br_startoff == dmap->br_startoff); + smap_real = xfs_bmap_is_real_extent(&smap); + + /* + * We can only remap as many blocks as the smaller of the two extent + * maps, because we can only remap one extent at a time. + */ + dmap->br_blockcount = min(dmap->br_blockcount, smap.br_blockcount); + ASSERT(dmap->br_blockcount == smap.br_blockcount); + + trace_xfs_reflink_remap_extent_dest(ip, &smap); + + /* + * Two extents mapped to the same physical block must not have + * different states; that's filesystem corruption. Move on to the next + * extent if they're both holes or both the same physical extent. + */ + if (dmap->br_startblock == smap.br_startblock) { + if (dmap->br_state != smap.br_state) + error = -EFSCORRUPTED; + goto out_cancel; + } + + /* If both extents are unwritten, leave them alone. */ + if (dmap->br_state == XFS_EXT_UNWRITTEN && + smap.br_state == XFS_EXT_UNWRITTEN) + goto out_cancel; + + /* No reflinking if the AG of the dest mapping is low on space. */ + if (dmap_written) { + error = xfs_reflink_ag_has_free_space(mp, + XFS_FSB_TO_AGNO(mp, dmap->br_startblock)); if (error) goto out_cancel; } - trace_xfs_reflink_remap(ip, irec->br_startoff, - irec->br_blockcount, irec->br_startblock); - - /* Unmap the old blocks in the data fork. */ - rlen = unmap_len; - while (rlen) { - ASSERT(tp->t_firstblock == NULLFSBLOCK); - error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1); + /* + * Compute quota reservation if we think the quota block counter for + * this file could increase. + * + * Adding a written extent to the extent map can cause a bmbt split, + * and removing a mapped extent from the extent can cause a bmbt split. 
+ * The two operations cannot both cause a split since they operate on + * the same index in the bmap btree, so we only need a reservation for + * one bmbt split if either thing is happening. + * + * If we are mapping a written extent into the file, we need to have + * enough quota block count reservation to handle the blocks in that + * extent. We log only the delta to the quota block counts, so if the + * extent we're unmapping also has blocks allocated to it, we don't + * need a quota reservation for the extent itself. + * + * Note that if we're replacing a delalloc reservation with a written + * extent, we have to take the full quota reservation because removing + * the delalloc reservation gives the block count back to the quota + * count. This is suboptimal, but the VFS flushed the dest range + * before we started. That should have removed all the delalloc + * reservations, but we code defensively. + */ + qres = qdelta = 0; + if (smap_real || dmap_written) + qres = XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); + if (!smap_real && dmap_written) + qres += dmap->br_blockcount; + if (qres > 0) { + error = xfs_trans_reserve_quota_nblks(tp, ip, qres, 0, + XFS_QMOPT_RES_REGBLKS); if (error) goto out_cancel; + } + if (smap_real) { /* - * Trim the extent to whatever got unmapped. - * Remember, bunmapi works backwards. + * If the extent we're unmapping is backed by storage (written + * or not), unmap the extent and drop its refcount. */ - uirec.br_startblock = irec->br_startblock + rlen; - uirec.br_startoff = irec->br_startoff + rlen; - uirec.br_blockcount = unmap_len - rlen; - uirec.br_state = irec->br_state; - unmap_len = rlen; - - /* If this isn't a real mapping, we're done. */ - if (!real_extent || uirec.br_blockcount == 0) - goto next_extent; + xfs_bmap_unmap_extent(tp, ip, &smap); + xfs_refcount_decrease_extent(tp, &smap); + qdelta -= smap.br_blockcount; + } else if (smap.br_startblock == DELAYSTARTBLOCK) { + xfs_filblks_t len = smap.br_blockcount; - trace_xfs_reflink_remap(ip, uirec.br_startoff, - uirec.br_blockcount, uirec.br_startblock); - - /* Update the refcount tree */ - xfs_refcount_increase_extent(tp, &uirec); - - /* Map the new blocks into the data fork. */ - xfs_bmap_map_extent(tp, ip, &uirec); + /* + * If the extent we're unmapping is a delalloc reservation, + * we can use the regular bunmapi function to release the + * incore state. Dropping the delalloc reservation takes care + * of the quota reservation for us. + */ + error = __xfs_bunmapi(NULL, ip, smap.br_startoff, &len, 0, 1); + if (error) + goto out_cancel; + ASSERT(len == 0); + } - /* Update quota accounting. */ - xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, - uirec.br_blockcount); + /* + * If the extent we're sharing is backed by written storage, increase + * its refcount and map it into the file. + */ + if (dmap_written) { + xfs_refcount_increase_extent(tp, dmap); + xfs_bmap_map_extent(tp, ip, dmap); + qdelta += dmap->br_blockcount; + } - /* Update dest isize if needed. */ - newlen = XFS_FSB_TO_B(mp, - uirec.br_startoff + uirec.br_blockcount); - newlen = min_t(xfs_off_t, newlen, new_isize); - if (newlen > i_size_read(VFS_I(ip))) { - trace_xfs_reflink_update_inode_size(ip, newlen); - i_size_write(VFS_I(ip), newlen); - ip->i_d.di_size = newlen; - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - } + xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_BCOUNT, qdelta); -next_extent: - /* Process all the deferred stuff. */ - error = xfs_defer_finish(&tp); - if (error) - goto out_cancel; + /* Update dest isize if needed. 
*/ + newlen = XFS_FSB_TO_B(mp, dmap->br_startoff + dmap->br_blockcount); + newlen = min_t(xfs_off_t, newlen, new_isize); + if (newlen > i_size_read(VFS_I(ip))) { + trace_xfs_reflink_update_inode_size(ip, newlen); + i_size_write(VFS_I(ip), newlen); + ip->i_d.di_size = newlen; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); } + /* Commit everything and unlock. */ error = xfs_trans_commit(tp); - xfs_iunlock(ip, XFS_ILOCK_EXCL); - if (error) - goto out; - return 0; + goto out_unlock; out_cancel: xfs_trans_cancel(tp); +out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL); out: - trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_); + if (error) + trace_xfs_reflink_remap_extent_error(ip, error, _RET_IP_); return error; } -/* - * Iteratively remap one file's extents (and holes) to another's. - */ +/* Remap a range of one file to the other. */ int xfs_reflink_remap_blocks( struct xfs_inode *src, @@ -1116,25 +1164,22 @@ xfs_reflink_remap_blocks( loff_t *remapped) { struct xfs_bmbt_irec imap; - xfs_fileoff_t srcoff; - xfs_fileoff_t destoff; + struct xfs_mount *mp = src->i_mount; + xfs_fileoff_t srcoff = XFS_B_TO_FSBT(mp, pos_in); + xfs_fileoff_t destoff = XFS_B_TO_FSBT(mp, pos_out); xfs_filblks_t len; - xfs_filblks_t range_len; xfs_filblks_t remapped_len = 0; xfs_off_t new_isize = pos_out + remap_len; int nimaps; int error = 0; - destoff = XFS_B_TO_FSBT(src->i_mount, pos_out); - srcoff = XFS_B_TO_FSBT(src->i_mount, pos_in); - len = XFS_B_TO_FSB(src->i_mount, remap_len); + len = min_t(xfs_filblks_t, XFS_B_TO_FSB(mp, remap_len), + XFS_MAX_FILEOFF); - /* drange = (destoff, destoff + len); srange = (srcoff, srcoff + len) */ - while (len) { - uint lock_mode; + trace_xfs_reflink_remap_blocks(src, srcoff, len, dest, destoff); - trace_xfs_reflink_remap_blocks_loop(src, srcoff, len, - dest, destoff); + while (len > 0) { + unsigned int lock_mode; /* Read extent from the source file */ nimaps = 1; @@ -1143,18 +1188,25 @@ xfs_reflink_remap_blocks( xfs_iunlock(src, lock_mode); if (error) break; - ASSERT(nimaps == 1); - - trace_xfs_reflink_remap_imap(src, srcoff, len, XFS_DATA_FORK, - &imap); + /* + * The caller supposedly flushed all dirty pages in the source + * file range, which means that writeback should have allocated + * or deleted all delalloc reservations in that range. If we + * find one, that's a good sign that something is seriously + * wrong here. + */ + ASSERT(nimaps == 1 && imap.br_startoff == srcoff); + if (imap.br_startblock == DELAYSTARTBLOCK) { + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + error = -EFSCORRUPTED; + break; + } - /* Translate imap into the destination file. */ - range_len = imap.br_startoff + imap.br_blockcount - srcoff; - imap.br_startoff += destoff - srcoff; + trace_xfs_reflink_remap_extent_src(src, &imap); - /* Clear dest from destoff to the end of imap and map it in. */ - error = xfs_reflink_remap_extent(dest, &imap, destoff, - new_isize); + /* Remap into the destination file at the given offset. 
*/ + imap.br_startoff = destoff; + error = xfs_reflink_remap_extent(dest, &imap, new_isize); if (error) break; @@ -1164,10 +1216,10 @@ xfs_reflink_remap_blocks( } /* Advance drange/srange */ - srcoff += range_len; - destoff += range_len; - len -= range_len; - remapped_len += range_len; + srcoff += imap.br_blockcount; + destoff += imap.br_blockcount; + len -= imap.br_blockcount; + remapped_len += imap.br_blockcount; } if (error) @@ -1178,81 +1230,6 @@ xfs_reflink_remap_blocks( } /* - * Grab the exclusive iolock for a data copy from src to dest, making sure to - * abide vfs locking order (lowest pointer value goes first) and breaking the - * layout leases before proceeding. The loop is needed because we cannot call - * the blocking break_layout() with the iolocks held, and therefore have to - * back out both locks. - */ -static int -xfs_iolock_two_inodes_and_break_layout( - struct inode *src, - struct inode *dest) -{ - int error; - - if (src > dest) - swap(src, dest); - -retry: - /* Wait to break both inodes' layouts before we start locking. */ - error = break_layout(src, true); - if (error) - return error; - if (src != dest) { - error = break_layout(dest, true); - if (error) - return error; - } - - /* Lock one inode and make sure nobody got in and leased it. */ - inode_lock(src); - error = break_layout(src, false); - if (error) { - inode_unlock(src); - if (error == -EWOULDBLOCK) - goto retry; - return error; - } - - if (src == dest) - return 0; - - /* Lock the other inode and make sure nobody got in and leased it. */ - inode_lock_nested(dest, I_MUTEX_NONDIR2); - error = break_layout(dest, false); - if (error) { - inode_unlock(src); - inode_unlock(dest); - if (error == -EWOULDBLOCK) - goto retry; - return error; - } - - return 0; -} - -/* Unlock both inodes after they've been prepped for a range clone. */ -void -xfs_reflink_remap_unlock( - struct file *file_in, - struct file *file_out) -{ - struct inode *inode_in = file_inode(file_in); - struct xfs_inode *src = XFS_I(inode_in); - struct inode *inode_out = file_inode(file_out); - struct xfs_inode *dest = XFS_I(inode_out); - bool same_inode = (inode_in == inode_out); - - xfs_iunlock(dest, XFS_MMAPLOCK_EXCL); - if (!same_inode) - xfs_iunlock(src, XFS_MMAPLOCK_EXCL); - inode_unlock(inode_out); - if (!same_inode) - inode_unlock(inode_in); -} - -/* * If we're reflinking to a point past the destination file's EOF, we must * zero any speculative post-EOF preallocations that sit between the old EOF * and the destination file offset. @@ -1314,18 +1291,12 @@ xfs_reflink_remap_prep( struct xfs_inode *src = XFS_I(inode_in); struct inode *inode_out = file_inode(file_out); struct xfs_inode *dest = XFS_I(inode_out); - bool same_inode = (inode_in == inode_out); - ssize_t ret; + int ret; /* Lock both files against IO */ - ret = xfs_iolock_two_inodes_and_break_layout(inode_in, inode_out); + ret = xfs_ilock2_io_mmap(src, dest); if (ret) return ret; - if (same_inode) - xfs_ilock(src, XFS_MMAPLOCK_EXCL); - else - xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest, - XFS_MMAPLOCK_EXCL); /* Check file eligibility and prepare for block sharing. 
*/ ret = -EINVAL; @@ -1339,7 +1310,7 @@ xfs_reflink_remap_prep( ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, len, remap_flags); - if (ret < 0 || *len == 0) + if (ret || *len == 0) goto out_unlock; /* Attach dquots to dest inode before changing block map */ @@ -1374,9 +1345,9 @@ xfs_reflink_remap_prep( if (ret) goto out_unlock; - return 1; + return 0; out_unlock: - xfs_reflink_remap_unlock(file_in, file_out); + xfs_iunlock2_io_mmap(src, dest); return ret; } diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h index 3e4fd46373ab..487b00434b96 100644 --- a/fs/xfs/xfs_reflink.h +++ b/fs/xfs/xfs_reflink.h @@ -56,7 +56,5 @@ extern int xfs_reflink_remap_blocks(struct xfs_inode *src, loff_t pos_in, loff_t *remapped); extern int xfs_reflink_update_dest(struct xfs_inode *dest, xfs_off_t newlen, xfs_extlen_t cowextsize, unsigned int remap_flags); -extern void xfs_reflink_remap_unlock(struct file *file_in, - struct file *file_out); #endif /* __XFS_REFLINK_H */ diff --git a/fs/xfs/xfs_rmap_item.c b/fs/xfs/xfs_rmap_item.c index a86599db20a6..dc5b0753cd51 100644 --- a/fs/xfs/xfs_rmap_item.c +++ b/fs/xfs/xfs_rmap_item.c @@ -141,7 +141,8 @@ xfs_rui_init( if (nextents > XFS_RUI_MAX_FAST_EXTENTS) ruip = kmem_zalloc(xfs_rui_log_item_sizeof(nextents), 0); else - ruip = kmem_zone_zalloc(xfs_rui_zone, 0); + ruip = kmem_cache_zalloc(xfs_rui_zone, + GFP_KERNEL | __GFP_NOFAIL); xfs_log_item_init(mp, &ruip->rui_item, XFS_LI_RUI, &xfs_rui_item_ops); ruip->rui_format.rui_nextents = nextents; @@ -243,7 +244,7 @@ xfs_trans_get_rud( { struct xfs_rud_log_item *rudp; - rudp = kmem_zone_zalloc(xfs_rud_zone, 0); + rudp = kmem_cache_zalloc(xfs_rud_zone, GFP_KERNEL | __GFP_NOFAIL); xfs_log_item_init(tp->t_mountp, &rudp->rud_item, XFS_LI_RUD, &xfs_rud_item_ops); rudp->rud_ruip = ruip; diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 379cbff438bc..71ac6c1cdc36 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -890,9 +890,6 @@ xfs_quiesce_attr( /* force the log to unpin objects from the now complete transactions */ xfs_log_force(mp, XFS_LOG_SYNC); - /* reclaim inodes to do any IO before the freeze completes */ - xfs_reclaim_inodes(mp, 0); - xfs_reclaim_inodes(mp, SYNC_WAIT); /* Push the superblock and write an unmount record */ error = xfs_log_sbcount(mp); @@ -913,11 +910,21 @@ xfs_fs_freeze( struct super_block *sb) { struct xfs_mount *mp = XFS_M(sb); + unsigned int flags; + int ret; + /* + * The filesystem is now frozen far enough that memory reclaim + * cannot safely operate on the filesystem. Hence we need to + * set a GFP_NOFS context here to avoid recursion deadlocks. + */ + flags = memalloc_nofs_save(); xfs_stop_block_reaping(mp); xfs_save_resvblks(mp); xfs_quiesce_attr(mp); - return xfs_sync_sb(mp, true); + ret = xfs_sync_sb(mp, true); + memalloc_nofs_restore(flags); + return ret; } STATIC int @@ -1714,6 +1721,10 @@ xfs_fc_reconfigure( int flags = fc->sb_flags; int error; + /* version 5 superblocks always support version counters. */ + if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) + fc->sb_flags |= SB_I_VERSION; + error = xfs_fc_validate_params(new_mp); if (error) return error; diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h index e9f810fc6731..43585850f154 100644 --- a/fs/xfs/xfs_sysfs.h +++ b/fs/xfs/xfs_sysfs.h @@ -32,9 +32,11 @@ xfs_sysfs_init( struct xfs_kobj *parent_kobj, const char *name) { + struct kobject *parent; + + parent = parent_kobj ? 
&parent_kobj->kobject : NULL; init_completion(&kobj->complete); - return kobject_init_and_add(&kobj->kobject, ktype, - &parent_kobj->kobject, "%s", name); + return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name); } static inline void diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h index 460136628a79..abb1d859f226 100644 --- a/fs/xfs/xfs_trace.h +++ b/fs/xfs/xfs_trace.h @@ -36,6 +36,7 @@ struct xfs_owner_info; struct xfs_trans_res; struct xfs_inobt_rec_incore; union xfs_btree_ptr; +struct xfs_dqtrx; #define XFS_ATTR_FILTER_FLAGS \ { XFS_ATTR_ROOT, "ROOT" }, \ @@ -864,44 +865,65 @@ DECLARE_EVENT_CLASS(xfs_dquot_class, TP_STRUCT__entry( __field(dev_t, dev) __field(u32, id) + __field(xfs_dqtype_t, type) __field(unsigned, flags) __field(unsigned, nrefs) __field(unsigned long long, res_bcount) + __field(unsigned long long, res_rtbcount) + __field(unsigned long long, res_icount) + __field(unsigned long long, bcount) + __field(unsigned long long, rtbcount) __field(unsigned long long, icount) + __field(unsigned long long, blk_hardlimit) __field(unsigned long long, blk_softlimit) + __field(unsigned long long, rtb_hardlimit) + __field(unsigned long long, rtb_softlimit) __field(unsigned long long, ino_hardlimit) __field(unsigned long long, ino_softlimit) - ), \ + ), TP_fast_assign( __entry->dev = dqp->q_mount->m_super->s_dev; - __entry->id = be32_to_cpu(dqp->q_core.d_id); - __entry->flags = dqp->dq_flags; + __entry->id = dqp->q_id; + __entry->type = dqp->q_type; + __entry->flags = dqp->q_flags; __entry->nrefs = dqp->q_nrefs; - __entry->res_bcount = dqp->q_res_bcount; - __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); - __entry->icount = be64_to_cpu(dqp->q_core.d_icount); - __entry->blk_hardlimit = - be64_to_cpu(dqp->q_core.d_blk_hardlimit); - __entry->blk_softlimit = - be64_to_cpu(dqp->q_core.d_blk_softlimit); - __entry->ino_hardlimit = - be64_to_cpu(dqp->q_core.d_ino_hardlimit); - __entry->ino_softlimit = - be64_to_cpu(dqp->q_core.d_ino_softlimit); - ), - TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " + + __entry->res_bcount = dqp->q_blk.reserved; + __entry->res_rtbcount = dqp->q_rtb.reserved; + __entry->res_icount = dqp->q_ino.reserved; + + __entry->bcount = dqp->q_blk.count; + __entry->rtbcount = dqp->q_rtb.count; + __entry->icount = dqp->q_ino.count; + + __entry->blk_hardlimit = dqp->q_blk.hardlimit; + __entry->blk_softlimit = dqp->q_blk.softlimit; + __entry->rtb_hardlimit = dqp->q_rtb.hardlimit; + __entry->rtb_softlimit = dqp->q_rtb.softlimit; + __entry->ino_hardlimit = dqp->q_ino.hardlimit; + __entry->ino_softlimit = dqp->q_ino.softlimit; + ), + TP_printk("dev %d:%d id 0x%x type %s flags %s nrefs %u " + "res_bc 0x%llx res_rtbc 0x%llx res_ic 0x%llx " "bcnt 0x%llx bhardlimit 0x%llx bsoftlimit 0x%llx " + "rtbcnt 0x%llx rtbhardlimit 0x%llx rtbsoftlimit 0x%llx " "icnt 0x%llx ihardlimit 0x%llx isoftlimit 0x%llx]", MAJOR(__entry->dev), MINOR(__entry->dev), __entry->id, - __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), + __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS), + __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS), __entry->nrefs, __entry->res_bcount, + __entry->res_rtbcount, + __entry->res_icount, __entry->bcount, __entry->blk_hardlimit, __entry->blk_softlimit, + __entry->rtbcount, + __entry->rtb_hardlimit, + __entry->rtb_softlimit, __entry->icount, __entry->ino_hardlimit, __entry->ino_softlimit) @@ -932,6 +954,125 @@ DEFINE_DQUOT_EVENT(xfs_dqrele); DEFINE_DQUOT_EVENT(xfs_dqflush); DEFINE_DQUOT_EVENT(xfs_dqflush_force); 
DEFINE_DQUOT_EVENT(xfs_dqflush_done); +DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_before); +DEFINE_DQUOT_EVENT(xfs_trans_apply_dquot_deltas_after); + +#define XFS_QMOPT_FLAGS \ + { XFS_QMOPT_UQUOTA, "UQUOTA" }, \ + { XFS_QMOPT_PQUOTA, "PQUOTA" }, \ + { XFS_QMOPT_FORCE_RES, "FORCE_RES" }, \ + { XFS_QMOPT_SBVERSION, "SBVERSION" }, \ + { XFS_QMOPT_GQUOTA, "GQUOTA" }, \ + { XFS_QMOPT_INHERIT, "INHERIT" }, \ + { XFS_QMOPT_RES_REGBLKS, "RES_REGBLKS" }, \ + { XFS_QMOPT_RES_RTBLKS, "RES_RTBLKS" }, \ + { XFS_QMOPT_BCOUNT, "BCOUNT" }, \ + { XFS_QMOPT_ICOUNT, "ICOUNT" }, \ + { XFS_QMOPT_RTBCOUNT, "RTBCOUNT" }, \ + { XFS_QMOPT_DELBCOUNT, "DELBCOUNT" }, \ + { XFS_QMOPT_DELRTBCOUNT, "DELRTBCOUNT" }, \ + { XFS_QMOPT_RES_INOS, "RES_INOS" } + +TRACE_EVENT(xfs_trans_mod_dquot, + TP_PROTO(struct xfs_trans *tp, struct xfs_dquot *dqp, + unsigned int field, int64_t delta), + TP_ARGS(tp, dqp, field, delta), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_dqtype_t, type) + __field(unsigned int, flags) + __field(unsigned int, dqid) + __field(unsigned int, field) + __field(int64_t, delta) + ), + TP_fast_assign( + __entry->dev = tp->t_mountp->m_super->s_dev; + __entry->type = dqp->q_type; + __entry->flags = dqp->q_flags; + __entry->dqid = dqp->q_id; + __entry->field = field; + __entry->delta = delta; + ), + TP_printk("dev %d:%d dquot id 0x%x type %s flags %s field %s delta %lld", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->dqid, + __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS), + __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS), + __print_flags(__entry->field, "|", XFS_QMOPT_FLAGS), + __entry->delta) +); + +DECLARE_EVENT_CLASS(xfs_dqtrx_class, + TP_PROTO(struct xfs_dqtrx *qtrx), + TP_ARGS(qtrx), + TP_STRUCT__entry( + __field(dev_t, dev) + __field(xfs_dqtype_t, type) + __field(unsigned int, flags) + __field(u32, dqid) + + __field(uint64_t, blk_res) + __field(int64_t, bcount_delta) + __field(int64_t, delbcnt_delta) + + __field(uint64_t, rtblk_res) + __field(uint64_t, rtblk_res_used) + __field(int64_t, rtbcount_delta) + __field(int64_t, delrtb_delta) + + __field(uint64_t, ino_res) + __field(uint64_t, ino_res_used) + __field(int64_t, icount_delta) + ), + TP_fast_assign( + __entry->dev = qtrx->qt_dquot->q_mount->m_super->s_dev; + __entry->type = qtrx->qt_dquot->q_type; + __entry->flags = qtrx->qt_dquot->q_flags; + __entry->dqid = qtrx->qt_dquot->q_id; + + __entry->blk_res = qtrx->qt_blk_res; + __entry->bcount_delta = qtrx->qt_bcount_delta; + __entry->delbcnt_delta = qtrx->qt_delbcnt_delta; + + __entry->rtblk_res = qtrx->qt_rtblk_res; + __entry->rtblk_res_used = qtrx->qt_rtblk_res_used; + __entry->rtbcount_delta = qtrx->qt_rtbcount_delta; + __entry->delrtb_delta = qtrx->qt_delrtb_delta; + + __entry->ino_res = qtrx->qt_ino_res; + __entry->ino_res_used = qtrx->qt_ino_res_used; + __entry->icount_delta = qtrx->qt_icount_delta; + ), + TP_printk("dev %d:%d dquot id 0x%x type %s flags %s" + "blk_res %llu bcount_delta %lld delbcnt_delta %lld " + "rtblk_res %llu rtblk_res_used %llu rtbcount_delta %lld delrtb_delta %lld " + "ino_res %llu ino_res_used %llu icount_delta %lld", + MAJOR(__entry->dev), MINOR(__entry->dev), + __entry->dqid, + __print_flags(__entry->type, "|", XFS_DQTYPE_STRINGS), + __print_flags(__entry->flags, "|", XFS_DQFLAG_STRINGS), + + __entry->blk_res, + __entry->bcount_delta, + __entry->delbcnt_delta, + + __entry->rtblk_res, + __entry->rtblk_res_used, + __entry->rtbcount_delta, + __entry->delrtb_delta, + + __entry->ino_res, + __entry->ino_res_used, + __entry->icount_delta) +) + 
+#define DEFINE_DQTRX_EVENT(name) \ +DEFINE_EVENT(xfs_dqtrx_class, name, \ + TP_PROTO(struct xfs_dqtrx *qtrx), \ + TP_ARGS(qtrx)) +DEFINE_DQTRX_EVENT(xfs_trans_apply_dquot_deltas); +DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_before); +DEFINE_DQTRX_EVENT(xfs_trans_mod_dquot_after); DECLARE_EVENT_CLASS(xfs_loggrant_class, TP_PROTO(struct xlog *log, struct xlog_ticket *tic), @@ -3052,8 +3193,7 @@ DEFINE_EVENT(xfs_inode_irec_class, name, \ DEFINE_INODE_EVENT(xfs_reflink_set_inode_flag); DEFINE_INODE_EVENT(xfs_reflink_unset_inode_flag); DEFINE_ITRUNC_EVENT(xfs_reflink_update_inode_size); -DEFINE_IMAP_EVENT(xfs_reflink_remap_imap); -TRACE_EVENT(xfs_reflink_remap_blocks_loop, +TRACE_EVENT(xfs_reflink_remap_blocks, TP_PROTO(struct xfs_inode *src, xfs_fileoff_t soffset, xfs_filblks_t len, struct xfs_inode *dest, xfs_fileoff_t doffset), @@ -3084,59 +3224,14 @@ TRACE_EVENT(xfs_reflink_remap_blocks_loop, __entry->dest_ino, __entry->dest_lblk) ); -TRACE_EVENT(xfs_reflink_punch_range, - TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk, - xfs_extlen_t len), - TP_ARGS(ip, lblk, len), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_fileoff_t, lblk) - __field(xfs_extlen_t, len) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->lblk = lblk; - __entry->len = len; - ), - TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->lblk, - __entry->len) -); -TRACE_EVENT(xfs_reflink_remap, - TP_PROTO(struct xfs_inode *ip, xfs_fileoff_t lblk, - xfs_extlen_t len, xfs_fsblock_t new_pblk), - TP_ARGS(ip, lblk, len, new_pblk), - TP_STRUCT__entry( - __field(dev_t, dev) - __field(xfs_ino_t, ino) - __field(xfs_fileoff_t, lblk) - __field(xfs_extlen_t, len) - __field(xfs_fsblock_t, new_pblk) - ), - TP_fast_assign( - __entry->dev = VFS_I(ip)->i_sb->s_dev; - __entry->ino = ip->i_ino; - __entry->lblk = lblk; - __entry->len = len; - __entry->new_pblk = new_pblk; - ), - TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x new_pblk %llu", - MAJOR(__entry->dev), MINOR(__entry->dev), - __entry->ino, - __entry->lblk, - __entry->len, - __entry->new_pblk) -); DEFINE_DOUBLE_IO_EVENT(xfs_reflink_remap_range); DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_range_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_set_inode_flag_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_update_inode_size_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_blocks_error); DEFINE_INODE_ERROR_EVENT(xfs_reflink_remap_extent_error); +DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_src); +DEFINE_INODE_IREC_EVENT(xfs_reflink_remap_extent_dest); /* dedupe tracepoints */ DEFINE_DOUBLE_IO_EVENT(xfs_reflink_compare_extents); @@ -3582,7 +3677,6 @@ DEFINE_KMEM_EVENT(kmem_alloc); DEFINE_KMEM_EVENT(kmem_alloc_io); DEFINE_KMEM_EVENT(kmem_alloc_large); DEFINE_KMEM_EVENT(kmem_realloc); -DEFINE_KMEM_EVENT(kmem_zone_alloc); TRACE_EVENT(xfs_check_new_dalign, TP_PROTO(struct xfs_mount *mp, int new_dalign, xfs_ino_t calc_rootino), diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index 3c94e5ff4316..ed72867b1a19 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c @@ -90,7 +90,7 @@ xfs_trans_dup( trace_xfs_trans_dup(tp, _RET_IP_); - ntp = kmem_zone_zalloc(xfs_trans_zone, 0); + ntp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL); /* * Initialize the new transaction structure. 
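[Editor's note] The following xfs_trans.c hunks introduce XFS_TRANS_RES_FDBLKS: when a transaction in a rolling chain frees blocks, the freed count is returned to that transaction's own block reservation (clamped so the counter cannot overflow) instead of going straight back to the global free-block pool. Below is a minimal userspace sketch of that clamping arithmetic only; toy_trans, toy_mod_fdblocks and global_fdblocks are made-up stand-ins for the kernel structures, not kernel APIs.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct toy_trans {
	unsigned int blk_res;	/* blocks still reserved by this transaction */
};

/* stand-in for the filesystem-wide free block counter (mp->m_sb fdblocks) */
static int64_t global_fdblocks = 1000;

static void toy_mod_fdblocks(struct toy_trans *tp, int64_t delta, int res_fdblks)
{
	if (delta > 0 && res_fdblks) {
		/*
		 * Keep freed blocks in the transaction reservation, clamped
		 * so blk_res cannot overflow; only the remainder falls
		 * through to the global pool.
		 */
		int64_t blkres_delta = delta;

		if (blkres_delta > (int64_t)(UINT_MAX - tp->blk_res))
			blkres_delta = UINT_MAX - tp->blk_res;
		tp->blk_res += blkres_delta;
		delta -= blkres_delta;
	}
	global_fdblocks += delta;
}

int main(void)
{
	struct toy_trans tp = { .blk_res = 4 };

	/* free 8 blocks inside a RES_FDBLKS transaction chain */
	toy_mod_fdblocks(&tp, 8, 1);
	printf("blk_res=%u fdblocks=%lld\n", tp.blk_res,
	       (long long)global_fdblocks);
	return 0;
}

In this toy run the 8 freed blocks stay with the transaction (blk_res becomes 12) and the global pool is unchanged, which is the behaviour the real hunk preserves across repeated transaction rolls.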
@@ -107,7 +107,8 @@ xfs_trans_dup( ntp->t_flags = XFS_TRANS_PERM_LOG_RES | (tp->t_flags & XFS_TRANS_RESERVE) | - (tp->t_flags & XFS_TRANS_NO_WRITECOUNT); + (tp->t_flags & XFS_TRANS_NO_WRITECOUNT) | + (tp->t_flags & XFS_TRANS_RES_FDBLKS); /* We gave our writer reference to the new transaction */ tp->t_flags |= XFS_TRANS_NO_WRITECOUNT; ntp->t_ticket = xfs_log_ticket_get(tp->t_ticket); @@ -262,7 +263,7 @@ xfs_trans_alloc( * GFP_NOFS allocation context so that we avoid lockdep false positives * by doing GFP_KERNEL allocations inside sb_start_intwrite(). */ - tp = kmem_zone_zalloc(xfs_trans_zone, 0); + tp = kmem_cache_zalloc(xfs_trans_zone, GFP_KERNEL | __GFP_NOFAIL); if (!(flags & XFS_TRANS_NO_WRITECOUNT)) sb_start_intwrite(mp->m_super); @@ -272,6 +273,8 @@ xfs_trans_alloc( */ WARN_ON(resp->tr_logres > 0 && mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE); + ASSERT(!(flags & XFS_TRANS_RES_FDBLKS) || + xfs_sb_version_haslazysbcount(&mp->m_sb)); tp->t_magic = XFS_TRANS_HEADER_MAGIC; tp->t_flags = flags; @@ -365,6 +368,20 @@ xfs_trans_mod_sb( tp->t_blk_res_used += (uint)-delta; if (tp->t_blk_res_used > tp->t_blk_res) xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); + } else if (delta > 0 && (tp->t_flags & XFS_TRANS_RES_FDBLKS)) { + int64_t blkres_delta; + + /* + * Return freed blocks directly to the reservation + * instead of the global pool, being careful not to + * overflow the trans counter. This is used to preserve + * reservation across chains of transaction rolls that + * repeatedly free and allocate blocks. + */ + blkres_delta = min_t(int64_t, delta, + UINT_MAX - tp->t_blk_res); + tp->t_blk_res += blkres_delta; + delta -= blkres_delta; } tp->t_fdblocks_delta += delta; if (xfs_sb_version_haslazysbcount(&mp->m_sb)) diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 8308bf6d7e40..b752501818d2 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h @@ -37,10 +37,6 @@ struct xfs_log_item { unsigned long li_flags; /* misc flags */ struct xfs_buf *li_buf; /* real buffer pointer */ struct list_head li_bio_list; /* buffer item list */ - void (*li_cb)(struct xfs_buf *, - struct xfs_log_item *); - /* buffer item iodone */ - /* callback func */ const struct xfs_item_ops *li_ops; /* function list */ /* delayed logging */ @@ -78,7 +74,6 @@ struct xfs_item_ops { void (*iop_committing)(struct xfs_log_item *, xfs_lsn_t commit_lsn); void (*iop_release)(struct xfs_log_item *); xfs_lsn_t (*iop_committed)(struct xfs_log_item *, xfs_lsn_t); - void (*iop_error)(struct xfs_log_item *, xfs_buf_t *); int (*iop_recover)(struct xfs_log_item *lip, struct xfs_trans *tp); bool (*iop_match)(struct xfs_log_item *item, uint64_t id); }; diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index ac5019361a13..dbb69b4bf3ed 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c @@ -377,8 +377,12 @@ xfsaild_resubmit_item( } /* protected by ail_lock */ - list_for_each_entry(lip, &bp->b_li_list, li_bio_list) - xfs_clear_li_failed(lip); + list_for_each_entry(lip, &bp->b_li_list, li_bio_list) { + if (bp->b_flags & _XBF_INODES) + clear_bit(XFS_LI_FAILED, &lip->li_flags); + else + xfs_clear_li_failed(lip); + } xfs_buf_unlock(bp); return XFS_ITEM_SUCCESS; @@ -444,16 +448,10 @@ xfsaild_push( target = ailp->ail_target; ailp->ail_target_prev = target; + /* we're done if the AIL is empty or our push has reached the end */ lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn); - if (!lip) { - /* - * If the AIL is empty or our push has reached the end we are - * done now. 
- */ - xfs_trans_ail_cursor_done(&cur); - spin_unlock(&ailp->ail_lock); + if (!lip) goto out_done; - } XFS_STATS_INC(mp, xs_push_ail); @@ -482,7 +480,7 @@ xfsaild_push( * inode buffer is locked because we already pushed the * updates to it as part of inode clustering. * - * We do not want to to stop flushing just because lots + * We do not want to stop flushing just because lots * of items are already being flushed, but we need to * re-try the flushing relatively soon if most of the * AIL is being flushed. @@ -517,7 +515,7 @@ xfsaild_push( /* * Are there too many items we can't do anything with? * - * If we we are skipping too many items because we can't flush + * If we are skipping too many items because we can't flush * them or they are already being flushed, we back off and * given them time to complete whatever operation is being * done. i.e. remove pressure from the AIL while we can't make @@ -535,6 +533,8 @@ xfsaild_push( break; lsn = lip->li_lsn; } + +out_done: xfs_trans_ail_cursor_done(&cur); spin_unlock(&ailp->ail_lock); @@ -542,7 +542,6 @@ xfsaild_push( ailp->ail_log_flush++; if (!count || XFS_LSN_CMP(lsn, target) >= 0) { -out_done: /* * We reached the target or the AIL is empty, so wait a bit * longer for I/O to complete and remove pushed items from the @@ -634,7 +633,8 @@ xfsaild( */ smp_rmb(); if (!xfs_ail_min(ailp) && - ailp->ail_target == ailp->ail_target_prev) { + ailp->ail_target == ailp->ail_target_prev && + list_empty(&ailp->ail_buf_list)) { spin_unlock(&ailp->ail_lock); freezable_schedule(); tout = 0; @@ -843,7 +843,6 @@ xfs_ail_delete_one( trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn); xfs_ail_delete(ailp, lip); - xfs_clear_li_failed(lip); clear_bit(XFS_LI_IN_AIL, &lip->li_flags); lip->li_lsn = 0; @@ -874,6 +873,7 @@ xfs_trans_ail_delete( } /* xfs_ail_update_finish() drops the AIL lock */ + xfs_clear_li_failed(lip); tail_lsn = xfs_ail_delete_one(ailp, lip); xfs_ail_update_finish(ailp, tail_lsn); } diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c index 08174ffa2118..11cd666cd99a 100644 --- a/fs/xfs/xfs_trans_buf.c +++ b/fs/xfs/xfs_trans_buf.c @@ -465,24 +465,16 @@ xfs_trans_dirty_buf( ASSERT(bp->b_transp == tp); ASSERT(bip != NULL); - ASSERT(bp->b_iodone == NULL || - bp->b_iodone == xfs_buf_iodone_callbacks); /* * Mark the buffer as needing to be written out eventually, * and set its iodone function to remove the buffer's buf log * item from the AIL and free it when the buffer is flushed - * to disk. See xfs_buf_attach_iodone() for more details - * on li_cb and xfs_buf_iodone_callbacks(). - * If we end up aborting this transaction, we trap this buffer - * inside the b_bdstrat callback so that this won't get written to - * disk. + * to disk. 
*/ bp->b_flags |= XBF_DONE; ASSERT(atomic_read(&bip->bli_refcount) > 0); - bp->b_iodone = xfs_buf_iodone_callbacks; - bip->bli_item.li_cb = xfs_buf_iodone; /* * If we invalidated the buffer within this transaction, then @@ -626,6 +618,7 @@ xfs_trans_inode_buf( ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_BUF; + bp->b_flags |= _XBF_INODES; xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); } @@ -650,7 +643,7 @@ xfs_trans_stale_inode_buf( ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_STALE_INODE; - bip->bli_item.li_cb = xfs_buf_iodone; + bp->b_flags |= _XBF_INODES; xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); } @@ -675,6 +668,7 @@ xfs_trans_inode_alloc_buf( ASSERT(atomic_read(&bip->bli_refcount) > 0); bip->bli_flags |= XFS_BLI_INODE_ALLOC_BUF; + bp->b_flags |= _XBF_INODES; xfs_trans_buf_set_type(tp, bp, XFS_BLFT_DINO_BUF); } @@ -785,5 +779,6 @@ xfs_trans_dquot_buf( break; } + bp->b_flags |= _XBF_DQUOTS; xfs_trans_buf_set_type(tp, bp, type); } diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c index c0f73b82c055..c6ba7ef18e06 100644 --- a/fs/xfs/xfs_trans_dquot.c +++ b/fs/xfs/xfs_trans_dquot.c @@ -15,6 +15,7 @@ #include "xfs_trans_priv.h" #include "xfs_quota.h" #include "xfs_qm.h" +#include "xfs_trace.h" STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *); @@ -155,14 +156,19 @@ xfs_trans_get_dqtrx( int i; struct xfs_dqtrx *qa; - if (XFS_QM_ISUDQ(dqp)) + switch (xfs_dquot_type(dqp)) { + case XFS_DQTYPE_USER: qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR]; - else if (XFS_QM_ISGDQ(dqp)) + break; + case XFS_DQTYPE_GROUP: qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP]; - else if (XFS_QM_ISPDQ(dqp)) + break; + case XFS_DQTYPE_PROJ: qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ]; - else + break; + default: return NULL; + } for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { if (qa[i].qt_dquot == NULL || @@ -203,6 +209,11 @@ xfs_trans_mod_dquot( if (qtrx->qt_dquot == NULL) qtrx->qt_dquot = dqp; + if (delta) { + trace_xfs_trans_mod_dquot_before(qtrx); + trace_xfs_trans_mod_dquot(tp, dqp, field, delta); + } + switch (field) { /* @@ -266,6 +277,10 @@ xfs_trans_mod_dquot( default: ASSERT(0); } + + if (delta) + trace_xfs_trans_mod_dquot_after(qtrx); + tp->t_flags |= XFS_TRANS_DQ_DIRTY; } @@ -293,6 +308,37 @@ xfs_trans_dqlockedjoin( } } +/* Apply dqtrx changes to the quota reservation counters. */ +static inline void +xfs_apply_quota_reservation_deltas( + struct xfs_dquot_res *res, + uint64_t reserved, + int64_t res_used, + int64_t count_delta) +{ + if (reserved != 0) { + /* + * Subtle math here: If reserved > res_used (the normal case), + * we're simply subtracting the unused transaction quota + * reservation from the dquot reservation. + * + * If, however, res_used > reserved, then we have allocated + * more quota blocks than were reserved for the transaction. + * We must add that excess to the dquot reservation since it + * tracks (usage + resv) and by definition we didn't reserve + * that excess. + */ + res->reserved -= abs(reserved - res_used); + } else if (count_delta != 0) { + /* + * These blks were never reserved, either inside a transaction + * or outside one (in a delayed allocation). Also, this isn't + * always a negative number since we sometimes deliberately + * skip quota reservations. 
+ */ + res->reserved += count_delta; + } +} /* * Called by xfs_trans_commit() and similar in spirit to @@ -309,7 +355,6 @@ xfs_trans_apply_dquot_deltas( int i, j; struct xfs_dquot *dqp; struct xfs_dqtrx *qtrx, *qa; - struct xfs_disk_dquot *d; int64_t totalbdelta; int64_t totalrtbdelta; @@ -328,6 +373,8 @@ xfs_trans_apply_dquot_deltas( xfs_trans_dqlockedjoin(tp, qa); for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { + uint64_t blk_res_used; + qtrx = &qa[i]; /* * The array of dquots is filled @@ -341,7 +388,6 @@ xfs_trans_apply_dquot_deltas( /* * adjust the actual number of blocks used */ - d = &dqp->q_core; /* * The issue here is - sometimes we don't make a blkquota @@ -360,38 +406,46 @@ xfs_trans_apply_dquot_deltas( qtrx->qt_delbcnt_delta; totalrtbdelta = qtrx->qt_rtbcount_delta + qtrx->qt_delrtb_delta; + + if (totalbdelta != 0 || totalrtbdelta != 0 || + qtrx->qt_icount_delta != 0) { + trace_xfs_trans_apply_dquot_deltas_before(dqp); + trace_xfs_trans_apply_dquot_deltas(qtrx); + } + #ifdef DEBUG if (totalbdelta < 0) - ASSERT(be64_to_cpu(d->d_bcount) >= - -totalbdelta); + ASSERT(dqp->q_blk.count >= -totalbdelta); if (totalrtbdelta < 0) - ASSERT(be64_to_cpu(d->d_rtbcount) >= - -totalrtbdelta); + ASSERT(dqp->q_rtb.count >= -totalrtbdelta); if (qtrx->qt_icount_delta < 0) - ASSERT(be64_to_cpu(d->d_icount) >= - -qtrx->qt_icount_delta); + ASSERT(dqp->q_ino.count >= -qtrx->qt_icount_delta); #endif if (totalbdelta) - be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta); + dqp->q_blk.count += totalbdelta; if (qtrx->qt_icount_delta) - be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta); + dqp->q_ino.count += qtrx->qt_icount_delta; if (totalrtbdelta) - be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta); + dqp->q_rtb.count += totalrtbdelta; + + if (totalbdelta != 0 || totalrtbdelta != 0 || + qtrx->qt_icount_delta != 0) + trace_xfs_trans_apply_dquot_deltas_after(dqp); /* * Get any default limits in use. * Start/reset the timer(s) if needed. */ - if (d->d_id) { - xfs_qm_adjust_dqlimits(tp->t_mountp, dqp); - xfs_qm_adjust_dqtimers(tp->t_mountp, dqp); + if (dqp->q_id) { + xfs_qm_adjust_dqlimits(dqp); + xfs_qm_adjust_dqtimers(dqp); } - dqp->dq_flags |= XFS_DQ_DIRTY; + dqp->q_flags |= XFS_DQFLAG_DIRTY; /* * add this to the list of items to get logged */ @@ -401,78 +455,31 @@ xfs_trans_apply_dquot_deltas( * In case of delayed allocations, there's no * reservation that a transaction structure knows of. */ - if (qtrx->qt_blk_res != 0) { - uint64_t blk_res_used = 0; - - if (qtrx->qt_bcount_delta > 0) - blk_res_used = qtrx->qt_bcount_delta; - - if (qtrx->qt_blk_res != blk_res_used) { - if (qtrx->qt_blk_res > blk_res_used) - dqp->q_res_bcount -= (xfs_qcnt_t) - (qtrx->qt_blk_res - - blk_res_used); - else - dqp->q_res_bcount -= (xfs_qcnt_t) - (blk_res_used - - qtrx->qt_blk_res); - } - } else { - /* - * These blks were never reserved, either inside - * a transaction or outside one (in a delayed - * allocation). Also, this isn't always a - * negative number since we sometimes - * deliberately skip quota reservations. - */ - if (qtrx->qt_bcount_delta) { - dqp->q_res_bcount += - (xfs_qcnt_t)qtrx->qt_bcount_delta; - } - } + blk_res_used = max_t(int64_t, 0, qtrx->qt_bcount_delta); + xfs_apply_quota_reservation_deltas(&dqp->q_blk, + qtrx->qt_blk_res, blk_res_used, + qtrx->qt_bcount_delta); + /* * Adjust the RT reservation. 
*/ - if (qtrx->qt_rtblk_res != 0) { - if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) { - if (qtrx->qt_rtblk_res > - qtrx->qt_rtblk_res_used) - dqp->q_res_rtbcount -= (xfs_qcnt_t) - (qtrx->qt_rtblk_res - - qtrx->qt_rtblk_res_used); - else - dqp->q_res_rtbcount -= (xfs_qcnt_t) - (qtrx->qt_rtblk_res_used - - qtrx->qt_rtblk_res); - } - } else { - if (qtrx->qt_rtbcount_delta) - dqp->q_res_rtbcount += - (xfs_qcnt_t)qtrx->qt_rtbcount_delta; - } + xfs_apply_quota_reservation_deltas(&dqp->q_rtb, + qtrx->qt_rtblk_res, + qtrx->qt_rtblk_res_used, + qtrx->qt_rtbcount_delta); /* * Adjust the inode reservation. */ - if (qtrx->qt_ino_res != 0) { - ASSERT(qtrx->qt_ino_res >= - qtrx->qt_ino_res_used); - if (qtrx->qt_ino_res > qtrx->qt_ino_res_used) - dqp->q_res_icount -= (xfs_qcnt_t) - (qtrx->qt_ino_res - - qtrx->qt_ino_res_used); - } else { - if (qtrx->qt_icount_delta) - dqp->q_res_icount += - (xfs_qcnt_t)qtrx->qt_icount_delta; - } - - ASSERT(dqp->q_res_bcount >= - be64_to_cpu(dqp->q_core.d_bcount)); - ASSERT(dqp->q_res_icount >= - be64_to_cpu(dqp->q_core.d_icount)); - ASSERT(dqp->q_res_rtbcount >= - be64_to_cpu(dqp->q_core.d_rtbcount)); + ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); + xfs_apply_quota_reservation_deltas(&dqp->q_ino, + qtrx->qt_ino_res, + qtrx->qt_ino_res_used, + qtrx->qt_icount_delta); + + ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count); + ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count); + ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count); } } } @@ -516,7 +523,7 @@ xfs_trans_unreserve_and_mod_dquots( if (qtrx->qt_blk_res) { xfs_dqlock(dqp); locked = true; - dqp->q_res_bcount -= + dqp->q_blk.reserved -= (xfs_qcnt_t)qtrx->qt_blk_res; } if (qtrx->qt_ino_res) { @@ -524,7 +531,7 @@ xfs_trans_unreserve_and_mod_dquots( xfs_dqlock(dqp); locked = true; } - dqp->q_res_icount -= + dqp->q_ino.reserved -= (xfs_qcnt_t)qtrx->qt_ino_res; } @@ -533,7 +540,7 @@ xfs_trans_unreserve_and_mod_dquots( xfs_dqlock(dqp); locked = true; } - dqp->q_res_rtbcount -= + dqp->q_rtb.reserved -= (xfs_qcnt_t)qtrx->qt_rtblk_res; } if (locked) @@ -549,21 +556,80 @@ xfs_quota_warn( struct xfs_dquot *dqp, int type) { - enum quota_type qtype; + enum quota_type qtype; - if (dqp->dq_flags & XFS_DQ_PROJ) + switch (xfs_dquot_type(dqp)) { + case XFS_DQTYPE_PROJ: qtype = PRJQUOTA; - else if (dqp->dq_flags & XFS_DQ_USER) + break; + case XFS_DQTYPE_USER: qtype = USRQUOTA; - else + break; + case XFS_DQTYPE_GROUP: qtype = GRPQUOTA; + break; + default: + return; + } - quota_send_warning(make_kqid(&init_user_ns, qtype, - be32_to_cpu(dqp->q_core.d_id)), + quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id), mp->m_super->s_dev, type); } /* + * Decide if we can make an additional reservation against a quota resource. + * Returns an inode QUOTA_NL_ warning code and whether or not it's fatal. + * + * Note that we assume that the numeric difference between the inode and block + * warning codes will always be 3 since it's userspace ABI now, and will never + * decrease the quota reservation, so the *BELOW messages are irrelevant. 
+ */ +static inline int +xfs_dqresv_check( + struct xfs_dquot_res *res, + struct xfs_quota_limits *qlim, + int64_t delta, + bool *fatal) +{ + xfs_qcnt_t hardlimit = res->hardlimit; + xfs_qcnt_t softlimit = res->softlimit; + xfs_qcnt_t total_count = res->reserved + delta; + + BUILD_BUG_ON(QUOTA_NL_BHARDWARN != QUOTA_NL_IHARDWARN + 3); + BUILD_BUG_ON(QUOTA_NL_BSOFTLONGWARN != QUOTA_NL_ISOFTLONGWARN + 3); + BUILD_BUG_ON(QUOTA_NL_BSOFTWARN != QUOTA_NL_ISOFTWARN + 3); + + *fatal = false; + if (delta <= 0) + return QUOTA_NL_NOWARN; + + if (!hardlimit) + hardlimit = qlim->hard; + if (!softlimit) + softlimit = qlim->soft; + + if (hardlimit && total_count > hardlimit) { + *fatal = true; + return QUOTA_NL_IHARDWARN; + } + + if (softlimit && total_count > softlimit) { + time64_t now = ktime_get_real_seconds(); + + if ((res->timer != 0 && now > res->timer) || + (res->warnings != 0 && res->warnings >= qlim->warn)) { + *fatal = true; + return QUOTA_NL_ISOFTLONGWARN; + } + + res->warnings++; + return QUOTA_NL_ISOFTWARN; + } + + return QUOTA_NL_NOWARN; +} + +/* * This reserves disk blocks and inodes against a dquot. * Flags indicate if the dquot is to be locked here and also * if the blk reservation is for RT or regular blocks. @@ -578,110 +644,58 @@ xfs_trans_dqresv( long ninos, uint flags) { - xfs_qcnt_t hardlimit; - xfs_qcnt_t softlimit; - time64_t timer; - xfs_qwarncnt_t warns; - xfs_qwarncnt_t warnlimit; - xfs_qcnt_t total_count; - xfs_qcnt_t *resbcountp; struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_def_quota *defq; - + struct xfs_dquot_res *blkres; + struct xfs_quota_limits *qlim; xfs_dqlock(dqp); defq = xfs_get_defquota(q, xfs_dquot_type(dqp)); if (flags & XFS_TRANS_DQ_RES_BLKS) { - hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit); - if (!hardlimit) - hardlimit = defq->bhardlimit; - softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit); - if (!softlimit) - softlimit = defq->bsoftlimit; - timer = be32_to_cpu(dqp->q_core.d_btimer); - warns = be16_to_cpu(dqp->q_core.d_bwarns); - warnlimit = defq->bwarnlimit; - resbcountp = &dqp->q_res_bcount; + blkres = &dqp->q_blk; + qlim = &defq->blk; } else { - ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS); - hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit); - if (!hardlimit) - hardlimit = defq->rtbhardlimit; - softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit); - if (!softlimit) - softlimit = defq->rtbsoftlimit; - timer = be32_to_cpu(dqp->q_core.d_rtbtimer); - warns = be16_to_cpu(dqp->q_core.d_rtbwarns); - warnlimit = defq->rtbwarnlimit; - resbcountp = &dqp->q_res_rtbcount; + blkres = &dqp->q_rtb; + qlim = &defq->rtb; } - if ((flags & XFS_QMOPT_FORCE_RES) == 0 && - dqp->q_core.d_id && - ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) || - (XFS_IS_GQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISGDQ(dqp)) || - (XFS_IS_PQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISPDQ(dqp)))) { - if (nblks > 0) { + if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id && + xfs_dquot_is_enforced(dqp)) { + int quota_nl; + bool fatal; + + /* + * dquot is locked already. See if we'd go over the hardlimit + * or exceed the timelimit if we'd reserve resources. + */ + quota_nl = xfs_dqresv_check(blkres, qlim, nblks, &fatal); + if (quota_nl != QUOTA_NL_NOWARN) { /* - * dquot is locked already. See if we'd go over the - * hardlimit or exceed the timelimit if we allocate - * nblks. + * Quota block warning codes are 3 more than the inode + * codes, which we check above. 
*/ - total_count = *resbcountp + nblks; - if (hardlimit && total_count > hardlimit) { - xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); + xfs_quota_warn(mp, dqp, quota_nl + 3); + if (fatal) goto error_return; - } - if (softlimit && total_count > softlimit) { - if ((timer != 0 && - ktime_get_real_seconds() > timer) || - (warns != 0 && warns >= warnlimit)) { - xfs_quota_warn(mp, dqp, - QUOTA_NL_BSOFTLONGWARN); - goto error_return; - } - - xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN); - } } - if (ninos > 0) { - total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; - timer = be32_to_cpu(dqp->q_core.d_itimer); - warns = be16_to_cpu(dqp->q_core.d_iwarns); - warnlimit = defq->iwarnlimit; - hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); - if (!hardlimit) - hardlimit = defq->ihardlimit; - softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); - if (!softlimit) - softlimit = defq->isoftlimit; - - if (hardlimit && total_count > hardlimit) { - xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); + + quota_nl = xfs_dqresv_check(&dqp->q_ino, &defq->ino, ninos, + &fatal); + if (quota_nl != QUOTA_NL_NOWARN) { + xfs_quota_warn(mp, dqp, quota_nl); + if (fatal) goto error_return; - } - if (softlimit && total_count > softlimit) { - if ((timer != 0 && - ktime_get_real_seconds() > timer) || - (warns != 0 && warns >= warnlimit)) { - xfs_quota_warn(mp, dqp, - QUOTA_NL_ISOFTLONGWARN); - goto error_return; - } - xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN); - } } } /* * Change the reservation, but not the actual usage. - * Note that q_res_bcount = q_core.d_bcount + resv + * Note that q_blk.reserved = q_blk.count + resv */ - (*resbcountp) += (xfs_qcnt_t)nblks; - if (ninos != 0) - dqp->q_res_icount += (xfs_qcnt_t)ninos; + blkres->reserved += (xfs_qcnt_t)nblks; + dqp->q_ino.reserved += (xfs_qcnt_t)ninos; /* * note the reservation amt in the trans struct too, @@ -702,16 +716,16 @@ xfs_trans_dqresv( XFS_TRANS_DQ_RES_INOS, ninos); } - ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount)); - ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount)); - ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); + ASSERT(dqp->q_blk.reserved >= dqp->q_blk.count); + ASSERT(dqp->q_rtb.reserved >= dqp->q_rtb.count); + ASSERT(dqp->q_ino.reserved >= dqp->q_ino.count); xfs_dqunlock(dqp); return 0; error_return: xfs_dqunlock(dqp); - if (XFS_QM_ISPDQ(dqp)) + if (xfs_dquot_type(dqp) == XFS_DQTYPE_PROJ) return -ENOSPC; return -EDQUOT; } @@ -860,7 +874,8 @@ STATIC void xfs_trans_alloc_dqinfo( xfs_trans_t *tp) { - tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, 0); + tp->t_dqinfo = kmem_cache_zalloc(xfs_qm_dqtrxzone, + GFP_KERNEL | __GFP_NOFAIL); } void diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c index abfb17f88f9a..8ec7c8f109d7 100644 --- a/fs/zonefs/super.c +++ b/fs/zonefs/super.c @@ -335,7 +335,7 @@ static void zonefs_io_error(struct inode *inode, bool write) struct zonefs_sb_info *sbi = ZONEFS_SB(sb); unsigned int noio_flag; unsigned int nr_zones = - zi->i_max_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT); + zi->i_zone_size >> (sbi->s_zone_sectors_shift + SECTOR_SHIFT); struct zonefs_ioerr_data err = { .inode = inode, .write = write, @@ -398,7 +398,7 @@ static int zonefs_file_truncate(struct inode *inode, loff_t isize) goto unlock; ret = blkdev_zone_mgmt(inode->i_sb->s_bdev, op, zi->i_zsector, - zi->i_max_size >> SECTOR_SHIFT, GFP_NOFS); + zi->i_zone_size >> SECTOR_SHIFT, GFP_NOFS); if (ret) { zonefs_err(inode->i_sb, "Zone management operation at %llu failed %d", @@ -786,8 +786,11 @@ static ssize_t 
zonefs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) if (iocb->ki_pos >= ZONEFS_I(inode)->i_max_size) return -EFBIG; - if (iocb->ki_flags & IOCB_DIRECT) - return zonefs_file_dio_write(iocb, from); + if (iocb->ki_flags & IOCB_DIRECT) { + ssize_t ret = zonefs_file_dio_write(iocb, from); + if (ret != -ENOTBLK) + return ret; + } return zonefs_file_buffered_write(iocb, from); } @@ -1050,14 +1053,16 @@ static void zonefs_init_file_inode(struct inode *inode, struct blk_zone *zone, zi->i_ztype = type; zi->i_zsector = zone->start; + zi->i_zone_size = zone->len << SECTOR_SHIFT; + zi->i_max_size = min_t(loff_t, MAX_LFS_FILESIZE, - zone->len << SECTOR_SHIFT); + zone->capacity << SECTOR_SHIFT); zi->i_wpoffset = zonefs_check_zone_condition(inode, zone, true, true); inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; inode->i_size = zi->i_wpoffset; - inode->i_blocks = zone->len; + inode->i_blocks = zi->i_max_size >> SECTOR_SHIFT; inode->i_op = &zonefs_file_inode_operations; inode->i_fop = &zonefs_file_operations; @@ -1164,12 +1169,18 @@ static int zonefs_create_zgroup(struct zonefs_zone_data *zd, if (zonefs_zone_type(next) != type) break; zone->len += next->len; + zone->capacity += next->capacity; if (next->cond == BLK_ZONE_COND_READONLY && zone->cond != BLK_ZONE_COND_OFFLINE) zone->cond = BLK_ZONE_COND_READONLY; else if (next->cond == BLK_ZONE_COND_OFFLINE) zone->cond = BLK_ZONE_COND_OFFLINE; } + if (zone->capacity != zone->len) { + zonefs_err(sb, "Invalid conventional zone capacity\n"); + ret = -EINVAL; + goto free; + } } /* diff --git a/fs/zonefs/zonefs.h b/fs/zonefs/zonefs.h index ad17fef7ce91..55b39970acb2 100644 --- a/fs/zonefs/zonefs.h +++ b/fs/zonefs/zonefs.h @@ -56,6 +56,9 @@ struct zonefs_inode_info { /* File maximum size */ loff_t i_max_size; + /* File zone size */ + loff_t i_zone_size; + /* * To serialise fully against both syscall and mmap based IO and * sequential file truncation, two locks are used. For serializing |