Diffstat (limited to 'fs')
-rw-r--r--  fs/afs/cmservice.c            |  10
-rw-r--r--  fs/afs/dir.c                  |  89
-rw-r--r--  fs/afs/file.c                 |  12
-rw-r--r--  fs/afs/vlclient.c             |  11
-rw-r--r--  fs/block_dev.c                |  64
-rw-r--r--  fs/btrfs/ctree.h              |   4
-rw-r--r--  fs/btrfs/disk-io.c            |   2
-rw-r--r--  fs/btrfs/extent-tree.c        |  71
-rw-r--r--  fs/btrfs/volumes.c            |  13
-rw-r--r--  fs/cifs/connect.c             |   1
-rw-r--r--  fs/cifs/smb2ops.c             |  39
-rw-r--r--  fs/cifs/smb2pdu.c             |   7
-rw-r--r--  fs/dax.c                      |   2
-rw-r--r--  fs/gfs2/bmap.c                | 164
-rw-r--r--  fs/io_uring.c                 |  20
-rw-r--r--  fs/nfs/delegation.c           |  25
-rw-r--r--  fs/nfs/delegation.h           |   2
-rw-r--r--  fs/nfs/fscache.c              |   7
-rw-r--r--  fs/nfs/fscache.h              |   2
-rw-r--r--  fs/nfs/nfs4_fs.h              |   3
-rw-r--r--  fs/nfs/nfs4client.c           |   5
-rw-r--r--  fs/nfs/nfs4proc.c             | 109
-rw-r--r--  fs/nfs/nfs4state.c            |  49
-rw-r--r--  fs/nfs/pnfs.c                 |   7
-rw-r--r--  fs/nfs/super.c                |   1
-rw-r--r--  fs/seq_file.c                 |   2
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c      |  29
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.c  |  19
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_node.c |   3
-rw-r--r--  fs/xfs/xfs_log.c              |   5
30 files changed, 453 insertions, 324 deletions
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 4f1b6f466ff5..b86195e4dc6c 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c @@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work) struct afs_call *call = container_of(work, struct afs_call, work); struct afs_uuid *r = call->request; - struct { - __be32 match; - } reply; - _enter(""); if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0) - reply.match = htonl(0); + afs_send_empty_reply(call); else - reply.match = htonl(1); + rxrpc_kernel_abort_call(call->net->socket, call->rxcall, + 1, 1, "K-1"); - afs_send_simple_reply(call, &reply, sizeof(reply)); afs_put_call(call); _leave(""); } diff --git a/fs/afs/dir.c b/fs/afs/dir.c index e640d67274be..81207dc3c997 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c @@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode, * iterate through the data blob that lists the contents of an AFS directory */ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, - struct key *key) + struct key *key, afs_dataversion_t *_dir_version) { struct afs_vnode *dvnode = AFS_FS_I(dir); struct afs_xdr_dir_page *dbuf; @@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx, req = afs_read_dir(dvnode, key); if (IS_ERR(req)) return PTR_ERR(req); + *_dir_version = req->data_version; /* round the file position up to the next entry boundary */ ctx->pos += sizeof(union afs_xdr_dirent) - 1; @@ -514,7 +515,10 @@ out: */ static int afs_readdir(struct file *file, struct dir_context *ctx) { - return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file)); + afs_dataversion_t dir_version; + + return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file), + &dir_version); } /* @@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name, * - just returns the FID the dentry name maps to if found */ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, - struct afs_fid *fid, struct key *key) + struct afs_fid *fid, struct key *key, + afs_dataversion_t *_dir_version) { struct afs_super_info *as = dir->i_sb->s_fs_info; struct afs_lookup_one_cookie cookie = { @@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry, _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); /* search the directory */ - ret = afs_dir_iterate(dir, &cookie.ctx, key); + ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version); if (ret < 0) { _leave(" = %d [iter]", ret); return ret; @@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, struct afs_server *server; struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode; struct inode *inode = NULL, *ti; + afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version); int ret, i; _enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry); @@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry, cookie->fids[i].vid = as->volume->vid; /* search the directory */ - ret = afs_dir_iterate(dir, &cookie->ctx, key); + ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version); if (ret < 0) { inode = ERR_PTR(ret); goto out; } + dentry->d_fsdata = (void *)(unsigned long)data_version; + inode = ERR_PTR(-ENOENT); if (!cookie->found) goto out; @@ -968,7 +976,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) struct dentry *parent; struct inode *inode; struct key *key; - long dir_version, de_version; + afs_dataversion_t 
dir_version; + long de_version; int ret; if (flags & LOOKUP_RCU) @@ -1014,20 +1023,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) * on a 32-bit system, we only have 32 bits in the dentry to store the * version. */ - dir_version = (long)dir->status.data_version; + dir_version = dir->status.data_version; de_version = (long)dentry->d_fsdata; - if (de_version == dir_version) - goto out_valid; + if (de_version == (long)dir_version) + goto out_valid_noupdate; - dir_version = (long)dir->invalid_before; - if (de_version - dir_version >= 0) + dir_version = dir->invalid_before; + if (de_version - (long)dir_version >= 0) goto out_valid; _debug("dir modified"); afs_stat_v(dir, n_reval); /* search the directory for this vnode */ - ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key); + ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version); switch (ret) { case 0: /* the filename maps to something */ @@ -1080,7 +1089,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags) } out_valid: - dentry->d_fsdata = (void *)dir_version; + dentry->d_fsdata = (void *)(unsigned long)dir_version; +out_valid_noupdate: dput(parent); key_put(key); _leave(" = 1 [valid]"); @@ -1186,6 +1196,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc, } /* + * Note that a dentry got changed. We need to set d_fsdata to the data version + * number derived from the result of the operation. It doesn't matter if + * d_fsdata goes backwards as we'll just revalidate. + */ +static void afs_update_dentry_version(struct afs_fs_cursor *fc, + struct dentry *dentry, + struct afs_status_cb *scb) +{ + if (fc->ac.error == 0) + dentry->d_fsdata = + (void *)(unsigned long)scb->status.data_version; +} + +/* * create a directory on an AFS filesystem */ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) @@ -1227,6 +1251,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) afs_check_for_remote_deletion(&fc, dvnode); afs_vnode_commit_status(&fc, dvnode, fc.cb_break, &data_version, &scb[0]); + afs_update_dentry_version(&fc, dentry, &scb[0]); afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); ret = afs_end_vnode_operation(&fc); if (ret < 0) @@ -1319,6 +1344,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) afs_vnode_commit_status(&fc, dvnode, fc.cb_break, &data_version, scb); + afs_update_dentry_version(&fc, dentry, scb); ret = afs_end_vnode_operation(&fc); if (ret == 0) { afs_dir_remove_subdir(dentry); @@ -1458,6 +1484,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry) &data_version, &scb[0]); afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, &data_version_2, &scb[1]); + afs_update_dentry_version(&fc, dentry, &scb[0]); ret = afs_end_vnode_operation(&fc); if (ret == 0 && !(scb[1].have_status || scb[1].have_error)) ret = afs_dir_remove_link(dvnode, dentry, key); @@ -1526,6 +1553,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode, afs_check_for_remote_deletion(&fc, dvnode); afs_vnode_commit_status(&fc, dvnode, fc.cb_break, &data_version, &scb[0]); + afs_update_dentry_version(&fc, dentry, &scb[0]); afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); ret = afs_end_vnode_operation(&fc); if (ret < 0) @@ -1607,6 +1635,7 @@ static int afs_link(struct dentry *from, struct inode *dir, afs_vnode_commit_status(&fc, vnode, fc.cb_break_2, NULL, &scb[1]); ihold(&vnode->vfs_inode); + afs_update_dentry_version(&fc, dentry, &scb[0]); d_instantiate(dentry, 
&vnode->vfs_inode); mutex_unlock(&vnode->io_lock); @@ -1686,6 +1715,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry, afs_check_for_remote_deletion(&fc, dvnode); afs_vnode_commit_status(&fc, dvnode, fc.cb_break, &data_version, &scb[0]); + afs_update_dentry_version(&fc, dentry, &scb[0]); afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]); ret = afs_end_vnode_operation(&fc); if (ret < 0) @@ -1791,6 +1821,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, } } + /* This bit is potentially nasty as there's a potential race with + * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry + * to reflect it's new parent's new data_version after the op, but + * d_revalidate may see old_dentry between the op having taken place + * and the version being updated. + * + * So drop the old_dentry for now to make other threads go through + * lookup instead - which we hold a lock against. + */ + d_drop(old_dentry); + ret = -ERESTARTSYS; if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) { afs_dataversion_t orig_data_version; @@ -1802,9 +1843,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, if (orig_dvnode != new_dvnode) { if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) { afs_end_vnode_operation(&fc); - goto error_rehash; + goto error_rehash_old; } - new_data_version = new_dvnode->status.data_version; + new_data_version = new_dvnode->status.data_version + 1; } else { new_data_version = orig_data_version; new_scb = &scb[0]; @@ -1827,7 +1868,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, } ret = afs_end_vnode_operation(&fc); if (ret < 0) - goto error_rehash; + goto error_rehash_old; } if (ret == 0) { @@ -1853,10 +1894,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, drop_nlink(new_inode); spin_unlock(&new_inode->i_lock); } + + /* Now we can update d_fsdata on the dentries to reflect their + * new parent's data_version. + * + * Note that if we ever implement RENAME_EXCHANGE, we'll have + * to update both dentries with opposing dir versions. 
+ */ + if (new_dvnode != orig_dvnode) { + afs_update_dentry_version(&fc, old_dentry, &scb[1]); + afs_update_dentry_version(&fc, new_dentry, &scb[1]); + } else { + afs_update_dentry_version(&fc, old_dentry, &scb[0]); + afs_update_dentry_version(&fc, new_dentry, &scb[0]); + } d_move(old_dentry, new_dentry); goto error_tmp; } +error_rehash_old: + d_rehash(new_dentry); error_rehash: if (rehash) d_rehash(rehash); diff --git a/fs/afs/file.c b/fs/afs/file.c index 56b69576274d..dd3c55c9101c 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req) int i; if (refcount_dec_and_test(&req->usage)) { - for (i = 0; i < req->nr_pages; i++) - if (req->pages[i]) - put_page(req->pages[i]); - if (req->pages != req->array) - kfree(req->pages); + if (req->pages) { + for (i = 0; i < req->nr_pages; i++) + if (req->pages[i]) + put_page(req->pages[i]); + if (req->pages != req->array) + kfree(req->pages); + } kfree(req); } } diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c index d7e0fd3c00df..cfb0ac4bd039 100644 --- a/fs/afs/vlclient.c +++ b/fs/afs/vlclient.c @@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call) struct afs_uuid__xdr *xdr; struct afs_uuid *uuid; int j; + int n = entry->nr_servers; tmp = ntohl(uvldb->serverFlags[i]); if (tmp & AFS_VLSF_DONTUSE || (new_only && !(tmp & AFS_VLSF_NEWREPSITE))) continue; if (tmp & AFS_VLSF_RWVOL) { - entry->fs_mask[i] |= AFS_VOL_VTM_RW; + entry->fs_mask[n] |= AFS_VOL_VTM_RW; if (vlflags & AFS_VLF_BACKEXISTS) - entry->fs_mask[i] |= AFS_VOL_VTM_BAK; + entry->fs_mask[n] |= AFS_VOL_VTM_BAK; } if (tmp & AFS_VLSF_ROVOL) - entry->fs_mask[i] |= AFS_VOL_VTM_RO; - if (!entry->fs_mask[i]) + entry->fs_mask[n] |= AFS_VOL_VTM_RO; + if (!entry->fs_mask[n]) continue; xdr = &uvldb->serverNumber[i]; - uuid = (struct afs_uuid *)&entry->fs_server[i]; + uuid = (struct afs_uuid *)&entry->fs_server[n]; uuid->time_low = xdr->time_low; uuid->time_mid = htons(ntohl(xdr->time_mid)); uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version)); diff --git a/fs/block_dev.c b/fs/block_dev.c index a6f7c892cb4a..677cb364d33f 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) struct bio *bio; bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0; bool is_read = (iov_iter_rw(iter) == READ), is_sync; - bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0; loff_t pos = iocb->ki_pos; blk_qc_t qc = BLK_QC_T_NONE; - gfp_t gfp; - ssize_t ret; + int ret = 0; if ((pos | iov_iter_alignment(iter)) & (bdev_logical_block_size(bdev) - 1)) return -EINVAL; - if (nowait) - gfp = GFP_NOWAIT; - else - gfp = GFP_KERNEL; - - bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool); - if (!bio) - return -EAGAIN; + bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool); dio = container_of(bio, struct blkdev_dio, bio); dio->is_sync = is_sync = is_sync_kiocb(iocb); @@ -384,10 +375,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) if (!is_poll) blk_start_plug(&plug); - ret = 0; for (;;) { - int err; - bio_set_dev(bio, bdev); bio->bi_iter.bi_sector = pos >> 9; bio->bi_write_hint = iocb->ki_hint; @@ -395,10 +383,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) bio->bi_end_io = blkdev_bio_end_io; bio->bi_ioprio = iocb->ki_ioprio; - err = bio_iov_iter_get_pages(bio, iter); - if (unlikely(err)) { - if (!ret) - ret = err; + ret = bio_iov_iter_get_pages(bio, iter); + if (unlikely(ret)) { 
bio->bi_status = BLK_STS_IOERR; bio_endio(bio); break; @@ -413,14 +399,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) task_io_account_write(bio->bi_iter.bi_size); } - /* - * Tell underlying layer to not block for resource shortage. - * And if we would have blocked, return error inline instead - * of through the bio->bi_end_io() callback. - */ - if (nowait) - bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE); - dio->size += bio->bi_iter.bi_size; pos += bio->bi_iter.bi_size; @@ -434,12 +412,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) } qc = submit_bio(bio); - if (qc == BLK_QC_T_EAGAIN) { - if (!ret) - ret = -EAGAIN; - goto error; - } - ret = dio->size; if (polled) WRITE_ONCE(iocb->ki_cookie, qc); @@ -460,20 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) atomic_inc(&dio->ref); } - qc = submit_bio(bio); - if (qc == BLK_QC_T_EAGAIN) { - if (!ret) - ret = -EAGAIN; - goto error; - } - ret = dio->size; - - bio = bio_alloc(gfp, nr_pages); - if (!bio) { - if (!ret) - ret = -EAGAIN; - goto error; - } + submit_bio(bio); + bio = bio_alloc(GFP_KERNEL, nr_pages); } if (!is_poll) @@ -493,16 +453,13 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages) } __set_current_state(TASK_RUNNING); -out: if (!ret) ret = blk_status_to_errno(dio->bio.bi_status); + if (likely(!ret)) + ret = dio->size; bio_put(&dio->bio); return ret; -error: - if (!is_poll) - blk_finish_plug(&plug); - goto out; } static ssize_t @@ -1754,7 +1711,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) /* finish claiming */ mutex_lock(&bdev->bd_mutex); - bd_finish_claiming(bdev, whole, holder); + if (!res) + bd_finish_claiming(bdev, whole, holder); + else + bd_abort_claiming(bdev, whole, holder); /* * Block event polling for write claims if requested. 
Any * write holder makes the write_holder state stick until diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 299e11e6c554..94660063a162 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -401,7 +401,6 @@ struct btrfs_dev_replace { struct raid_kobject { u64 flags; struct kobject kobj; - struct list_head list; }; /* @@ -915,8 +914,6 @@ struct btrfs_fs_info { u32 thread_pool_size; struct kobject *space_info_kobj; - struct list_head pending_raid_kobjs; - spinlock_t pending_raid_kobjs_lock; /* uncontended */ u64 total_pinned; @@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr); int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used, u64 type, u64 chunk_offset, u64 size); -void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info); struct btrfs_trans_handle *btrfs_start_trans_remove_block_group( struct btrfs_fs_info *fs_info, const u64 chunk_offset); diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 5f7ee70b3d1a..97beb351a10c 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb, INIT_LIST_HEAD(&fs_info->delayed_iputs); INIT_LIST_HEAD(&fs_info->delalloc_roots); INIT_LIST_HEAD(&fs_info->caching_block_groups); - INIT_LIST_HEAD(&fs_info->pending_raid_kobjs); - spin_lock_init(&fs_info->pending_raid_kobjs_lock); spin_lock_init(&fs_info->delalloc_root_lock); spin_lock_init(&fs_info->trans_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index d3b58e388535..8b7eb22d508a 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4,6 +4,7 @@ */ #include <linux/sched.h> +#include <linux/sched/mm.h> #include <linux/sched/signal.h> #include <linux/pagemap.h> #include <linux/writeback.h> @@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) return 0; } -/* link_block_group will queue up kobjects to add when we're reclaim-safe */ -void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info) -{ - struct btrfs_space_info *space_info; - struct raid_kobject *rkobj; - LIST_HEAD(list); - int ret = 0; - - spin_lock(&fs_info->pending_raid_kobjs_lock); - list_splice_init(&fs_info->pending_raid_kobjs, &list); - spin_unlock(&fs_info->pending_raid_kobjs_lock); - - list_for_each_entry(rkobj, &list, list) { - space_info = btrfs_find_space_info(fs_info, rkobj->flags); - - ret = kobject_add(&rkobj->kobj, &space_info->kobj, - "%s", btrfs_bg_type_to_raid_name(rkobj->flags)); - if (ret) { - kobject_put(&rkobj->kobj); - break; - } - } - if (ret) - btrfs_warn(fs_info, - "failed to add kobject for block cache, ignoring"); -} - static void link_block_group(struct btrfs_block_group_cache *cache) { struct btrfs_space_info *space_info = cache->space_info; @@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache) up_write(&space_info->groups_sem); if (first) { - struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS); + struct raid_kobject *rkobj; + unsigned int nofs_flag; + int ret; + + /* + * Setup a NOFS context because kobject_add(), deep in its call + * chain, does GFP_KERNEL allocations, and we are often called + * in a context where if reclaim is triggered we can deadlock + * (we are either holding a transaction handle or some lock + * required for a transaction commit). 
+ */ + nofs_flag = memalloc_nofs_save(); + rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL); if (!rkobj) { + memalloc_nofs_restore(nofs_flag); btrfs_warn(cache->fs_info, "couldn't alloc memory for raid level kobject"); return; } rkobj->flags = cache->flags; kobject_init(&rkobj->kobj, &btrfs_raid_ktype); - - spin_lock(&fs_info->pending_raid_kobjs_lock); - list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs); - spin_unlock(&fs_info->pending_raid_kobjs_lock); + ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s", + btrfs_bg_type_to_raid_name(rkobj->flags)); + memalloc_nofs_restore(nofs_flag); + if (ret) { + kobject_put(&rkobj->kobj); + btrfs_warn(fs_info, + "failed to add kobject for block cache, ignoring"); + return; + } space_info->block_group_kobjs[index] = &rkobj->kobj; } } @@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info) inc_block_group_ro(cache, 1); } - btrfs_add_raid_kobjects(info); btrfs_init_global_block_rsv(info); ret = check_chunk_block_group_mappings(info); error: @@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) struct btrfs_device *device; struct list_head *devices; u64 group_trimmed; + u64 range_end = U64_MAX; u64 start; u64 end; u64 trimmed = 0; @@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range) int dev_ret = 0; int ret = 0; + /* + * Check range overflow if range->len is set. + * The default range->len is U64_MAX. + */ + if (range->len != U64_MAX && + check_add_overflow(range->start, range->len, &range_end)) + return -EINVAL; + cache = btrfs_lookup_first_block_group(fs_info, range->start); for (; cache; cache = next_block_group(cache)) { - if (cache->key.objectid >= (range->start + range->len)) { + if (cache->key.objectid >= range_end) { btrfs_put_block_group(cache); break; } start = max(range->start, cache->key.objectid); - end = min(range->start + range->len, - cache->key.objectid + cache->key.offset); + end = min(range_end, cache->key.objectid + cache->key.offset); if (end - start >= range->minlen) { if (!block_group_cache_done(cache)) { diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index d74b74ca07af..a447d3ec48d5 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset) if (ret) return ret; - /* - * We add the kobjects here (and after forcing data chunk creation) - * since relocation is the only place we'll create chunks of a new - * type at runtime. The only place where we'll remove the last - * chunk of a type is the call immediately below this one. Even - * so, we're protected against races with the cleaner thread since - * we're covered by the delete_unused_bgs_mutex. 
- */ - btrfs_add_raid_kobjects(fs_info); - trans = btrfs_start_trans_remove_block_group(root->fs_info, chunk_offset); if (IS_ERR(trans)) { @@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info, btrfs_end_transaction(trans); if (ret < 0) return ret; - - btrfs_add_raid_kobjects(fs_info); - return 1; } } diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index a4830ced0f98..a15a6e738eb5 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1113,6 +1113,7 @@ cifs_demultiplex_thread(void *p) mempool_resize(cifs_req_poolp, length + cifs_min_rcv); set_freezable(); + allow_signal(SIGKILL); while (server->tcpStatus != CifsExiting) { if (try_to_freeze()) continue; diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index a5bc1b671c12..64a5864127be 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -3489,7 +3489,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len, static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf, unsigned int buflen) { - sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf)); + void *addr; + /* + * VMAP_STACK (at least) puts stack into the vmalloc address space + */ + if (is_vmalloc_addr(buf)) + addr = vmalloc_to_page(buf); + else + addr = virt_to_page(buf); + sg_set_page(sg, addr, buflen, offset_in_page(buf)); } /* Assumes the first rqst has a transform header as the first iov. @@ -4070,7 +4078,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server, { int ret, length; char *buf = server->smallbuf; - char *tmpbuf; struct smb2_sync_hdr *shdr; unsigned int pdu_length = server->pdu_size; unsigned int buf_size; @@ -4100,18 +4107,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server, return length; next_is_large = server->large_buf; - one_more: +one_more: shdr = (struct smb2_sync_hdr *)buf; if (shdr->NextCommand) { - if (next_is_large) { - tmpbuf = server->bigbuf; + if (next_is_large) next_buffer = (char *)cifs_buf_get(); - } else { - tmpbuf = server->smallbuf; + else next_buffer = (char *)cifs_small_buf_get(); - } memcpy(next_buffer, - tmpbuf + le32_to_cpu(shdr->NextCommand), + buf + le32_to_cpu(shdr->NextCommand), pdu_length - le32_to_cpu(shdr->NextCommand)); } @@ -4140,12 +4144,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server, pdu_length -= le32_to_cpu(shdr->NextCommand); server->large_buf = next_is_large; if (next_is_large) - server->bigbuf = next_buffer; + server->bigbuf = buf = next_buffer; else - server->smallbuf = next_buffer; - - buf += le32_to_cpu(shdr->NextCommand); + server->smallbuf = buf = next_buffer; goto one_more; + } else if (ret != 0) { + /* + * ret != 0 here means that we didn't get to handle_mid() thus + * server->smallbuf and server->bigbuf are still valid. We need + * to free next_buffer because it is not going to be used + * anywhere. 
+ */ + if (next_is_large) + free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer); + else + free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer); } return ret; diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index c8cd7b6cdda2..31e4a1b0b170 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c @@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon) if (tcon == NULL) return 0; - if (smb2_command == SMB2_TREE_CONNECT) + if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) return 0; if (tcon->tidStatus == CifsExiting) { @@ -1196,7 +1196,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data) else req->SecurityMode = 0; +#ifdef CONFIG_CIFS_DFS_UPCALL + req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS); +#else req->Capabilities = 0; +#endif /* DFS_UPCALL */ + req->Channel = 0; /* MBZ */ sess_data->iov[0].iov_base = (char *)req; @@ -600,7 +600,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping) * guaranteed to either see new references or prevent new * references from being established. */ - unmap_mapping_range(mapping, 0, 0, 1); + unmap_mapping_range(mapping, 0, 0, 0); xas_lock_irq(&xas); xas_for_each(&xas, entry, ULONG_MAX) { diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 4df26ef2b2b1..4f8b5fd6c81f 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c @@ -390,6 +390,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h) return mp->mp_aheight - x - 1; } +static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp) +{ + sector_t factor = 1, block = 0; + int hgt; + + for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) { + if (hgt < mp->mp_aheight) + block += mp->mp_list[hgt] * factor; + factor *= sdp->sd_inptrs; + } + return block; +} + static void release_metapath(struct metapath *mp) { int i; @@ -430,60 +443,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt return ptr - first; } -typedef const __be64 *(*gfs2_metadata_walker)( - struct metapath *mp, - const __be64 *start, const __be64 *end, - u64 factor, void *data); +enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE }; -#define WALK_STOP ((__be64 *)0) -#define WALK_NEXT ((__be64 *)1) +/* + * gfs2_metadata_walker - walk an indirect block + * @mp: Metapath to indirect block + * @ptrs: Number of pointers to look at + * + * When returning WALK_FOLLOW, the walker must update @mp to point at the right + * indirect block to follow. + */ +typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp, + unsigned int ptrs); -static int gfs2_walk_metadata(struct inode *inode, sector_t lblock, - u64 len, struct metapath *mp, gfs2_metadata_walker walker, - void *data) +/* + * gfs2_walk_metadata - walk a tree of indirect blocks + * @inode: The inode + * @mp: Starting point of walk + * @max_len: Maximum number of blocks to walk + * @walker: Called during the walk + * + * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or + * past the end of metadata, and a negative error code otherwise. + */ + +static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp, + u64 max_len, gfs2_metadata_walker walker) { - struct metapath clone; struct gfs2_inode *ip = GFS2_I(inode); struct gfs2_sbd *sdp = GFS2_SB(inode); - const __be64 *start, *end, *ptr; u64 factor = 1; unsigned int hgt; - int ret = 0; + int ret; - for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--) + /* + * The walk starts in the lowest allocated indirect block, which may be + * before the position indicated by @mp. 
Adjust @max_len accordingly + * to avoid a short walk. + */ + for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) { + max_len += mp->mp_list[hgt] * factor; + mp->mp_list[hgt] = 0; factor *= sdp->sd_inptrs; + } for (;;) { - u64 step; + u16 start = mp->mp_list[hgt]; + enum walker_status status; + unsigned int ptrs; + u64 len; /* Walk indirect block. */ - start = metapointer(hgt, mp); - end = metaend(hgt, mp); - - step = (end - start) * factor; - if (step > len) - end = start + DIV_ROUND_UP_ULL(len, factor); - - ptr = walker(mp, start, end, factor, data); - if (ptr == WALK_STOP) + ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start; + len = ptrs * factor; + if (len > max_len) + ptrs = DIV_ROUND_UP_ULL(max_len, factor); + status = walker(mp, ptrs); + switch (status) { + case WALK_STOP: + return 1; + case WALK_FOLLOW: + BUG_ON(mp->mp_aheight == mp->mp_fheight); + ptrs = mp->mp_list[hgt] - start; + len = ptrs * factor; break; - if (step >= len) + case WALK_CONTINUE: break; - len -= step; - if (ptr != WALK_NEXT) { - BUG_ON(!*ptr); - mp->mp_list[hgt] += ptr - start; - goto fill_up_metapath; } + if (len >= max_len) + break; + max_len -= len; + if (status == WALK_FOLLOW) + goto fill_up_metapath; lower_metapath: /* Decrease height of metapath. */ - if (mp != &clone) { - clone_metapath(&clone, mp); - mp = &clone; - } brelse(mp->mp_bh[hgt]); mp->mp_bh[hgt] = NULL; + mp->mp_list[hgt] = 0; if (!hgt) break; hgt--; @@ -491,10 +528,7 @@ lower_metapath: /* Advance in metadata tree. */ (mp->mp_list[hgt])++; - start = metapointer(hgt, mp); - end = metaend(hgt, mp); - if (start >= end) { - mp->mp_list[hgt] = 0; + if (mp->mp_list[hgt] >= sdp->sd_inptrs) { if (!hgt) break; goto lower_metapath; @@ -502,44 +536,36 @@ lower_metapath: fill_up_metapath: /* Increase height of metapath. 
*/ - if (mp != &clone) { - clone_metapath(&clone, mp); - mp = &clone; - } ret = fillup_metapath(ip, mp, ip->i_height - 1); if (ret < 0) - break; + return ret; hgt += ret; for (; ret; ret--) do_div(factor, sdp->sd_inptrs); mp->mp_aheight = hgt + 1; } - if (mp == &clone) - release_metapath(mp); - return ret; + return 0; } -struct gfs2_hole_walker_args { - u64 blocks; -}; - -static const __be64 *gfs2_hole_walker(struct metapath *mp, - const __be64 *start, const __be64 *end, - u64 factor, void *data) +static enum walker_status gfs2_hole_walker(struct metapath *mp, + unsigned int ptrs) { - struct gfs2_hole_walker_args *args = data; - const __be64 *ptr; + const __be64 *start, *ptr, *end; + unsigned int hgt; + + hgt = mp->mp_aheight - 1; + start = metapointer(hgt, mp); + end = start + ptrs; for (ptr = start; ptr < end; ptr++) { if (*ptr) { - args->blocks += (ptr - start) * factor; + mp->mp_list[hgt] += ptr - start; if (mp->mp_aheight == mp->mp_fheight) return WALK_STOP; - return ptr; /* increase height */ + return WALK_FOLLOW; } } - args->blocks += (end - start) * factor; - return WALK_NEXT; + return WALK_CONTINUE; } /** @@ -557,12 +583,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp, static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len, struct metapath *mp, struct iomap *iomap) { - struct gfs2_hole_walker_args args = { }; - int ret = 0; + struct metapath clone; + u64 hole_size; + int ret; - ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args); - if (!ret) - iomap->length = args.blocks << inode->i_blkbits; + clone_metapath(&clone, mp); + ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker); + if (ret < 0) + goto out; + + if (ret == 1) + hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock; + else + hole_size = len; + iomap->length = hole_size << inode->i_blkbits; + ret = 0; + +out: + release_metapath(&clone); return ret; } diff --git a/fs/io_uring.c b/fs/io_uring.c index d542f1cf4428..24bbe3cb7ad4 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1097,10 +1097,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw, iter->bvec = bvec + seg_skip; iter->nr_segs -= seg_skip; - iter->count -= (seg_skip << PAGE_SHIFT); + iter->count -= bvec->bv_len + offset; iter->iov_offset = offset & ~PAGE_MASK; - if (iter->iov_offset) - iter->count -= iter->iov_offset; } } @@ -2025,6 +2023,15 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req, { int ret; + ret = io_req_defer(ctx, req, s->sqe); + if (ret) { + if (ret != -EIOCBQUEUED) { + io_free_req(req); + io_cqring_add_event(ctx, s->sqe->user_data, ret); + } + return 0; + } + ret = __io_submit_sqe(ctx, req, s, true); if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) { struct io_uring_sqe *sqe_copy; @@ -2097,13 +2104,6 @@ err: return; } - ret = io_req_defer(ctx, req, s->sqe); - if (ret) { - if (ret != -EIOCBQUEUED) - goto err_req; - return; - } - /* * If we already have a head request, queue this one for async * submittal once the head completes. 
If we don't have a head but diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 0ff3facf81da..071b90a45933 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -153,7 +153,7 @@ again: /* Block nfs4_proc_unlck */ mutex_lock(&sp->so_delegreturn_mutex); seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); - err = nfs4_open_delegation_recall(ctx, state, stateid, type); + err = nfs4_open_delegation_recall(ctx, state, stateid); if (!err) err = nfs_delegation_claim_locks(state, stateid); if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) @@ -1046,6 +1046,22 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp) nfs4_schedule_state_manager(clp); } +static void +nfs_delegation_test_free_expired(struct inode *inode, + nfs4_stateid *stateid, + const struct cred *cred) +{ + struct nfs_server *server = NFS_SERVER(inode); + const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops; + int status; + + if (!cred) + return; + status = ops->test_and_free_expired(server, stateid, cred); + if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) + nfs_remove_bad_delegation(inode, stateid); +} + /** * nfs_reap_expired_delegations - reap expired delegations * @clp: nfs_client to process @@ -1057,7 +1073,6 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp) */ void nfs_reap_expired_delegations(struct nfs_client *clp) { - const struct nfs4_minor_version_ops *ops = clp->cl_mvops; struct nfs_delegation *delegation; struct nfs_server *server; struct inode *inode; @@ -1088,11 +1103,7 @@ restart: nfs4_stateid_copy(&stateid, &delegation->stateid); clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags); rcu_read_unlock(); - if (cred != NULL && - ops->test_and_free_expired(server, &stateid, cred) < 0) { - nfs_revoke_delegation(inode, &stateid); - nfs_inode_find_state_and_recover(inode, &stateid); - } + nfs_delegation_test_free_expired(inode, &stateid, cred); put_cred(cred); if (nfs4_server_rebooted(clp)) { nfs_inode_mark_test_expired_delegation(server,inode); diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 5799777df5ec..9eb87ae4c982 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -63,7 +63,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp); /* NFSv4 delegation-related procedures */ int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync); -int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type); +int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid); int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid); bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred); bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode); diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c index 53507aa96b0b..3800ab6f08fa 100644 --- a/fs/nfs/fscache.c +++ b/fs/nfs/fscache.c @@ -114,6 +114,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int struct rb_node **p, *parent; int diff; + nfss->fscache_key = NULL; + nfss->fscache = NULL; + if (!(nfss->options & NFS_OPTION_FSCACHE)) + return; if (!uniq) { uniq = ""; ulen = 1; @@ -226,10 +230,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb) void nfs_fscache_init_inode(struct inode *inode) { struct 
nfs_fscache_inode_auxdata auxdata; + struct nfs_server *nfss = NFS_SERVER(inode); struct nfs_inode *nfsi = NFS_I(inode); nfsi->fscache = NULL; - if (!S_ISREG(inode->i_mode)) + if (!(nfss->fscache && S_ISREG(inode->i_mode))) return; memset(&auxdata, 0, sizeof(auxdata)); diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h index 25a75e40d91d..ad041cfbf9ec 100644 --- a/fs/nfs/fscache.h +++ b/fs/nfs/fscache.h @@ -182,7 +182,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode) */ static inline const char *nfs_server_fscache_state(struct nfs_server *server) { - if (server->fscache && (server->options & NFS_OPTION_FSCACHE)) + if (server->fscache) return "yes"; return "no "; } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index d778dad9a75e..3564da1ba8a1 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -465,7 +465,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t); extern void nfs4_put_state_owner(struct nfs4_state_owner *); -extern void nfs4_purge_state_owners(struct nfs_server *); +extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *); +extern void nfs4_free_state_owners(struct list_head *head); extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *); extern void nfs4_put_open_state(struct nfs4_state *); extern void nfs4_close_state(struct nfs4_state *, fmode_t); diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 616393a01c06..da6204025a2d 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -758,9 +758,12 @@ out: static void nfs4_destroy_server(struct nfs_server *server) { + LIST_HEAD(freeme); + nfs_server_return_all_delegations(server); unset_pnfs_layoutdriver(server); - nfs4_purge_state_owners(server); + nfs4_purge_state_owners(server, &freeme); + nfs4_free_state_owners(&freeme); } /* diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 39896afc6edf..1406858bae6c 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1683,6 +1683,14 @@ static void nfs_state_set_open_stateid(struct nfs4_state *state, write_sequnlock(&state->seqlock); } +static void nfs_state_clear_open_state_flags(struct nfs4_state *state) +{ + clear_bit(NFS_O_RDWR_STATE, &state->flags); + clear_bit(NFS_O_WRONLY_STATE, &state->flags); + clear_bit(NFS_O_RDONLY_STATE, &state->flags); + clear_bit(NFS_OPEN_STATE, &state->flags); +} + static void nfs_state_set_delegation(struct nfs4_state *state, const nfs4_stateid *deleg_stateid, fmode_t fmode) @@ -1907,8 +1915,9 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data) if (data->o_res.delegation_type != 0) nfs4_opendata_check_deleg(data, state); update: - update_open_stateid(state, &data->o_res.stateid, NULL, - data->o_arg.fmode); + if (!update_open_stateid(state, &data->o_res.stateid, + NULL, data->o_arg.fmode)) + return ERR_PTR(-EAGAIN); refcount_inc(&state->count); return state; @@ -1973,8 +1982,11 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data) if (data->o_res.delegation_type != 0) nfs4_opendata_check_deleg(data, state); - update_open_stateid(state, &data->o_res.stateid, NULL, - data->o_arg.fmode); + if (!update_open_stateid(state, &data->o_res.stateid, + NULL, data->o_arg.fmode)) { + nfs4_put_open_state(state); + state = ERR_PTR(-EAGAIN); + } out: nfs_release_seqid(data->o_arg.seqid); return state; @@ -2074,13 +2086,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state * { int 
ret; - /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */ - clear_bit(NFS_O_RDWR_STATE, &state->flags); - clear_bit(NFS_O_WRONLY_STATE, &state->flags); - clear_bit(NFS_O_RDONLY_STATE, &state->flags); /* memory barrier prior to reading state->n_* */ - clear_bit(NFS_DELEGATED_STATE, &state->flags); - clear_bit(NFS_OPEN_STATE, &state->flags); smp_rmb(); ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); if (ret != 0) @@ -2156,6 +2162,8 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta ctx = nfs4_state_find_open_context(state); if (IS_ERR(ctx)) return -EAGAIN; + clear_bit(NFS_DELEGATED_STATE, &state->flags); + nfs_state_clear_open_state_flags(state); ret = nfs4_do_open_reclaim(ctx, state); put_nfs_open_context(ctx); return ret; @@ -2171,18 +2179,17 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct case -ENOENT: case -EAGAIN: case -ESTALE: + case -ETIMEDOUT: break; case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: case -NFS4ERR_DEADSESSION: - set_bit(NFS_DELEGATED_STATE, &state->flags); nfs4_schedule_session_recovery(server->nfs_client->cl_session, err); return -EAGAIN; case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_STALE_STATEID: - set_bit(NFS_DELEGATED_STATE, &state->flags); /* Don't recall a delegation if it was lost */ nfs4_schedule_lease_recovery(server->nfs_client); return -EAGAIN; @@ -2203,7 +2210,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct return -EAGAIN; case -NFS4ERR_DELAY: case -NFS4ERR_GRACE: - set_bit(NFS_DELEGATED_STATE, &state->flags); ssleep(1); return -EAGAIN; case -ENOMEM: @@ -2219,8 +2225,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct } int nfs4_open_delegation_recall(struct nfs_open_context *ctx, - struct nfs4_state *state, const nfs4_stateid *stateid, - fmode_t type) + struct nfs4_state *state, const nfs4_stateid *stateid) { struct nfs_server *server = NFS_SERVER(state->inode); struct nfs4_opendata *opendata; @@ -2231,20 +2236,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, if (IS_ERR(opendata)) return PTR_ERR(opendata); nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); - nfs_state_clear_delegation(state); - switch (type & (FMODE_READ|FMODE_WRITE)) { - case FMODE_READ|FMODE_WRITE: - case FMODE_WRITE: + if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) { err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE); if (err) - break; + goto out; + } + if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) { err = nfs4_open_recover_helper(opendata, FMODE_WRITE); if (err) - break; - /* Fall through */ - case FMODE_READ: + goto out; + } + if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) { err = nfs4_open_recover_helper(opendata, FMODE_READ); + if (err) + goto out; } + nfs_state_clear_delegation(state); +out: nfs4_opendata_put(opendata); return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err); } @@ -2492,6 +2500,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data, if (!ctx) { nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1); data->is_recover = true; + task_setup_data.flags |= RPC_TASK_TIMEOUT; } else { nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0); pnfs_lgopen_prepare(data, ctx); @@ -2698,6 +2707,7 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st { /* NFSv4.0 doesn't allow for delegation recovery on open expire */ 
nfs40_clear_delegation_stateid(state); + nfs_state_clear_open_state_flags(state); return nfs4_open_expired(sp, state); } @@ -2740,13 +2750,13 @@ out_free: return -NFS4ERR_EXPIRED; } -static void nfs41_check_delegation_stateid(struct nfs4_state *state) +static int nfs41_check_delegation_stateid(struct nfs4_state *state) { struct nfs_server *server = NFS_SERVER(state->inode); nfs4_stateid stateid; struct nfs_delegation *delegation; const struct cred *cred = NULL; - int status; + int status, ret = NFS_OK; /* Get the delegation credential for use by test/free_stateid */ rcu_read_lock(); @@ -2754,20 +2764,15 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) if (delegation == NULL) { rcu_read_unlock(); nfs_state_clear_delegation(state); - return; + return NFS_OK; } nfs4_stateid_copy(&stateid, &delegation->stateid); - if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) { - rcu_read_unlock(); - nfs_state_clear_delegation(state); - return; - } if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) { rcu_read_unlock(); - return; + return NFS_OK; } if (delegation->cred) @@ -2777,9 +2782,24 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) trace_nfs4_test_delegation_stateid(state, NULL, status); if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) nfs_finish_clear_delegation_stateid(state, &stateid); + else + ret = status; - if (delegation->cred) - put_cred(cred); + put_cred(cred); + return ret; +} + +static void nfs41_delegation_recover_stateid(struct nfs4_state *state) +{ + nfs4_stateid tmp; + + if (test_bit(NFS_DELEGATED_STATE, &state->flags) && + nfs4_copy_delegation_stateid(state->inode, state->state, + &tmp, NULL) && + nfs4_stateid_match_other(&state->stateid, &tmp)) + nfs_state_set_delegation(state, &tmp, state->state); + else + nfs_state_clear_delegation(state); } /** @@ -2849,21 +2869,12 @@ static int nfs41_check_open_stateid(struct nfs4_state *state) const struct cred *cred = state->owner->so_cred; int status; - if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) { - if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) { - if (nfs4_have_delegation(state->inode, state->state)) - return NFS_OK; - return -NFS4ERR_OPENMODE; - } + if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) return -NFS4ERR_BAD_STATEID; - } status = nfs41_test_and_free_expired_stateid(server, stateid, cred); trace_nfs4_test_open_stateid(state, NULL, status); if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) { - clear_bit(NFS_O_RDONLY_STATE, &state->flags); - clear_bit(NFS_O_WRONLY_STATE, &state->flags); - clear_bit(NFS_O_RDWR_STATE, &state->flags); - clear_bit(NFS_OPEN_STATE, &state->flags); + nfs_state_clear_open_state_flags(state); stateid->type = NFS4_INVALID_STATEID_TYPE; return status; } @@ -2876,7 +2887,11 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st { int status; - nfs41_check_delegation_stateid(state); + status = nfs41_check_delegation_stateid(state); + if (status != NFS_OK) + return status; + nfs41_delegation_recover_stateid(state); + status = nfs41_check_expired_locks(state); if (status != NFS_OK) return status; @@ -3201,7 +3216,7 @@ static int _nfs4_do_setattr(struct inode *inode, if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) { /* Use that stateid */ - } else if (ctx != NULL) { + } else if (ctx != NULL && ctx->state) { struct nfs_lock_context *l_ctx; if (!nfs4_valid_open_stateid(ctx->state)) return -EBADF; diff --git a/fs/nfs/nfs4state.c 
b/fs/nfs/nfs4state.c index 9afd051a4876..cad4e064b328 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -624,24 +624,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp) /** * nfs4_purge_state_owners - Release all cached state owners * @server: nfs_server with cached state owners to release + * @head: resulting list of state owners * * Called at umount time. Remaining state owners will be on * the LRU with ref count of zero. + * Note that the state owners are not freed, but are added + * to the list @head, which can later be used as an argument + * to nfs4_free_state_owners. */ -void nfs4_purge_state_owners(struct nfs_server *server) +void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head) { struct nfs_client *clp = server->nfs_client; struct nfs4_state_owner *sp, *tmp; - LIST_HEAD(doomed); spin_lock(&clp->cl_lock); list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) { - list_move(&sp->so_lru, &doomed); + list_move(&sp->so_lru, head); nfs4_remove_state_owner_locked(sp); } spin_unlock(&clp->cl_lock); +} - list_for_each_entry_safe(sp, tmp, &doomed, so_lru) { +/** + * nfs4_purge_state_owners - Release all cached state owners + * @head: resulting list of state owners + * + * Frees a list of state owners that was generated by + * nfs4_purge_state_owners + */ +void nfs4_free_state_owners(struct list_head *head) +{ + struct nfs4_state_owner *sp, *tmp; + + list_for_each_entry_safe(sp, tmp, head, so_lru) { list_del(&sp->so_lru); nfs4_free_state_owner(sp); } @@ -1463,7 +1478,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode, nfs4_schedule_state_manager(clp); } -static void nfs4_state_mark_open_context_bad(struct nfs4_state *state) +static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err) { struct inode *inode = state->inode; struct nfs_inode *nfsi = NFS_I(inode); @@ -1474,6 +1489,8 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state) if (ctx->state != state) continue; set_bit(NFS_CONTEXT_BAD, &ctx->flags); + pr_warn("NFSv4: state recovery failed for open file %pd2, " + "error = %d\n", ctx->dentry, err); } rcu_read_unlock(); } @@ -1481,7 +1498,7 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state) static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error) { set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags); - nfs4_state_mark_open_context_bad(state); + nfs4_state_mark_open_context_bad(state, error); } @@ -1512,6 +1529,7 @@ restart: switch (status) { case 0: break; + case -ETIMEDOUT: case -ESTALE: case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: @@ -1605,6 +1623,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops) { struct nfs4_state *state; + unsigned int loop = 0; int status = 0; /* Note: we rely on the sp->so_states list being ordered @@ -1631,8 +1650,10 @@ restart: switch (status) { default: - if (status >= 0) + if (status >= 0) { + loop = 0; break; + } printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status); /* Fall through */ case -ENOENT: @@ -1646,6 +1667,10 @@ restart: break; case -EAGAIN: ssleep(1); + if (loop++ < 10) { + set_bit(ops->state_flag_bit, &state->flags); + break; + } /* Fall through */ case -NFS4ERR_ADMIN_REVOKED: case -NFS4ERR_STALE_STATEID: @@ -1658,11 +1683,13 @@ restart: case -NFS4ERR_EXPIRED: case -NFS4ERR_NO_GRACE: 
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state); + /* Fall through */ case -NFS4ERR_STALE_CLIENTID: case -NFS4ERR_BADSESSION: case -NFS4ERR_BADSLOT: case -NFS4ERR_BAD_HIGH_SLOT: case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: + case -ETIMEDOUT: goto out_err; } nfs4_put_open_state(state); @@ -1856,12 +1883,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov struct nfs4_state_owner *sp; struct nfs_server *server; struct rb_node *pos; + LIST_HEAD(freeme); int status = 0; restart: rcu_read_lock(); list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) { - nfs4_purge_state_owners(server); + nfs4_purge_state_owners(server, &freeme); spin_lock(&clp->cl_lock); for (pos = rb_first(&server->state_owners); pos != NULL; @@ -1890,6 +1918,7 @@ restart: spin_unlock(&clp->cl_lock); } rcu_read_unlock(); + nfs4_free_state_owners(&freeme); return 0; } @@ -1945,7 +1974,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status) return -EPERM; case -EACCES: case -NFS4ERR_DELAY: - case -ETIMEDOUT: case -EAGAIN: ssleep(1); break; @@ -2574,7 +2602,7 @@ static void nfs4_state_manager(struct nfs_client *clp) } /* Now recover expired state... */ - if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { + if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) { section = "reclaim nograce"; status = nfs4_do_reclaim(clp, clp->cl_mvops->nograce_recovery_ops); @@ -2582,6 +2610,7 @@ static void nfs4_state_manager(struct nfs_client *clp) continue; if (status < 0) goto out_error; + clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state); } nfs4_end_drain_session(clp); diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index 75bd5b552ba4..4525d5acae38 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -1903,12 +1903,6 @@ lookup_again: goto out_unlock; } - if (!nfs4_valid_open_stateid(ctx->state)) { - trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, - PNFS_UPDATE_LAYOUT_INVALID_OPEN); - goto out_unlock; - } - /* * Choose a stateid for the LAYOUTGET. If we don't have a layout * stateid, or it has been invalidated, then we must use the open @@ -1939,6 +1933,7 @@ lookup_again: iomode == IOMODE_RW ? 
FMODE_WRITE : FMODE_READ, NULL, &stateid, NULL); if (status != 0) { + lseg = ERR_PTR(status); trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg, PNFS_UPDATE_LAYOUT_INVALID_OPEN); diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 628631e2e34f..703f595dce90 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2260,6 +2260,7 @@ nfs_compare_remount_data(struct nfs_server *nfss, data->acdirmin != nfss->acdirmin / HZ || data->acdirmax != nfss->acdirmax / HZ || data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) || + (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) || data->nfs_server.port != nfss->port || data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen || !rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address, diff --git a/fs/seq_file.c b/fs/seq_file.c index 04f09689cd6d..1600034a929b 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c @@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset) } if (seq_has_overflowed(m)) goto Eoverflow; + p = m->op->next(m, p, &m->index); if (pos + m->count > offset) { m->from = offset - pos; m->count -= m->from; @@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset) } pos += m->count; m->count = 0; - p = m->op->next(m, p, &m->index); if (pos == offset) break; } diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index baf0b72c0a37..07aad70f3931 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c @@ -3835,15 +3835,28 @@ xfs_bmapi_read( XFS_STATS_INC(mp, xs_blk_mapr); ifp = XFS_IFORK_PTR(ip, whichfork); + if (!ifp) { + /* No CoW fork? Return a hole. */ + if (whichfork == XFS_COW_FORK) { + mval->br_startoff = bno; + mval->br_startblock = HOLESTARTBLOCK; + mval->br_blockcount = len; + mval->br_state = XFS_EXT_NORM; + *nmap = 1; + return 0; + } - /* No CoW fork? Return a hole. */ - if (whichfork == XFS_COW_FORK && !ifp) { - mval->br_startoff = bno; - mval->br_startblock = HOLESTARTBLOCK; - mval->br_blockcount = len; - mval->br_state = XFS_EXT_NORM; - *nmap = 1; - return 0; + /* + * A missing attr ifork implies that the inode says we're in + * extents or btree format but failed to pass the inode fork + * verifier while trying to load it. Treat that as a file + * corruption too. 
+ */ +#ifdef DEBUG + xfs_alert(mp, "%s: inode %llu missing fork %d", + __func__, ip->i_ino, whichfork); +#endif /* DEBUG */ + return -EFSCORRUPTED; } if (!(ifp->if_flags & XFS_IFEXTENTS)) { diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c index d1c77fd0815d..0bf56e94bfe9 100644 --- a/fs/xfs/libxfs/xfs_da_btree.c +++ b/fs/xfs/libxfs/xfs_da_btree.c @@ -487,10 +487,8 @@ xfs_da3_split( ASSERT(state->path.active == 0); oldblk = &state->path.blk[0]; error = xfs_da3_root_split(state, oldblk, addblk); - if (error) { - addblk->bp = NULL; - return error; /* GROT: dir is inconsistent */ - } + if (error) + goto out; /* * Update pointers to the node which used to be block 0 and just got @@ -505,7 +503,10 @@ xfs_da3_split( */ node = oldblk->bp->b_addr; if (node->hdr.info.forw) { - ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno); + if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) { + error = -EFSCORRUPTED; + goto out; + } node = addblk->bp->b_addr; node->hdr.info.back = cpu_to_be32(oldblk->blkno); xfs_trans_log_buf(state->args->trans, addblk->bp, @@ -514,15 +515,19 @@ xfs_da3_split( } node = oldblk->bp->b_addr; if (node->hdr.info.back) { - ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno); + if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) { + error = -EFSCORRUPTED; + goto out; + } node = addblk->bp->b_addr; node->hdr.info.forw = cpu_to_be32(oldblk->blkno); xfs_trans_log_buf(state->args->trans, addblk->bp, XFS_DA_LOGRANGE(node, &node->hdr.info, sizeof(node->hdr.info))); } +out: addblk->bp = NULL; - return 0; + return error; } /* diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c index afcc6642690a..1fc44efc344d 100644 --- a/fs/xfs/libxfs/xfs_dir2_node.c +++ b/fs/xfs/libxfs/xfs_dir2_node.c @@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry( ents = dp->d_ops->leaf_ents_p(leaf); xfs_dir3_leaf_check(dp, bp); - ASSERT(leafhdr.count > 0); + if (leafhdr.count <= 0) + return -EFSCORRUPTED; /* * Look up the hash value in the leaf entries. diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index 00e9f5c388d3..7fc3c1ad36bc 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c @@ -429,10 +429,7 @@ xfs_log_reserve( ASSERT(*ticp == NULL); tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent, - KM_SLEEP | KM_MAYFAIL); - if (!tic) - return -ENOMEM; - + KM_SLEEP); *ticp = tic; xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt |