author    Jens Axboe <jens.axboe@oracle.com>  2010-02-22 15:48:51 +0300
committer Jens Axboe <jens.axboe@oracle.com>  2010-02-22 15:48:51 +0300
commit    f11cbd74c5ff3614f6390b4de67a6ffdc614c378 (patch)
tree      6a30920ade9eeaac5bf6d6263b5d09712e882eb0 /fs
parent    429c42c9d246f5bda868495c09974312a0177328 (diff)
parent    aea187c46f7d03ce985e55eb1398d0776a15b928 (diff)
Merge branch 'master' into for-2.6.34
Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/v9fs.c | 33
-rw-r--r--  fs/9p/v9fs_vfs.h | 1
-rw-r--r--  fs/9p/vfs_file.c | 19
-rw-r--r--  fs/9p/vfs_inode.c | 43
-rw-r--r--  fs/9p/vfs_super.c | 3
-rw-r--r--  fs/affs/affs.h | 2
-rw-r--r--  fs/affs/namei.c | 7
-rw-r--r--  fs/affs/super.c | 31
-rw-r--r--  fs/affs/symlink.c | 7
-rw-r--r--  fs/befs/linuxvfs.c | 1
-rw-r--r--  fs/bfs/inode.c | 43
-rw-r--r--  fs/binfmt_aout.c | 1
-rw-r--r--  fs/binfmt_elf.c | 27
-rw-r--r--  fs/binfmt_elf_fdpic.c | 20
-rw-r--r--  fs/binfmt_flat.c | 1
-rw-r--r--  fs/binfmt_som.c | 1
-rw-r--r--  fs/bio-integrity.c | 3
-rw-r--r--  fs/bio.c | 9
-rw-r--r--  fs/block_dev.c | 7
-rw-r--r--  fs/btrfs/acl.c | 13
-rw-r--r--  fs/btrfs/ctree.h | 1
-rw-r--r--  fs/btrfs/disk-io.c | 13
-rw-r--r--  fs/btrfs/extent-tree.c | 40
-rw-r--r--  fs/btrfs/extent_io.c | 3
-rw-r--r--  fs/btrfs/extent_map.c | 14
-rw-r--r--  fs/btrfs/file.c | 104
-rw-r--r--  fs/btrfs/inode.c | 72
-rw-r--r--  fs/btrfs/ordered-data.c | 2
-rw-r--r--  fs/btrfs/relocation.c | 7
-rw-r--r--  fs/btrfs/super.c | 9
-rw-r--r--  fs/btrfs/volumes.c | 17
-rw-r--r--  fs/cachefiles/namei.c | 12
-rw-r--r--  fs/cifs/CHANGES | 8
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 3
-rw-r--r--  fs/cifs/cifsfs.c | 3
-rw-r--r--  fs/cifs/cifsfs.h | 2
-rw-r--r--  fs/cifs/cifsglob.h | 1
-rw-r--r--  fs/cifs/connect.c | 43
-rw-r--r--  fs/cifs/inode.c | 12
-rw-r--r--  fs/cifs/readdir.c | 8
-rw-r--r--  fs/cifs/sess.c | 11
-rw-r--r--  fs/compat_ioctl.c | 12
-rw-r--r--  fs/configfs/symlink.c | 4
-rw-r--r--  fs/debugfs/inode.c | 11
-rw-r--r--  fs/ecryptfs/crypto.c | 4
-rw-r--r--  fs/ecryptfs/file.c | 17
-rw-r--r--  fs/ecryptfs/inode.c | 158
-rw-r--r--  fs/ecryptfs/main.c | 4
-rw-r--r--  fs/eventfd.c | 89
-rw-r--r--  fs/exec.c | 57
-rw-r--r--  fs/exofs/inode.c | 17
-rw-r--r--  fs/exofs/pnfs.h | 10
-rw-r--r--  fs/ext4/ext4.h | 11
-rw-r--r--  fs/ext4/ext4_extents.h | 3
-rw-r--r--  fs/ext4/extents.c | 70
-rw-r--r--  fs/ext4/inode.c | 150
-rw-r--r--  fs/ext4/super.c | 1
-rw-r--r--  fs/fcntl.c | 102
-rw-r--r--  fs/file_table.c | 1
-rw-r--r--  fs/fs-writeback.c | 1
-rw-r--r--  fs/fuse/file.c | 3
-rw-r--r--  fs/gfs2/bmap.c | 2
-rw-r--r--  fs/gfs2/file.c | 38
-rw-r--r--  fs/gfs2/glock.c | 4
-rw-r--r--  fs/gfs2/glock.h | 2
-rw-r--r--  fs/gfs2/incore.h | 2
-rw-r--r--  fs/gfs2/lock_dlm.c | 11
-rw-r--r--  fs/gfs2/meta_io.c | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 14
-rw-r--r--  fs/gfs2/ops_inode.c | 9
-rw-r--r--  fs/gfs2/rgrp.c | 8
-rw-r--r--  fs/gfs2/super.c | 1
-rw-r--r--  fs/gfs2/xattr.c | 21
-rw-r--r--  fs/hppfs/hppfs.c | 18
-rw-r--r--  fs/namei.c | 43
-rw-r--r--  fs/namespace.c | 14
-rw-r--r--  fs/nfs/dir.c | 1
-rw-r--r--  fs/nfs/direct.c | 3
-rw-r--r--  fs/nfs/file.c | 2
-rw-r--r--  fs/nfs/fscache.c | 9
-rw-r--r--  fs/nfs/inode.c | 4
-rw-r--r--  fs/nfs/mount_clnt.c | 2
-rw-r--r--  fs/nfs/nfs2xdr.c | 2
-rw-r--r--  fs/nfs/nfs4_fs.h | 2
-rw-r--r--  fs/nfs/nfs4proc.c | 78
-rw-r--r--  fs/nfs/nfs4state.c | 2
-rw-r--r--  fs/nfs/nfs4xdr.c | 6
-rw-r--r--  fs/nfs/pagelist.c | 17
-rw-r--r--  fs/nfs/super.c | 15
-rw-r--r--  fs/nfs/sysctl.c | 2
-rw-r--r--  fs/nfs/write.c | 6
-rw-r--r--  fs/nfsd/export.c | 10
-rw-r--r--  fs/nfsd/vfs.c | 8
-rw-r--r--  fs/nilfs2/bmap.c | 4
-rw-r--r--  fs/nilfs2/cpfile.c | 31
-rw-r--r--  fs/nilfs2/direct.c | 17
-rw-r--r--  fs/nilfs2/ioctl.c | 2
-rw-r--r--  fs/nilfs2/segment.c | 2
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c | 2
-rw-r--r--  fs/notify/inotify/inotify_user.c | 4
-rw-r--r--  fs/ocfs2/aops.c | 4
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 2
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 6
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 10
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 4
-rw-r--r--  fs/ocfs2/dlm/dlmapi.h | 2
-rw-r--r--  fs/ocfs2/dlm/dlmast.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmconvert.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmdomain.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmlock.c | 2
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 38
-rw-r--r--  fs/ocfs2/dlm/dlmrecovery.c | 147
-rw-r--r--  fs/ocfs2/dlm/dlmunlock.c | 8
-rw-r--r--  fs/ocfs2/dlmglue.c | 85
-rw-r--r--  fs/ocfs2/export.c | 2
-rw-r--r--  fs/ocfs2/extent_map.c | 2
-rw-r--r--  fs/ocfs2/file.c | 35
-rw-r--r--  fs/ocfs2/inode.c | 4
-rw-r--r--  fs/ocfs2/ioctl.c | 14
-rw-r--r--  fs/ocfs2/journal.c | 2
-rw-r--r--  fs/ocfs2/ocfs2.h | 4
-rw-r--r--  fs/ocfs2/ocfs2_fs.h | 11
-rw-r--r--  fs/ocfs2/refcounttree.c | 12
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 12
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/ocfs2/symlink.c | 10
-rw-r--r--  fs/ocfs2/uptodate.c | 4
-rw-r--r--  fs/proc/array.c | 89
-rw-r--r--  fs/proc/base.c | 25
-rw-r--r--  fs/proc/task_mmu.c | 3
-rw-r--r--  fs/quota/dquot.c | 3
-rw-r--r--  fs/ramfs/file-nommu.c | 26
-rw-r--r--  fs/reiserfs/bitmap.c | 3
-rw-r--r--  fs/reiserfs/inode.c | 26
-rw-r--r--  fs/reiserfs/ioctl.c | 3
-rw-r--r--  fs/reiserfs/journal.c | 20
-rw-r--r--  fs/reiserfs/lock.c | 9
-rw-r--r--  fs/reiserfs/namei.c | 7
-rw-r--r--  fs/reiserfs/xattr.c | 38
-rw-r--r--  fs/reiserfs/xattr_acl.c | 2
-rw-r--r--  fs/romfs/super.c | 1
-rw-r--r--  fs/sysfs/dir.c | 14
-rw-r--r--  fs/sysfs/inode.c | 35
-rw-r--r--  fs/sysfs/sysfs.h | 15
-rw-r--r--  fs/ubifs/gc.c | 96
-rw-r--r--  fs/xfs/linux-2.6/xfs_acl.c | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 14
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 183
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.h | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_trace.h | 1145
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 2
-rw-r--r--  fs/xfs/xfs_alloc.c | 44
-rw-r--r--  fs/xfs/xfs_dfrag.c | 106
-rw-r--r--  fs/xfs/xfs_iget.c | 1
-rw-r--r--  fs/xfs/xfs_inode.c | 17
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 2
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 93
158 files changed, 2478 insertions(+), 1781 deletions(-)
diff --git a/fs/9p/v9fs.c b/fs/9p/v9fs.c
index cf62b05e296a..7d6c2139891d 100644
--- a/fs/9p/v9fs.c
+++ b/fs/9p/v9fs.c
@@ -84,7 +84,7 @@ static const match_table_t tokens = {
static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
{
- char *options;
+ char *options, *tmp_options;
substring_t args[MAX_OPT_ARGS];
char *p;
int option = 0;
@@ -102,9 +102,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
if (!opts)
return 0;
- options = kstrdup(opts, GFP_KERNEL);
- if (!options)
+ tmp_options = kstrdup(opts, GFP_KERNEL);
+ if (!tmp_options) {
+ ret = -ENOMEM;
goto fail_option_alloc;
+ }
+ options = tmp_options;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -159,8 +162,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
break;
case Opt_cache:
s = match_strdup(&args[0]);
- if (!s)
- goto fail_option_alloc;
+ if (!s) {
+ ret = -ENOMEM;
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "problem allocating copy of cache arg\n");
+ goto free_and_return;
+ }
if (strcmp(s, "loose") == 0)
v9ses->cache = CACHE_LOOSE;
@@ -173,8 +180,12 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
case Opt_access:
s = match_strdup(&args[0]);
- if (!s)
- goto fail_option_alloc;
+ if (!s) {
+ ret = -ENOMEM;
+ P9_DPRINTK(P9_DEBUG_ERROR,
+ "problem allocating copy of access arg\n");
+ goto free_and_return;
+ }
v9ses->flags &= ~V9FS_ACCESS_MASK;
if (strcmp(s, "user") == 0)
@@ -194,13 +205,11 @@ static int v9fs_parse_options(struct v9fs_session_info *v9ses, char *opts)
continue;
}
}
- kfree(options);
- return ret;
+free_and_return:
+ kfree(tmp_options);
fail_option_alloc:
- P9_DPRINTK(P9_DEBUG_ERROR,
- "failed to allocate copy of option argument\n");
- return -ENOMEM;
+ return ret;
}
/**
diff --git a/fs/9p/v9fs_vfs.h b/fs/9p/v9fs_vfs.h
index 3a7560e35865..ed835836e0dc 100644
--- a/fs/9p/v9fs_vfs.h
+++ b/fs/9p/v9fs_vfs.h
@@ -60,3 +60,4 @@ void v9fs_dentry_release(struct dentry *);
int v9fs_uflags2omode(int uflags, int extended);
ssize_t v9fs_file_readn(struct file *, char *, char __user *, u32, u64);
+void v9fs_blank_wstat(struct p9_wstat *wstat);
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 3902bf43a088..74a0461a9ac0 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -257,6 +257,23 @@ v9fs_file_write(struct file *filp, const char __user * data,
return total;
}
+static int v9fs_file_fsync(struct file *filp, struct dentry *dentry,
+ int datasync)
+{
+ struct p9_fid *fid;
+ struct p9_wstat wstat;
+ int retval;
+
+ P9_DPRINTK(P9_DEBUG_VFS, "filp %p dentry %p datasync %x\n", filp,
+ dentry, datasync);
+
+ fid = filp->private_data;
+ v9fs_blank_wstat(&wstat);
+
+ retval = p9_client_wstat(fid, &wstat);
+ return retval;
+}
+
static const struct file_operations v9fs_cached_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
@@ -266,6 +283,7 @@ static const struct file_operations v9fs_cached_file_operations = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = generic_file_readonly_mmap,
+ .fsync = v9fs_file_fsync,
};
const struct file_operations v9fs_file_operations = {
@@ -276,4 +294,5 @@ const struct file_operations v9fs_file_operations = {
.release = v9fs_dir_release,
.lock = v9fs_file_lock,
.mmap = generic_file_readonly_mmap,
+ .fsync = v9fs_file_fsync,
};
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 18f74ec4dce9..a407fa3388c0 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -176,7 +176,7 @@ int v9fs_uflags2omode(int uflags, int extended)
*
*/
-static void
+void
v9fs_blank_wstat(struct p9_wstat *wstat)
{
wstat->type = ~0;
@@ -1001,44 +1001,6 @@ done:
}
/**
- * v9fs_vfs_readlink - read a symlink's location
- * @dentry: dentry for symlink
- * @buffer: buffer to load symlink location into
- * @buflen: length of buffer
- *
- */
-
-static int v9fs_vfs_readlink(struct dentry *dentry, char __user * buffer,
- int buflen)
-{
- int retval;
- int ret;
- char *link = __getname();
-
- if (unlikely(!link))
- return -ENOMEM;
-
- if (buflen > PATH_MAX)
- buflen = PATH_MAX;
-
- P9_DPRINTK(P9_DEBUG_VFS, " dentry: %s (%p)\n", dentry->d_name.name,
- dentry);
-
- retval = v9fs_readlink(dentry, link, buflen);
-
- if (retval > 0) {
- if ((ret = copy_to_user(buffer, link, retval)) != 0) {
- P9_DPRINTK(P9_DEBUG_ERROR,
- "problem copying to user: %d\n", ret);
- retval = ret;
- }
- }
-
- __putname(link);
- return retval;
-}
-
-/**
* v9fs_vfs_follow_link - follow a symlink path
* @dentry: dentry for symlink
* @nd: nameidata
@@ -1230,7 +1192,6 @@ static const struct inode_operations v9fs_dir_inode_operations_ext = {
.rmdir = v9fs_vfs_rmdir,
.mknod = v9fs_vfs_mknod,
.rename = v9fs_vfs_rename,
- .readlink = v9fs_vfs_readlink,
.getattr = v9fs_vfs_getattr,
.setattr = v9fs_vfs_setattr,
};
@@ -1253,7 +1214,7 @@ static const struct inode_operations v9fs_file_inode_operations = {
};
static const struct inode_operations v9fs_symlink_inode_operations = {
- .readlink = v9fs_vfs_readlink,
+ .readlink = generic_readlink,
.follow_link = v9fs_vfs_follow_link,
.put_link = v9fs_vfs_put_link,
.getattr = v9fs_vfs_getattr,
diff --git a/fs/9p/vfs_super.c b/fs/9p/vfs_super.c
index 14a86448572c..69357c0d9899 100644
--- a/fs/9p/vfs_super.c
+++ b/fs/9p/vfs_super.c
@@ -188,7 +188,8 @@ static void v9fs_kill_super(struct super_block *s)
P9_DPRINTK(P9_DEBUG_VFS, " %p\n", s);
- v9fs_dentry_release(s->s_root); /* clunk root */
+ if (s->s_root)
+ v9fs_dentry_release(s->s_root); /* clunk root */
kill_anon_super(s);
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index e511dc621a2e..0e40caaba456 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -106,8 +106,8 @@ struct affs_sb_info {
u32 s_last_bmap;
struct buffer_head *s_bmap_bh;
char *s_prefix; /* Prefix for volumes and assigns. */
- int s_prefix_len; /* Length of prefix. */
char s_volume[32]; /* Volume prefix for absolute symlinks. */
+ spinlock_t symlink_lock; /* protects the previous two */
};
#define SF_INTL 0x0001 /* International filesystem. */
diff --git a/fs/affs/namei.c b/fs/affs/namei.c
index 960d336ec694..d70bbbac6b7b 100644
--- a/fs/affs/namei.c
+++ b/fs/affs/namei.c
@@ -341,10 +341,13 @@ affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
p = (char *)AFFS_HEAD(bh)->table;
lc = '/';
if (*symname == '/') {
+ struct affs_sb_info *sbi = AFFS_SB(sb);
while (*symname == '/')
symname++;
- while (AFFS_SB(sb)->s_volume[i]) /* Cannot overflow */
- *p++ = AFFS_SB(sb)->s_volume[i++];
+ spin_lock(&sbi->symlink_lock);
+ while (sbi->s_volume[i]) /* Cannot overflow */
+ *p++ = sbi->s_volume[i++];
+ spin_unlock(&sbi->symlink_lock);
}
while (i < maxlen && (c = *symname++)) {
if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
diff --git a/fs/affs/super.c b/fs/affs/super.c
index 104fdcb3a7fc..d41e9673cd97 100644
--- a/fs/affs/super.c
+++ b/fs/affs/super.c
@@ -203,7 +203,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
switch (token) {
case Opt_bs:
if (match_int(&args[0], &n))
- return -EINVAL;
+ return 0;
if (n != 512 && n != 1024 && n != 2048
&& n != 4096) {
printk ("AFFS: Invalid blocksize (512, 1024, 2048, 4096 allowed)\n");
@@ -213,7 +213,7 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
break;
case Opt_mode:
if (match_octal(&args[0], &option))
- return 1;
+ return 0;
*mode = option & 0777;
*mount_opts |= SF_SETMODE;
break;
@@ -221,8 +221,6 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
*mount_opts |= SF_MUFS;
break;
case Opt_prefix:
- /* Free any previous prefix */
- kfree(*prefix);
*prefix = match_strdup(&args[0]);
if (!*prefix)
return 0;
@@ -233,21 +231,21 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s
break;
case Opt_reserved:
if (match_int(&args[0], reserved))
- return 1;
+ return 0;
break;
case Opt_root:
if (match_int(&args[0], root))
- return 1;
+ return 0;
break;
case Opt_setgid:
if (match_int(&args[0], &option))
- return 1;
+ return 0;
*gid = option;
*mount_opts |= SF_SETGID;
break;
case Opt_setuid:
if (match_int(&args[0], &option))
- return -EINVAL;
+ return 0;
*uid = option;
*mount_opts |= SF_SETUID;
break;
@@ -311,11 +309,14 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
sb->s_fs_info = sbi;
mutex_init(&sbi->s_bmlock);
+ spin_lock_init(&sbi->symlink_lock);
if (!parse_options(data,&uid,&gid,&i,&reserved,&root_block,
&blocksize,&sbi->s_prefix,
sbi->s_volume, &mount_flags)) {
printk(KERN_ERR "AFFS: Error parsing options\n");
+ kfree(sbi->s_prefix);
+ kfree(sbi);
return -EINVAL;
}
/* N.B. after this point s_prefix must be released */
@@ -516,14 +517,18 @@ affs_remount(struct super_block *sb, int *flags, char *data)
unsigned long mount_flags;
int res = 0;
char *new_opts = kstrdup(data, GFP_KERNEL);
+ char volume[32];
+ char *prefix = NULL;
pr_debug("AFFS: remount(flags=0x%x,opts=\"%s\")\n",*flags,data);
*flags |= MS_NODIRATIME;
+ memcpy(volume, sbi->s_volume, 32);
if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
- &blocksize, &sbi->s_prefix, sbi->s_volume,
+ &blocksize, &prefix, volume,
&mount_flags)) {
+ kfree(prefix);
kfree(new_opts);
return -EINVAL;
}
@@ -534,6 +539,14 @@ affs_remount(struct super_block *sb, int *flags, char *data)
sbi->s_mode = mode;
sbi->s_uid = uid;
sbi->s_gid = gid;
+ /* protect against readers */
+ spin_lock(&sbi->symlink_lock);
+ if (prefix) {
+ kfree(sbi->s_prefix);
+ sbi->s_prefix = prefix;
+ }
+ memcpy(sbi->s_volume, volume, 32);
+ spin_unlock(&sbi->symlink_lock);
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) {
unlock_kernel();
diff --git a/fs/affs/symlink.c b/fs/affs/symlink.c
index 41782539c907..ee00f08c4f53 100644
--- a/fs/affs/symlink.c
+++ b/fs/affs/symlink.c
@@ -20,7 +20,6 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
int i, j;
char c;
char lc;
- char *pf;
pr_debug("AFFS: follow_link(ino=%lu)\n",inode->i_ino);
@@ -32,11 +31,15 @@ static int affs_symlink_readpage(struct file *file, struct page *page)
j = 0;
lf = (struct slink_front *)bh->b_data;
lc = 0;
- pf = AFFS_SB(inode->i_sb)->s_prefix ? AFFS_SB(inode->i_sb)->s_prefix : "/";
if (strchr(lf->symname,':')) { /* Handle assign or volume name */
+ struct affs_sb_info *sbi = AFFS_SB(inode->i_sb);
+ char *pf;
+ spin_lock(&sbi->symlink_lock);
+ pf = sbi->s_prefix ? sbi->s_prefix : "/";
while (i < 1023 && (c = pf[i]))
link[i++] = c;
+ spin_unlock(&sbi->symlink_lock);
while (i < 1023 && lf->symname[j] != ':')
link[i++] = lf->symname[j++];
if (i < 1023)
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
index 33baf27fac78..34ddda888e63 100644
--- a/fs/befs/linuxvfs.c
+++ b/fs/befs/linuxvfs.c
@@ -873,6 +873,7 @@ befs_fill_super(struct super_block *sb, void *data, int silent)
brelse(bh);
unacquire_priv_sbp:
+ kfree(befs_sb->mount_opts.iocharset);
kfree(sb->s_fs_info);
unacquire_none:
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index 6f60336c6628..8f3d9fd89604 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -353,35 +353,35 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
struct inode *inode;
unsigned i, imap_len;
struct bfs_sb_info *info;
- long ret = -EINVAL;
+ int ret = -EINVAL;
unsigned long i_sblock, i_eblock, i_eoff, s_size;
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
+ mutex_init(&info->bfs_lock);
s->s_fs_info = info;
sb_set_blocksize(s, BFS_BSIZE);
- bh = sb_bread(s, 0);
- if(!bh)
+ info->si_sbh = sb_bread(s, 0);
+ if (!info->si_sbh)
goto out;
- bfs_sb = (struct bfs_super_block *)bh->b_data;
+ bfs_sb = (struct bfs_super_block *)info->si_sbh->b_data;
if (le32_to_cpu(bfs_sb->s_magic) != BFS_MAGIC) {
if (!silent)
printf("No BFS filesystem on %s (magic=%08x)\n",
s->s_id, le32_to_cpu(bfs_sb->s_magic));
- goto out;
+ goto out1;
}
if (BFS_UNCLEAN(bfs_sb, s) && !silent)
printf("%s is unclean, continuing\n", s->s_id);
s->s_magic = BFS_MAGIC;
- info->si_sbh = bh;
if (le32_to_cpu(bfs_sb->s_start) > le32_to_cpu(bfs_sb->s_end)) {
printf("Superblock is corrupted\n");
- goto out;
+ goto out1;
}
info->si_lasti = (le32_to_cpu(bfs_sb->s_start) - BFS_BSIZE) /
@@ -390,7 +390,7 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
imap_len = (info->si_lasti / 8) + 1;
info->si_imap = kzalloc(imap_len, GFP_KERNEL);
if (!info->si_imap)
- goto out;
+ goto out1;
for (i = 0; i < BFS_ROOT_INO; i++)
set_bit(i, info->si_imap);
@@ -398,15 +398,13 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
inode = bfs_iget(s, BFS_ROOT_INO);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
- kfree(info->si_imap);
- goto out;
+ goto out2;
}
s->s_root = d_alloc_root(inode);
if (!s->s_root) {
iput(inode);
ret = -ENOMEM;
- kfree(info->si_imap);
- goto out;
+ goto out2;
}
info->si_blocks = (le32_to_cpu(bfs_sb->s_end) + 1) >> BFS_BSIZE_BITS;
@@ -419,10 +417,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
bh = sb_bread(s, info->si_blocks - 1);
if (!bh) {
printf("Last block not available: %lu\n", info->si_blocks - 1);
- iput(inode);
ret = -EIO;
- kfree(info->si_imap);
- goto out;
+ goto out3;
}
brelse(bh);
@@ -459,11 +455,8 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
printf("Inode 0x%08x corrupted\n", i);
brelse(bh);
- s->s_root = NULL;
- kfree(info->si_imap);
- kfree(info);
- s->s_fs_info = NULL;
- return -EIO;
+ ret = -EIO;
+ goto out3;
}
if (!di->i_ino) {
@@ -483,11 +476,17 @@ static int bfs_fill_super(struct super_block *s, void *data, int silent)
s->s_dirt = 1;
}
dump_imap("read_super", s);
- mutex_init(&info->bfs_lock);
return 0;
+out3:
+ dput(s->s_root);
+ s->s_root = NULL;
+out2:
+ kfree(info->si_imap);
+out1:
+ brelse(info->si_sbh);
out:
- brelse(bh);
+ mutex_destroy(&info->bfs_lock);
kfree(info);
s->s_fs_info = NULL;
return ret;
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
index 346b69405363..fdd397099172 100644
--- a/fs/binfmt_aout.c
+++ b/fs/binfmt_aout.c
@@ -264,6 +264,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
#else
set_personality(PER_LINUX);
#endif
+ setup_new_exec(bprm);
current->mm->end_code = ex.a_text +
(current->mm->start_code = N_TXTADDR(ex));
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index edd90c49003c..fd5b2ea5d299 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -662,27 +662,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
goto out_free_interp;
- /*
- * The early SET_PERSONALITY here is so that the lookup
- * for the interpreter happens in the namespace of the
- * to-be-execed image. SET_PERSONALITY can select an
- * alternate root.
- *
- * However, SET_PERSONALITY is NOT allowed to switch
- * this task into the new images's memory mapping
- * policy - that is, TASK_SIZE must still evaluate to
- * that which is appropriate to the execing application.
- * This is because exit_mmap() needs to have TASK_SIZE
- * evaluate to the size of the old image.
- *
- * So if (say) a 64-bit application is execing a 32-bit
- * application it is the architecture's responsibility
- * to defer changing the value of TASK_SIZE until the
- * switch really is going to happen - do this in
- * flush_thread(). - akpm
- */
- SET_PERSONALITY(loc->elf_ex);
-
interpreter = open_exec(elf_interpreter);
retval = PTR_ERR(interpreter);
if (IS_ERR(interpreter))
@@ -730,9 +709,6 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
/* Verify the interpreter has a valid arch */
if (!elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
- } else {
- /* Executables without an interpreter also need a personality */
- SET_PERSONALITY(loc->elf_ex);
}
/* Flush all traces of the currently running executable */
@@ -752,7 +728,8 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
current->flags |= PF_RANDOMIZE;
- arch_pick_mmap_layout(current->mm);
+
+ setup_new_exec(bprm);
/* Do this so that we can load the interpreter, if need be. We will
change some of these later */
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index c25256a5c5b0..18d77297ccc8 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -171,6 +171,9 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
#ifdef ELF_FDPIC_PLAT_INIT
unsigned long dynaddr;
#endif
+#ifndef CONFIG_MMU
+ unsigned long stack_prot;
+#endif
struct file *interpreter = NULL; /* to shut gcc up */
char *interpreter_name = NULL;
int executable_stack;
@@ -316,6 +319,11 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
* defunct, deceased, etc. after this point we have to exit via
* error_kill */
set_personality(PER_LINUX_FDPIC);
+ if (elf_read_implies_exec(&exec_params.hdr, executable_stack))
+ current->personality |= READ_IMPLIES_EXEC;
+
+ setup_new_exec(bprm);
+
set_binfmt(&elf_fdpic_format);
current->mm->start_code = 0;
@@ -377,9 +385,13 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
if (stack_size < PAGE_SIZE * 2)
stack_size = PAGE_SIZE * 2;
+ stack_prot = PROT_READ | PROT_WRITE;
+ if (executable_stack == EXSTACK_ENABLE_X ||
+ (executable_stack == EXSTACK_DEFAULT && VM_STACK_FLAGS & VM_EXEC))
+ stack_prot |= PROT_EXEC;
+
down_write(&current->mm->mmap_sem);
- current->mm->start_brk = do_mmap(NULL, 0, stack_size,
- PROT_READ | PROT_WRITE | PROT_EXEC,
+ current->mm->start_brk = do_mmap(NULL, 0, stack_size, stack_prot,
MAP_PRIVATE | MAP_ANONYMOUS |
MAP_UNINITIALIZED | MAP_GROWSDOWN,
0);
@@ -1798,11 +1810,11 @@ static int elf_fdpic_core_dump(struct coredump_params *cprm)
ELF_CORE_WRITE_EXTRA_DATA;
#endif
- if (file->f_pos != offset) {
+ if (cprm->file->f_pos != offset) {
/* Sanity check */
printk(KERN_WARNING
"elf_core_dump: file->f_pos (%lld) != offset (%lld)\n",
- file->f_pos, offset);
+ cprm->file->f_pos, offset);
}
end_coredump:
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index d4a00ea1054c..42c6b4a54445 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -519,6 +519,7 @@ static int load_flat_file(struct linux_binprm * bprm,
/* OK, This is the point of no return */
set_personality(PER_LINUX_32BIT);
+ setup_new_exec(bprm);
}
/*
diff --git a/fs/binfmt_som.c b/fs/binfmt_som.c
index 2a9b5330cc5e..cc8560f6c9b0 100644
--- a/fs/binfmt_som.c
+++ b/fs/binfmt_som.c
@@ -227,6 +227,7 @@ load_som_binary(struct linux_binprm * bprm, struct pt_regs * regs)
/* OK, This is the point of no return */
current->flags &= ~PF_FORKNOEXEC;
current->personality = PER_HPUX;
+ setup_new_exec(bprm);
/* Set the task size for HP-UX processes such that
* the gateway page is outside the address space.
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 49a34e7f7306..a16f29e888cd 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -61,7 +61,7 @@ static inline unsigned int vecs_to_idx(unsigned int nr)
static inline int use_bip_pool(unsigned int idx)
{
- if (idx == BIOVEC_NR_POOLS)
+ if (idx == BIOVEC_MAX_IDX)
return 1;
return 0;
@@ -95,6 +95,7 @@ struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *bio,
/* Use mempool if lower order alloc failed or max vecs were requested */
if (bip == NULL) {
+ idx = BIOVEC_MAX_IDX; /* so we free the payload properly later */
bip = mempool_alloc(bs->bio_integrity_pool, gfp_mask);
if (unlikely(bip == NULL)) {
diff --git a/fs/bio.c b/fs/bio.c
index 76e6713abf94..88094afc29ea 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
i = 0;
while (i < bio_slab_nr) {
- struct bio_slab *bslab = &bio_slabs[i];
+ bslab = &bio_slabs[i];
if (!bslab->slab && entry == -1)
entry = i;
@@ -542,13 +542,18 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
if (page == prev->bv_page &&
offset == prev->bv_offset + prev->bv_len) {
+ unsigned int prev_bv_len = prev->bv_len;
prev->bv_len += len;
if (q->merge_bvec_fn) {
struct bvec_merge_data bvm = {
+ /* prev_bvec is already charged in
+ bi_size, discharge it in order to
+ simulate merging updated prev_bvec
+ as new bvec. */
.bi_bdev = bio->bi_bdev,
.bi_sector = bio->bi_sector,
- .bi_size = bio->bi_size,
+ .bi_size = bio->bi_size - prev_bv_len,
.bi_rw = bio->bi_rw,
};
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 73d6a735b8f3..d11d0289f3d2 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -246,7 +246,8 @@ struct super_block *freeze_bdev(struct block_device *bdev)
if (!sb)
goto out;
if (sb->s_flags & MS_RDONLY) {
- deactivate_locked_super(sb);
+ sb->s_frozen = SB_FREEZE_TRANS;
+ up_write(&sb->s_umount);
mutex_unlock(&bdev->bd_fsfreeze_mutex);
return sb;
}
@@ -307,7 +308,7 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
BUG_ON(sb->s_bdev != bdev);
down_write(&sb->s_umount);
if (sb->s_flags & MS_RDONLY)
- goto out_deactivate;
+ goto out_unfrozen;
if (sb->s_op->unfreeze_fs) {
error = sb->s_op->unfreeze_fs(sb);
@@ -321,11 +322,11 @@ int thaw_bdev(struct block_device *bdev, struct super_block *sb)
}
}
+out_unfrozen:
sb->s_frozen = SB_UNFROZEN;
smp_wmb();
wake_up(&sb->s_wait_unfrozen);
-out_deactivate:
if (sb)
deactivate_locked_super(sb);
out_unlock:
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 2e9e69987a82..6df6d6ed74fd 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -112,12 +112,14 @@ static int btrfs_set_acl(struct btrfs_trans_handle *trans,
switch (type) {
case ACL_TYPE_ACCESS:
mode = inode->i_mode;
- ret = posix_acl_equiv_mode(acl, &mode);
- if (ret < 0)
- return ret;
- ret = 0;
- inode->i_mode = mode;
name = POSIX_ACL_XATTR_ACCESS;
+ if (acl) {
+ ret = posix_acl_equiv_mode(acl, &mode);
+ if (ret < 0)
+ return ret;
+ inode->i_mode = mode;
+ }
+ ret = 0;
break;
case ACL_TYPE_DEFAULT:
if (!S_ISDIR(inode->i_mode))
@@ -242,6 +244,7 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
ACL_TYPE_ACCESS);
}
}
+ posix_acl_release(clone);
}
failed:
posix_acl_release(acl);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 9f806dd04c27..2aa8ec6a0981 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1161,6 +1161,7 @@ struct btrfs_root {
#define BTRFS_MOUNT_SSD_SPREAD (1 << 8)
#define BTRFS_MOUNT_NOSSD (1 << 9)
#define BTRFS_MOUNT_DISCARD (1 << 10)
+#define BTRFS_MOUNT_FORCE_COMPRESS (1 << 11)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 009e3bd18f23..2b59201b955c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1982,7 +1982,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (!(sb->s_flags & MS_RDONLY)) {
ret = btrfs_recover_relocation(tree_root);
- BUG_ON(ret);
+ if (ret < 0) {
+ printk(KERN_WARNING
+ "btrfs: failed to recover relocation\n");
+ err = -EINVAL;
+ goto fail_trans_kthread;
+ }
}
location.objectid = BTRFS_FS_TREE_OBJECTID;
@@ -1993,6 +1998,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
if (!fs_info->fs_root)
goto fail_trans_kthread;
+ if (!(sb->s_flags & MS_RDONLY)) {
+ down_read(&fs_info->cleanup_work_sem);
+ btrfs_orphan_cleanup(fs_info->fs_root);
+ up_read(&fs_info->cleanup_work_sem);
+ }
+
return tree_root;
fail_trans_kthread:
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 56e50137d0e6..559f72489b3b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -83,6 +83,17 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
return (cache->flags & bits) == bits;
}
+void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
+{
+ atomic_inc(&cache->count);
+}
+
+void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
+{
+ if (atomic_dec_and_test(&cache->count))
+ kfree(cache);
+}
+
/*
* this adds the block group to the fs_info rb tree for the block group
* cache
@@ -156,7 +167,7 @@ block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
}
}
if (ret)
- atomic_inc(&ret->count);
+ btrfs_get_block_group(ret);
spin_unlock(&info->block_group_cache_lock);
return ret;
@@ -407,6 +418,8 @@ err:
put_caching_control(caching_ctl);
atomic_dec(&block_group->space_info->caching_threads);
+ btrfs_put_block_group(block_group);
+
return 0;
}
@@ -447,6 +460,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache)
up_write(&fs_info->extent_commit_sem);
atomic_inc(&cache->space_info->caching_threads);
+ btrfs_get_block_group(cache);
tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
cache->key.objectid);
@@ -486,12 +500,6 @@ struct btrfs_block_group_cache *btrfs_lookup_block_group(
return cache;
}
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
-{
- if (atomic_dec_and_test(&cache->count))
- kfree(cache);
-}
-
static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
u64 flags)
{
@@ -2582,7 +2590,7 @@ next_block_group(struct btrfs_root *root,
if (node) {
cache = rb_entry(node, struct btrfs_block_group_cache,
cache_node);
- atomic_inc(&cache->count);
+ btrfs_get_block_group(cache);
} else
cache = NULL;
spin_unlock(&root->fs_info->block_group_cache_lock);
@@ -4227,7 +4235,7 @@ search:
u64 offset;
int cached;
- atomic_inc(&block_group->count);
+ btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;
have_block_group:
@@ -4315,7 +4323,7 @@ have_block_group:
btrfs_put_block_group(block_group);
block_group = last_ptr->block_group;
- atomic_inc(&block_group->count);
+ btrfs_get_block_group(block_group);
spin_unlock(&last_ptr->lock);
spin_unlock(&last_ptr->refill_lock);
@@ -5394,10 +5402,6 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
int ret;
while (level >= 0) {
- if (path->slots[level] >=
- btrfs_header_nritems(path->nodes[level]))
- break;
-
ret = walk_down_proc(trans, root, path, wc, lookup_info);
if (ret > 0)
break;
@@ -5405,6 +5409,10 @@ static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
if (level == 0)
break;
+ if (path->slots[level] >=
+ btrfs_header_nritems(path->nodes[level]))
+ break;
+
ret = do_walk_down(trans, root, path, wc, &lookup_info);
if (ret > 0) {
path->slots[level]++;
@@ -7395,9 +7403,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
wait_block_group_cache_done(block_group);
btrfs_remove_free_space_cache(block_group);
-
- WARN_ON(atomic_read(&block_group->count) != 1);
- kfree(block_group);
+ btrfs_put_block_group(block_group);
spin_lock(&info->block_group_cache_lock);
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 96577e8bf9fd..b177ed319612 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3165,10 +3165,9 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
spin_unlock(&tree->buffer_lock);
goto free_eb;
}
- spin_unlock(&tree->buffer_lock);
-
/* add one reference for the tree */
atomic_inc(&eb->refs);
+ spin_unlock(&tree->buffer_lock);
return eb;
free_eb:
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 46bea0f4dc7b..428fcac45f90 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -155,20 +155,6 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
return NULL;
}
-/*
- * look for an offset in the tree, and if it can't be found, return
- * the first offset we can find smaller than 'offset'.
- */
-static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
-{
- struct rb_node *prev;
- struct rb_node *ret;
- ret = __tree_search(root, offset, &prev, NULL);
- if (!ret)
- return prev;
- return ret;
-}
-
/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index feaa13b105d9..6ed434ac037f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -506,7 +506,8 @@ next_slot:
}
static int extent_mergeable(struct extent_buffer *leaf, int slot,
- u64 objectid, u64 bytenr, u64 *start, u64 *end)
+ u64 objectid, u64 bytenr, u64 orig_offset,
+ u64 *start, u64 *end)
{
struct btrfs_file_extent_item *fi;
struct btrfs_key key;
@@ -522,6 +523,7 @@ static int extent_mergeable(struct extent_buffer *leaf, int slot,
fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
+ btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
btrfs_file_extent_compression(leaf, fi) ||
btrfs_file_extent_encryption(leaf, fi) ||
btrfs_file_extent_other_encoding(leaf, fi))
@@ -561,6 +563,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
u64 split;
int del_nr = 0;
int del_slot = 0;
+ int recow;
int ret;
btrfs_drop_extent_cache(inode, start, end - 1, 0);
@@ -568,6 +571,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
path = btrfs_alloc_path();
BUG_ON(!path);
again:
+ recow = 0;
split = start;
key.objectid = inode->i_ino;
key.type = BTRFS_EXTENT_DATA_KEY;
@@ -591,12 +595,60 @@ again:
bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
+ memcpy(&new_key, &key, sizeof(new_key));
+
+ if (start == key.offset && end < extent_end) {
+ other_start = 0;
+ other_end = start;
+ if (extent_mergeable(leaf, path->slots[0] - 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ new_key.offset = end;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - end);
+ btrfs_set_file_extent_offset(leaf, fi,
+ end - orig_offset);
+ fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ end - other_start);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
+ }
+ }
+
+ if (start > key.offset && end == extent_end) {
+ other_start = end;
+ other_end = 0;
+ if (extent_mergeable(leaf, path->slots[0] + 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ start - key.offset);
+ path->slots[0]++;
+ new_key.offset = start;
+ btrfs_set_item_key_safe(trans, root, path, &new_key);
+
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ other_end - start);
+ btrfs_set_file_extent_offset(leaf, fi,
+ start - orig_offset);
+ btrfs_mark_buffer_dirty(leaf);
+ goto out;
+ }
+ }
while (start > key.offset || end < extent_end) {
if (key.offset == start)
split = end;
- memcpy(&new_key, &key, sizeof(new_key));
new_key.offset = split;
ret = btrfs_duplicate_item(trans, root, path, &new_key);
if (ret == -EAGAIN) {
@@ -631,15 +683,18 @@ again:
path->slots[0]--;
extent_end = end;
}
+ recow = 1;
}
- fi = btrfs_item_ptr(leaf, path->slots[0],
- struct btrfs_file_extent_item);
-
other_start = end;
other_end = 0;
- if (extent_mergeable(leaf, path->slots[0] + 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
+ if (extent_mergeable(leaf, path->slots[0] + 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ if (recow) {
+ btrfs_release_path(root, path);
+ goto again;
+ }
extent_end = other_end;
del_slot = path->slots[0] + 1;
del_nr++;
@@ -650,8 +705,13 @@ again:
}
other_start = 0;
other_end = start;
- if (extent_mergeable(leaf, path->slots[0] - 1, inode->i_ino,
- bytenr, &other_start, &other_end)) {
+ if (extent_mergeable(leaf, path->slots[0] - 1,
+ inode->i_ino, bytenr, orig_offset,
+ &other_start, &other_end)) {
+ if (recow) {
+ btrfs_release_path(root, path);
+ goto again;
+ }
key.offset = other_start;
del_slot = path->slots[0];
del_nr++;
@@ -661,21 +721,23 @@ again:
BUG_ON(ret);
}
if (del_nr == 0) {
+ fi = btrfs_item_ptr(leaf, path->slots[0],
+ struct btrfs_file_extent_item);
btrfs_set_file_extent_type(leaf, fi,
BTRFS_FILE_EXTENT_REG);
btrfs_mark_buffer_dirty(leaf);
- goto out;
- }
-
- fi = btrfs_item_ptr(leaf, del_slot - 1,
- struct btrfs_file_extent_item);
- btrfs_set_file_extent_type(leaf, fi, BTRFS_FILE_EXTENT_REG);
- btrfs_set_file_extent_num_bytes(leaf, fi,
- extent_end - key.offset);
- btrfs_mark_buffer_dirty(leaf);
+ } else {
+ fi = btrfs_item_ptr(leaf, del_slot - 1,
+ struct btrfs_file_extent_item);
+ btrfs_set_file_extent_type(leaf, fi,
+ BTRFS_FILE_EXTENT_REG);
+ btrfs_set_file_extent_num_bytes(leaf, fi,
+ extent_end - key.offset);
+ btrfs_mark_buffer_dirty(leaf);
- ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
- BUG_ON(ret);
+ ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
+ BUG_ON(ret);
+ }
out:
btrfs_free_path(path);
return 0;
@@ -1073,7 +1135,7 @@ int btrfs_sync_file(struct file *file, struct dentry *dentry, int datasync)
}
mutex_lock(&dentry->d_inode->i_mutex);
out:
- return ret > 0 ? EIO : ret;
+ return ret > 0 ? -EIO : ret;
}
static const struct vm_operations_struct btrfs_file_vm_ops = {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5440bab23635..4deb280f8969 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -483,7 +483,8 @@ again:
nr_pages_ret = 0;
/* flag the file so we don't compress in the future */
- BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
+ if (!btrfs_test_opt(root, FORCE_COMPRESS))
+ BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
}
if (will_compress) {
*num_added += 1;
@@ -1680,24 +1681,6 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
* before we start the transaction. It limits the amount of btree
* reads required while inside the transaction.
*/
-static noinline void reada_csum(struct btrfs_root *root,
- struct btrfs_path *path,
- struct btrfs_ordered_extent *ordered_extent)
-{
- struct btrfs_ordered_sum *sum;
- u64 bytenr;
-
- sum = list_entry(ordered_extent->list.next, struct btrfs_ordered_sum,
- list);
- bytenr = sum->sums[0].bytenr;
-
- /*
- * we don't care about the results, the point of this search is
- * just to get the btree leaves into ram
- */
- btrfs_lookup_csum(NULL, root->fs_info->csum_root, path, bytenr, 0);
-}
-
/* as ordered data IO finishes, this gets called so we can finish
* an ordered extent if the range of bytes in the file it covers are
* fully written.
@@ -1708,7 +1691,6 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
struct btrfs_trans_handle *trans;
struct btrfs_ordered_extent *ordered_extent = NULL;
struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
- struct btrfs_path *path;
int compressed = 0;
int ret;
@@ -1716,32 +1698,9 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
if (!ret)
return 0;
- /*
- * before we join the transaction, try to do some of our IO.
- * This will limit the amount of IO that we have to do with
- * the transaction running. We're unlikely to need to do any
- * IO if the file extents are new, the disk_i_size checks
- * covers the most common case.
- */
- if (start < BTRFS_I(inode)->disk_i_size) {
- path = btrfs_alloc_path();
- if (path) {
- ret = btrfs_lookup_file_extent(NULL, root, path,
- inode->i_ino,
- start, 0);
- ordered_extent = btrfs_lookup_ordered_extent(inode,
- start);
- if (!list_empty(&ordered_extent->list)) {
- btrfs_release_path(root, path);
- reada_csum(root, path, ordered_extent);
- }
- btrfs_free_path(path);
- }
- }
-
- if (!ordered_extent)
- ordered_extent = btrfs_lookup_ordered_extent(inode, start);
+ ordered_extent = btrfs_lookup_ordered_extent(inode, start);
BUG_ON(!ordered_extent);
+
if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
BUG_ON(!list_empty(&ordered_extent->list));
ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -3995,7 +3954,11 @@ skip:
/* Reached end of directory/root. Bump pos past the last item. */
if (key_type == BTRFS_DIR_INDEX_KEY)
- filp->f_pos = INT_LIMIT(off_t);
+ /*
+ * 32-bit glibc will use getdents64, but then strtol -
+ * so the last number we can serve is this.
+ */
+ filp->f_pos = 0x7fffffff;
else
filp->f_pos++;
nopos:
@@ -5789,7 +5752,7 @@ out_fail:
}
static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
- u64 alloc_hint, int mode)
+ u64 alloc_hint, int mode, loff_t actual_len)
{
struct btrfs_trans_handle *trans;
struct btrfs_root *root = BTRFS_I(inode)->root;
@@ -5798,6 +5761,7 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
u64 cur_offset = start;
u64 num_bytes = end - start;
int ret = 0;
+ u64 i_size;
while (num_bytes > 0) {
alloc_size = min(num_bytes, root->fs_info->max_extent);
@@ -5835,9 +5799,15 @@ static int prealloc_file_range(struct inode *inode, u64 start, u64 end,
inode->i_ctime = CURRENT_TIME;
BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
- cur_offset > inode->i_size) {
- i_size_write(inode, cur_offset);
- btrfs_ordered_update_i_size(inode, cur_offset, NULL);
+ (actual_len > inode->i_size) &&
+ (cur_offset > inode->i_size)) {
+
+ if (cur_offset > actual_len)
+ i_size = actual_len;
+ else
+ i_size = cur_offset;
+ i_size_write(inode, i_size);
+ btrfs_ordered_update_i_size(inode, i_size, NULL);
}
ret = btrfs_update_inode(trans, root, inode);
@@ -5930,7 +5900,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
ret = prealloc_file_range(inode,
cur_offset, last_byte,
- alloc_hint, mode);
+ alloc_hint, mode, offset+len);
if (ret < 0) {
free_extent_map(em);
break;
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index b10a49d4bc6a..5c2a9e78a949 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -626,6 +626,8 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
if (ordered)
offset = entry_end(ordered);
+ else
+ offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
mutex_lock(&tree->mutex);
disk_i_size = BTRFS_I(inode)->disk_i_size;
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a9728680eca8..ab7ab5318745 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -3281,8 +3281,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
return -ENOMEM;
path = btrfs_alloc_path();
- if (!path)
+ if (!path) {
+ kfree(cluster);
return -ENOMEM;
+ }
rc->extents_found = 0;
rc->extents_skipped = 0;
@@ -3762,7 +3764,8 @@ out:
BTRFS_DATA_RELOC_TREE_OBJECTID);
if (IS_ERR(fs_root))
err = PTR_ERR(fs_root);
- btrfs_orphan_cleanup(fs_root);
+ else
+ btrfs_orphan_cleanup(fs_root);
}
return err;
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 3f9b45704fcd..8a1ea6e64575 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -66,7 +66,8 @@ enum {
Opt_degraded, Opt_subvol, Opt_device, Opt_nodatasum, Opt_nodatacow,
Opt_max_extent, Opt_max_inline, Opt_alloc_start, Opt_nobarrier,
Opt_ssd, Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl,
- Opt_compress, Opt_notreelog, Opt_ratio, Opt_flushoncommit,
+ Opt_compress, Opt_compress_force, Opt_notreelog, Opt_ratio,
+ Opt_flushoncommit,
Opt_discard, Opt_err,
};
@@ -82,6 +83,7 @@ static match_table_t tokens = {
{Opt_alloc_start, "alloc_start=%s"},
{Opt_thread_pool, "thread_pool=%d"},
{Opt_compress, "compress"},
+ {Opt_compress_force, "compress-force"},
{Opt_ssd, "ssd"},
{Opt_ssd_spread, "ssd_spread"},
{Opt_nossd, "nossd"},
@@ -173,6 +175,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
printk(KERN_INFO "btrfs: use compression\n");
btrfs_set_opt(info->mount_opt, COMPRESS);
break;
+ case Opt_compress_force:
+ printk(KERN_INFO "btrfs: forcing compression\n");
+ btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
+ btrfs_set_opt(info->mount_opt, COMPRESS);
+ break;
case Opt_ssd:
printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
btrfs_set_opt(info->mount_opt, SSD);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 198cff28766d..41ecbb2347f2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1135,7 +1135,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
root->fs_info->avail_metadata_alloc_bits;
if ((all_avail & BTRFS_BLOCK_GROUP_RAID10) &&
- root->fs_info->fs_devices->rw_devices <= 4) {
+ root->fs_info->fs_devices->num_devices <= 4) {
printk(KERN_ERR "btrfs: unable to go below four devices "
"on raid10\n");
ret = -EINVAL;
@@ -1143,7 +1143,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
}
if ((all_avail & BTRFS_BLOCK_GROUP_RAID1) &&
- root->fs_info->fs_devices->rw_devices <= 2) {
+ root->fs_info->fs_devices->num_devices <= 2) {
printk(KERN_ERR "btrfs: unable to go below two "
"devices on raid1\n");
ret = -EINVAL;
@@ -1434,8 +1434,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
return -EINVAL;
bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder);
- if (!bdev)
- return -EIO;
+ if (IS_ERR(bdev))
+ return PTR_ERR(bdev);
if (root->fs_info->fs_devices->seeding) {
seeding_dev = 1;
@@ -2538,6 +2538,11 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset)
if (!em)
return 1;
+ if (btrfs_test_opt(root, DEGRADED)) {
+ free_extent_map(em);
+ return 0;
+ }
+
map = (struct map_lookup *)em->bdev;
for (i = 0; i < map->num_stripes; i++) {
if (!map->stripes[i].dev->writeable) {
@@ -2649,8 +2654,10 @@ again:
em = lookup_extent_mapping(em_tree, logical, *length);
read_unlock(&em_tree->lock);
- if (!em && unplug_page)
+ if (!em && unplug_page) {
+ kfree(multi);
return 0;
+ }
if (!em) {
printk(KERN_CRIT "unable to find logical %llu len %llu\n",
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index 14ac4806e291..eeb4986ea7db 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -348,7 +348,17 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
dir = dget_parent(object->dentry);
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- ret = cachefiles_bury_object(cache, dir, object->dentry);
+
+ /* we need to check that our parent is _still_ our parent - it may have
+ * been renamed */
+ if (dir == object->dentry->d_parent) {
+ ret = cachefiles_bury_object(cache, dir, object->dentry);
+ } else {
+ /* it got moved, presumably by cachefilesd culling it, so it's
+ * no longer in the key path and we can ignore it */
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = 0;
+ }
dput(dir);
_leave(" = %d", ret);
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 094ea65afc85..49503d2edc7e 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,3 +1,7 @@
+Version 1.62
+------------
+Add sockopt=TCP_NODELAY mount option.
+
Version 1.61
------------
Fix append problem to Samba servers (files opened with O_APPEND could
@@ -5,7 +9,9 @@ have duplicated data). Fix oops in cifs_lookup. Workaround problem
mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
Disable use of server inode numbers when server only
partially supports them (e.g. for one server querying inode numbers on
-FindFirst fails but QPathInfo queries works).
+FindFirst fails but QPathInfo queries works). Fix oops with dfs in
+cifs_put_smb_ses. Fix mmap to work on directio mounts (needed
+for OpenOffice when on forcedirectio mount e.g.)
Version 1.60
-------------
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index fea9e898c4ba..b44ce0a0711c 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -269,7 +269,7 @@ static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
int err;
mntget(newmnt);
- err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags, mntlist);
+ err = do_add_mount(newmnt, &nd->path, nd->path.mnt->mnt_flags | MNT_SHRINKABLE, mntlist);
switch (err) {
case 0:
path_put(&nd->path);
@@ -371,7 +371,6 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
if (IS_ERR(mnt))
goto out_err;
- nd->path.mnt->mnt_flags |= MNT_SHRINKABLE;
rc = add_mount_helper(mnt, nd, &cifs_dfs_automount_list);
out:
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 29f1da761bbf..8c6a03627176 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -758,7 +758,7 @@ const struct file_operations cifs_file_ops = {
};
const struct file_operations cifs_file_direct_ops = {
- /* no mmap, no aio, no readv -
+ /* no aio, no readv -
BB reevaluate whether they can be done with directio, no cache */
.read = cifs_user_read,
.write = cifs_user_write,
@@ -767,6 +767,7 @@ const struct file_operations cifs_file_direct_ops = {
.lock = cifs_lock,
.fsync = cifs_fsync,
.flush = cifs_flush,
+ .mmap = cifs_file_mmap,
.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
.unlocked_ioctl = cifs_ioctl,
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index ac2b24c192f8..78c1b86d55f6 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -113,5 +113,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* EXPERIMENTAL */
-#define CIFS_VERSION "1.61"
+#define CIFS_VERSION "1.62"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4b35f7ec0583..ed751bb657db 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -149,6 +149,7 @@ struct TCP_Server_Info {
bool svlocal:1; /* local server or remote */
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
+ bool tcp_nodelay;
atomic_t inFlight; /* number of requests on the wire to server */
#ifdef CONFIG_CIFS_STATS2
atomic_t inSend; /* requests trying to send */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 63ea83ff687f..2e9e09ca0e30 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -98,7 +98,7 @@ struct smb_vol {
bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
unsigned int rsize;
unsigned int wsize;
- unsigned int sockopt;
+ bool sockopt_tcp_nodelay:1;
unsigned short int port;
char *prepath;
};
@@ -1142,9 +1142,11 @@ cifs_parse_mount_options(char *options, const char *devname,
simple_strtoul(value, &value, 0);
}
} else if (strnicmp(data, "sockopt", 5) == 0) {
- if (value && *value) {
- vol->sockopt =
- simple_strtoul(value, &value, 0);
+ if (!value || !*value) {
+ cERROR(1, ("no socket option specified"));
+ continue;
+ } else if (strnicmp(value, "TCP_NODELAY", 11) == 0) {
+ vol->sockopt_tcp_nodelay = 1;
}
} else if (strnicmp(data, "netbiosname", 4) == 0) {
if (!value || !*value || (*value == ' ')) {
@@ -1514,6 +1516,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->noblocksnd = volume_info->noblocksnd;
tcp_ses->noautotune = volume_info->noautotune;
+ tcp_ses->tcp_nodelay = volume_info->sockopt_tcp_nodelay;
atomic_set(&tcp_ses->inFlight, 0);
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
@@ -1764,6 +1767,7 @@ static int
ipv4_connect(struct TCP_Server_Info *server)
{
int rc = 0;
+ int val;
bool connected = false;
__be16 orig_port = 0;
struct socket *socket = server->ssocket;
@@ -1845,6 +1849,14 @@ ipv4_connect(struct TCP_Server_Info *server)
socket->sk->sk_rcvbuf = 140 * 1024;
}
+ if (server->tcp_nodelay) {
+ val = 1;
+ rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof(val));
+ if (rc)
+ cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ }
+
cFYI(1, ("sndbuf %d rcvbuf %d rcvtimeo 0x%lx",
socket->sk->sk_sndbuf,
socket->sk->sk_rcvbuf, socket->sk->sk_rcvtimeo));
@@ -1916,6 +1928,7 @@ static int
ipv6_connect(struct TCP_Server_Info *server)
{
int rc = 0;
+ int val;
bool connected = false;
__be16 orig_port = 0;
struct socket *socket = server->ssocket;
@@ -1987,6 +2000,15 @@ ipv6_connect(struct TCP_Server_Info *server)
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
socket->sk->sk_sndtimeo = 5 * HZ;
+
+ if (server->tcp_nodelay) {
+ val = 1;
+ rc = kernel_setsockopt(socket, SOL_TCP, TCP_NODELAY,
+ (char *)&val, sizeof(val));
+ if (rc)
+ cFYI(1, ("set TCP_NODELAY socket option error %d", rc));
+ }
+
server->ssocket = socket;
return rc;
@@ -2287,12 +2309,12 @@ int
cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
char *mount_data_global, const char *devname)
{
- int rc = 0;
+ int rc;
int xid;
struct smb_vol *volume_info;
- struct cifsSesInfo *pSesInfo = NULL;
- struct cifsTconInfo *tcon = NULL;
- struct TCP_Server_Info *srvTcp = NULL;
+ struct cifsSesInfo *pSesInfo;
+ struct cifsTconInfo *tcon;
+ struct TCP_Server_Info *srvTcp;
char *full_path;
char *mount_data = mount_data_global;
#ifdef CONFIG_CIFS_DFS_UPCALL
@@ -2301,6 +2323,10 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
int referral_walks_count = 0;
try_mount_again:
#endif
+ rc = 0;
+ tcon = NULL;
+ pSesInfo = NULL;
+ srvTcp = NULL;
full_path = NULL;
xid = GetXid();
@@ -2597,6 +2623,7 @@ remote_path_check:
cleanup_volume_info(&volume_info);
referral_walks_count++;
+ FreeXid(xid);
goto try_mount_again;
}
#else /* No DFS support, return error on mount */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index cf18ee765590..e3fda978f481 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1762,8 +1762,18 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
CIFS_MOUNT_MAP_SPECIAL_CHR);
}
- if (!rc)
+ if (!rc) {
rc = inode_setattr(inode, attrs);
+
+ /* force revalidate when any of these times are set since some
+ of the fs types (eg ext3, fat) do not have fine enough
+ time granularity to match protocol, and we do not have a
+ a way (yet) to query the server fs's time granularity (and
+ whether it rounds times down).
+ */
+ if (!rc && (attrs->ia_valid & (ATTR_MTIME | ATTR_CTIME)))
+ cifsInode->time = 0;
+ }
out:
kfree(args);
kfree(full_path);
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f84062f9a985..c343b14ba2d3 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -77,6 +77,11 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
cFYI(1, ("For %s", name->name));
+ if (parent->d_op && parent->d_op->d_hash)
+ parent->d_op->d_hash(parent, name);
+ else
+ name->hash = full_name_hash(name->name, name->len);
+
dentry = d_lookup(parent, name);
if (dentry) {
/* FIXME: check for inode number changes? */
@@ -666,12 +671,11 @@ static int cifs_get_name_from_search_buf(struct qstr *pqst,
min(len, max_len), nlt,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
+ pqst->len -= nls_nullsize(nlt);
} else {
pqst->name = filename;
pqst->len = len;
}
- pqst->hash = full_name_hash(pqst->name, pqst->len);
-/* cFYI(1, ("filldir on %s",pqst->name)); */
return rc;
}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7085a6275c4c..aaa9c1c5a5bd 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -223,9 +223,9 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* null user mount */
*bcc_ptr = 0;
*(bcc_ptr+1) = 0;
- } else { /* 300 should be long enough for any conceivable user name */
+ } else {
bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->userName,
- 300, nls_cp);
+ MAX_USERNAME_SIZE, nls_cp);
}
bcc_ptr += 2 * bytes_ret;
bcc_ptr += 2; /* account for null termination */
@@ -246,11 +246,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
/* copy user */
if (ses->userName == NULL) {
/* BB what about null user mounts - check that we do this BB */
- } else { /* 300 should be long enough for any conceivable user name */
- strncpy(bcc_ptr, ses->userName, 300);
+ } else {
+ strncpy(bcc_ptr, ses->userName, MAX_USERNAME_SIZE);
}
- /* BB improve check for overflow */
- bcc_ptr += strnlen(ses->userName, 300);
+ bcc_ptr += strnlen(ses->userName, MAX_USERNAME_SIZE);
*bcc_ptr = 0;
bcc_ptr++; /* account for null termination */
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 332dd00f0894..0ca9ec4a79c3 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -301,6 +301,12 @@ static int sg_ioctl_trans(unsigned int fd, unsigned int cmd,
u32 data;
void __user *dxferp;
int err;
+ int interface_id;
+
+ if (get_user(interface_id, &sgio32->interface_id))
+ return -EFAULT;
+ if (interface_id != 'S')
+ return sys_ioctl(fd, cmd, (unsigned long)sgio32);
if (get_user(iovec_count, &sgio32->iovec_count))
return -EFAULT;
@@ -936,6 +942,7 @@ COMPATIBLE_IOCTL(TCSETSF)
COMPATIBLE_IOCTL(TIOCLINUX)
COMPATIBLE_IOCTL(TIOCSBRK)
COMPATIBLE_IOCTL(TIOCCBRK)
+COMPATIBLE_IOCTL(TIOCGSID)
COMPATIBLE_IOCTL(TIOCGICOUNT)
/* Little t */
COMPATIBLE_IOCTL(TIOCGETD)
@@ -1005,6 +1012,9 @@ COMPATIBLE_IOCTL(SCSI_IOCTL_SEND_COMMAND)
COMPATIBLE_IOCTL(SCSI_IOCTL_PROBE_HOST)
COMPATIBLE_IOCTL(SCSI_IOCTL_GET_PCI)
#endif
+/* Big V (don't complain on serial console) */
+IGNORE_IOCTL(VT_OPENQRY)
+IGNORE_IOCTL(VT_GETMODE)
/* Little p (/dev/rtc, /dev/envctrl, etc.) */
COMPATIBLE_IOCTL(RTC_AIE_ON)
COMPATIBLE_IOCTL(RTC_AIE_OFF)
@@ -1035,6 +1045,8 @@ COMPATIBLE_IOCTL(FIOQSIZE)
#ifdef CONFIG_BLOCK
/* loop */
IGNORE_IOCTL(LOOP_CLR_FD)
+/* md calls this on random blockdevs */
+IGNORE_IOCTL(RAID_VERSION)
/* SG stuff */
COMPATIBLE_IOCTL(SG_SET_TIMEOUT)
COMPATIBLE_IOCTL(SG_GET_TIMEOUT)
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index c8afa6b1d91d..32a5f46b1157 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -121,8 +121,10 @@ static int get_target(const char *symname, struct path *path,
ret = -ENOENT;
path_put(path);
}
- } else
+ } else {
ret = -EPERM;
+ path_put(path);
+ }
}
return ret;
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index b486169f42bf..274ac865bae8 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -160,15 +160,8 @@ static int debugfs_create_by_name(const char *name, mode_t mode,
* block. A pointer to that is in the struct vfsmount that we
* have around.
*/
- if (!parent) {
- if (debugfs_mount && debugfs_mount->mnt_sb) {
- parent = debugfs_mount->mnt_sb->s_root;
- }
- }
- if (!parent) {
- pr_debug("debugfs: Ah! can not find a parent!\n");
- return -EFAULT;
- }
+ if (!parent)
+ parent = debugfs_mount->mnt_sb->s_root;
*dentry = NULL;
mutex_lock(&parent->d_inode->i_mutex);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index fbb6e5eed697..7cb0a59f4b9d 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1748,7 +1748,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
char *cipher_name, size_t *key_size)
{
char dummy_key[ECRYPTFS_MAX_KEY_BYTES];
- char *full_alg_name;
+ char *full_alg_name = NULL;
int rc;
*key_tfm = NULL;
@@ -1763,7 +1763,6 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
if (rc)
goto out;
*key_tfm = crypto_alloc_blkcipher(full_alg_name, 0, CRYPTO_ALG_ASYNC);
- kfree(full_alg_name);
if (IS_ERR(*key_tfm)) {
rc = PTR_ERR(*key_tfm);
printk(KERN_ERR "Unable to allocate crypto cipher with name "
@@ -1786,6 +1785,7 @@ ecryptfs_process_key_cipher(struct crypto_blkcipher **key_tfm,
goto out;
}
out:
+ kfree(full_alg_name);
return rc;
}
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 9e944057001b..678172b61be2 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -158,7 +158,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
struct dentry *ecryptfs_dentry = file->f_path.dentry;
/* Private value of ecryptfs_dentry allocated in
* ecryptfs_lookup() */
- struct dentry *lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry);
+ struct dentry *lower_dentry;
struct ecryptfs_file_info *file_info;
mount_crypt_stat = &ecryptfs_superblock_to_private(
@@ -191,13 +191,6 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
| ECRYPTFS_ENCRYPTED);
}
mutex_unlock(&crypt_stat->cs_mutex);
- if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
- && !(file->f_flags & O_RDONLY)) {
- rc = -EPERM;
- printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
- "file must hence be opened RO\n", __func__);
- goto out;
- }
if (!ecryptfs_inode_to_private(inode)->lower_file) {
rc = ecryptfs_init_persistent_file(ecryptfs_dentry);
if (rc) {
@@ -208,6 +201,13 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
goto out;
}
}
+ if ((ecryptfs_inode_to_private(inode)->lower_file->f_flags & O_RDONLY)
+ && !(file->f_flags & O_RDONLY)) {
+ rc = -EPERM;
+ printk(KERN_WARNING "%s: Lower persistent file is RO; eCryptfs "
+ "file must hence be opened RO\n", __func__);
+ goto out;
+ }
ecryptfs_set_file_lower(
file, ecryptfs_inode_to_private(inode)->lower_file);
if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
@@ -299,7 +299,6 @@ static int ecryptfs_ioctl(struct inode *inode, struct file *file,
const struct file_operations ecryptfs_dir_fops = {
.readdir = ecryptfs_readdir,
.ioctl = ecryptfs_ioctl,
- .mmap = generic_file_mmap,
.open = ecryptfs_open,
.flush = ecryptfs_flush,
.release = ecryptfs_release,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index 429ca0b3ba08..4a430ab4115c 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -282,7 +282,8 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
goto out;
}
rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry,
- ecryptfs_dir_inode->i_sb, 1);
+ ecryptfs_dir_inode->i_sb,
+ ECRYPTFS_INTERPOSE_FLAG_D_ADD);
if (rc) {
printk(KERN_ERR "%s: Error interposing; rc = [%d]\n",
__func__, rc);
@@ -463,9 +464,6 @@ out_lock:
unlock_dir(lower_dir_dentry);
dput(lower_new_dentry);
dput(lower_old_dentry);
- d_drop(lower_old_dentry);
- d_drop(new_dentry);
- d_drop(old_dentry);
return rc;
}
@@ -614,6 +612,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
struct dentry *lower_new_dentry;
struct dentry *lower_old_dir_dentry;
struct dentry *lower_new_dir_dentry;
+ struct dentry *trap = NULL;
lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
@@ -621,7 +620,17 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry,
dget(lower_new_dentry);
lower_old_dir_dentry = dget_parent(lower_old_dentry);
lower_new_dir_dentry = dget_parent(lower_new_dentry);
- lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ /* source should not be ancestor of target */
+ if (trap == lower_old_dentry) {
+ rc = -EINVAL;
+ goto out_lock;
+ }
+ /* target should not be ancestor of source */
+ if (trap == lower_new_dentry) {
+ rc = -ENOTEMPTY;
+ goto out_lock;
+ }
rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
lower_new_dir_dentry->d_inode, lower_new_dentry);
if (rc)
@@ -715,31 +724,31 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
/* Released in ecryptfs_put_link(); only release here on error */
buf = kmalloc(len, GFP_KERNEL);
if (!buf) {
- rc = -ENOMEM;
+ buf = ERR_PTR(-ENOMEM);
goto out;
}
old_fs = get_fs();
set_fs(get_ds());
rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
set_fs(old_fs);
- if (rc < 0)
- goto out_free;
- else
+ if (rc < 0) {
+ kfree(buf);
+ buf = ERR_PTR(rc);
+ } else
buf[rc] = '\0';
- rc = 0;
- nd_set_link(nd, buf);
- goto out;
-out_free:
- kfree(buf);
out:
- return ERR_PTR(rc);
+ nd_set_link(nd, buf);
+ return NULL;
}
static void
ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
{
- /* Free the char* */
- kfree(nd_get_link(nd));
+ char *buf = nd_get_link(nd);
+ if (!IS_ERR(buf)) {
+ /* Free the char* */
+ kfree(buf);
+ }
}
/**
@@ -772,18 +781,23 @@ upper_size_to_lower_size(struct ecryptfs_crypt_stat *crypt_stat,
}
/**
- * ecryptfs_truncate
+ * truncate_upper
* @dentry: The ecryptfs layer dentry
- * @new_length: The length to expand the file to
+ * @ia: Address of the ecryptfs inode's attributes
+ * @lower_ia: Address of the lower inode's attributes
*
* Function to handle truncations modifying the size of the file. Note
* that the file sizes are interpolated. When expanding, we are simply
- * writing strings of 0's out. When truncating, we need to modify the
- * underlying file size according to the page index interpolations.
+ * writing strings of 0's out. When truncating, we truncate the upper
+ * inode and update the lower_ia according to the page index
+ * interpolations. If ATTR_SIZE is set in lower_ia->ia_valid upon return,
+ * the caller must use lower_ia in a call to notify_change() to perform
+ * the truncation of the lower inode.
*
* Returns zero on success; non-zero otherwise
*/
-int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
+static int truncate_upper(struct dentry *dentry, struct iattr *ia,
+ struct iattr *lower_ia)
{
int rc = 0;
struct inode *inode = dentry->d_inode;
@@ -794,8 +808,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
loff_t lower_size_before_truncate;
loff_t lower_size_after_truncate;
- if (unlikely((new_length == i_size)))
+ if (unlikely((ia->ia_size == i_size))) {
+ lower_ia->ia_valid &= ~ATTR_SIZE;
goto out;
+ }
crypt_stat = &ecryptfs_inode_to_private(dentry->d_inode)->crypt_stat;
/* Set up a fake ecryptfs file, this is used to interface with
* the file in the underlying filesystem so that the
@@ -815,28 +831,30 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
&fake_ecryptfs_file,
ecryptfs_inode_to_private(dentry->d_inode)->lower_file);
/* Switch on growing or shrinking file */
- if (new_length > i_size) {
+ if (ia->ia_size > i_size) {
char zero[] = { 0x00 };
+ lower_ia->ia_valid &= ~ATTR_SIZE;
/* Write a single 0 at the last position of the file;
* this triggers code that will fill in 0's throughout
* the intermediate portion of the previous end of the
* file and the new end of the file */
rc = ecryptfs_write(&fake_ecryptfs_file, zero,
- (new_length - 1), 1);
- } else { /* new_length < i_size_read(inode) */
- /* We're chopping off all the pages down do the page
- * in which new_length is located. Fill in the end of
- * that page from (new_length & ~PAGE_CACHE_MASK) to
+ (ia->ia_size - 1), 1);
+ } else { /* ia->ia_size < i_size_read(inode) */
+ /* We're chopping off all the pages down to the page
+ * in which ia->ia_size is located. Fill in the end of
+ * that page from (ia->ia_size & ~PAGE_CACHE_MASK) to
* PAGE_CACHE_SIZE with zeros. */
size_t num_zeros = (PAGE_CACHE_SIZE
- - (new_length & ~PAGE_CACHE_MASK));
+ - (ia->ia_size & ~PAGE_CACHE_MASK));
if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
- rc = vmtruncate(inode, new_length);
+ rc = vmtruncate(inode, ia->ia_size);
if (rc)
goto out_free;
- rc = vmtruncate(lower_dentry->d_inode, new_length);
+ lower_ia->ia_size = ia->ia_size;
+ lower_ia->ia_valid |= ATTR_SIZE;
goto out_free;
}
if (num_zeros) {
@@ -848,7 +866,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
goto out_free;
}
rc = ecryptfs_write(&fake_ecryptfs_file, zeros_virt,
- new_length, num_zeros);
+ ia->ia_size, num_zeros);
kfree(zeros_virt);
if (rc) {
printk(KERN_ERR "Error attempting to zero out "
@@ -857,7 +875,7 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
goto out_free;
}
}
- vmtruncate(inode, new_length);
+ vmtruncate(inode, ia->ia_size);
rc = ecryptfs_write_inode_size_to_metadata(inode);
if (rc) {
printk(KERN_ERR "Problem with "
@@ -870,10 +888,12 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
lower_size_before_truncate =
upper_size_to_lower_size(crypt_stat, i_size);
lower_size_after_truncate =
- upper_size_to_lower_size(crypt_stat, new_length);
- if (lower_size_after_truncate < lower_size_before_truncate)
- vmtruncate(lower_dentry->d_inode,
- lower_size_after_truncate);
+ upper_size_to_lower_size(crypt_stat, ia->ia_size);
+ if (lower_size_after_truncate < lower_size_before_truncate) {
+ lower_ia->ia_size = lower_size_after_truncate;
+ lower_ia->ia_valid |= ATTR_SIZE;
+ } else
+ lower_ia->ia_valid &= ~ATTR_SIZE;
}
out_free:
if (ecryptfs_file_to_private(&fake_ecryptfs_file))
@@ -883,6 +903,33 @@ out:
return rc;
}
+/**
+ * ecryptfs_truncate
+ * @dentry: The ecryptfs layer dentry
+ * @new_length: The length to expand the file to
+ *
+ * Simple function that handles the truncation of an eCryptfs inode and
+ * its corresponding lower inode.
+ *
+ * Returns zero on success; non-zero otherwise
+ */
+int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
+{
+ struct iattr ia = { .ia_valid = ATTR_SIZE, .ia_size = new_length };
+ struct iattr lower_ia = { .ia_valid = 0 };
+ int rc;
+
+ rc = truncate_upper(dentry, &ia, &lower_ia);
+ if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
+
+ mutex_lock(&lower_dentry->d_inode->i_mutex);
+ rc = notify_change(lower_dentry, &lower_ia);
+ mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ }
+ return rc;
+}
+
static int
ecryptfs_permission(struct inode *inode, int mask)
{
@@ -905,6 +952,7 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
{
int rc = 0;
struct dentry *lower_dentry;
+ struct iattr lower_ia;
struct inode *inode;
struct inode *lower_inode;
struct ecryptfs_crypt_stat *crypt_stat;
@@ -943,15 +991,11 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
}
}
mutex_unlock(&crypt_stat->cs_mutex);
+ memcpy(&lower_ia, ia, sizeof(lower_ia));
+ if (ia->ia_valid & ATTR_FILE)
+ lower_ia.ia_file = ecryptfs_file_to_lower(ia->ia_file);
if (ia->ia_valid & ATTR_SIZE) {
- ecryptfs_printk(KERN_DEBUG,
- "ia->ia_valid = [0x%x] ATTR_SIZE" " = [0x%x]\n",
- ia->ia_valid, ATTR_SIZE);
- rc = ecryptfs_truncate(dentry, ia->ia_size);
- /* ecryptfs_truncate handles resizing of the lower file */
- ia->ia_valid &= ~ATTR_SIZE;
- ecryptfs_printk(KERN_DEBUG, "ia->ia_valid = [%x]\n",
- ia->ia_valid);
+ rc = truncate_upper(dentry, ia, &lower_ia);
if (rc < 0)
goto out;
}
@@ -960,17 +1004,32 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
* mode change is for clearing setuid/setgid bits. Allow lower fs
* to interpret this in its own way.
*/
- if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
- ia->ia_valid &= ~ATTR_MODE;
+ if (lower_ia.ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID))
+ lower_ia.ia_valid &= ~ATTR_MODE;
mutex_lock(&lower_dentry->d_inode->i_mutex);
- rc = notify_change(lower_dentry, ia);
+ rc = notify_change(lower_dentry, &lower_ia);
mutex_unlock(&lower_dentry->d_inode->i_mutex);
out:
fsstack_copy_attr_all(inode, lower_inode);
return rc;
}
+int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
+ struct kstat *stat)
+{
+ struct kstat lower_stat;
+ int rc;
+
+ rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
+ ecryptfs_dentry_to_lower(dentry), &lower_stat);
+ if (!rc) {
+ generic_fillattr(dentry->d_inode, stat);
+ stat->blocks = lower_stat.blocks;
+ }
+ return rc;
+}
+
int
ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value,
size_t size, int flags)
@@ -1100,6 +1159,7 @@ const struct inode_operations ecryptfs_dir_iops = {
const struct inode_operations ecryptfs_main_iops = {
.permission = ecryptfs_permission,
.setattr = ecryptfs_setattr,
+ .getattr = ecryptfs_getattr,
.setxattr = ecryptfs_setxattr,
.getxattr = ecryptfs_getxattr,
.listxattr = ecryptfs_listxattr,
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 567bc4b9f70a..ea2f92101dfe 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -585,8 +585,8 @@ out:
* with as much information as it can before needing
* the lower filesystem.
* ecryptfs_read_super(): this accesses the lower filesystem and uses
- * ecryptfs_interpolate to perform most of the linking
- * ecryptfs_interpolate(): links the lower filesystem into ecryptfs
+ * ecryptfs_interpose to perform most of the linking
+ * ecryptfs_interpose(): links the lower filesystem into ecryptfs (inode.c)
*/
static int ecryptfs_get_sb(struct file_system_type *fs_type, int flags,
const char *dev_name, void *raw_data,
diff --git a/fs/eventfd.c b/fs/eventfd.c
index d26402ff06ea..7758cc382ef0 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -135,26 +135,71 @@ static unsigned int eventfd_poll(struct file *file, poll_table *wait)
return events;
}
-static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
+static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
+{
+ *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
+ ctx->count -= *cnt;
+}
+
+/**
+ * eventfd_ctx_remove_wait_queue - Reads the current counter and removes the wait queue entry.
+ * @ctx: [in] Pointer to eventfd context.
+ * @wait: [in] Wait queue to be removed.
+ * @cnt: [out] Pointer to the 64-bit counter value.
+ *
+ * Returns zero if successful, or the following error codes:
+ *
+ * -EAGAIN : The operation would have blocked.
+ *
+ * This is used to atomically remove a wait queue entry from the eventfd wait
+ * queue head, and read/reset the counter value.
+ */
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+ __u64 *cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->wqh.lock, flags);
+ eventfd_ctx_do_read(ctx, cnt);
+ __remove_wait_queue(&ctx->wqh, wait);
+ if (*cnt != 0 && waitqueue_active(&ctx->wqh))
+ wake_up_locked_poll(&ctx->wqh, POLLOUT);
+ spin_unlock_irqrestore(&ctx->wqh.lock, flags);
+
+ return *cnt != 0 ? 0 : -EAGAIN;
+}
+EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
+
+/**
+ * eventfd_ctx_read - Reads the eventfd counter or waits if it is zero.
+ * @ctx: [in] Pointer to eventfd context.
+ * @no_wait: [in] Nonzero if the operation should not block.
+ * @cnt: [out] Pointer to the 64-bit counter value.
+ *
+ * Returns zero if successful, or the following error codes:
+ *
+ * -EAGAIN : The operation would have blocked but @no_wait was nonzero.
+ * -ERESTARTSYS : A signal interrupted the wait operation.
+ *
+ * If @no_wait is zero, the function might sleep until the eventfd internal
+ * counter becomes greater than zero.
+ */
+ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt)
{
- struct eventfd_ctx *ctx = file->private_data;
ssize_t res;
- __u64 ucnt = 0;
DECLARE_WAITQUEUE(wait, current);
- if (count < sizeof(ucnt))
- return -EINVAL;
spin_lock_irq(&ctx->wqh.lock);
+ *cnt = 0;
res = -EAGAIN;
if (ctx->count > 0)
- res = sizeof(ucnt);
- else if (!(file->f_flags & O_NONBLOCK)) {
+ res = 0;
+ else if (!no_wait) {
__add_wait_queue(&ctx->wqh, &wait);
- for (res = 0;;) {
+ for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (ctx->count > 0) {
- res = sizeof(ucnt);
+ res = 0;
break;
}
if (signal_pending(current)) {
@@ -168,18 +213,32 @@ static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
__remove_wait_queue(&ctx->wqh, &wait);
__set_current_state(TASK_RUNNING);
}
- if (likely(res > 0)) {
- ucnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count;
- ctx->count -= ucnt;
+ if (likely(res == 0)) {
+ eventfd_ctx_do_read(ctx, cnt);
if (waitqueue_active(&ctx->wqh))
wake_up_locked_poll(&ctx->wqh, POLLOUT);
}
spin_unlock_irq(&ctx->wqh.lock);
- if (res > 0 && put_user(ucnt, (__u64 __user *) buf))
- return -EFAULT;
return res;
}
+EXPORT_SYMBOL_GPL(eventfd_ctx_read);
+
+static ssize_t eventfd_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ struct eventfd_ctx *ctx = file->private_data;
+ ssize_t res;
+ __u64 cnt;
+
+ if (count < sizeof(cnt))
+ return -EINVAL;
+ res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt);
+ if (res < 0)
+ return res;
+
+ return put_user(cnt, (__u64 __user *) buf) ? -EFAULT : sizeof(cnt);
+}
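(Illustration, not part of this patch: with eventfd_ctx_read() and eventfd_ctx_remove_wait_queue() now exported, an in-kernel consumer such as a KVM-irqfd-style user that obtained the context via eventfd_ctx_fdget() could drain the counter roughly as sketched below; the example_* name is hypothetical.)

/* Hypothetical in-kernel consumer; assumes <linux/eventfd.h> and
 * <linux/kernel.h> are included and ctx came from eventfd_ctx_fdget(). */
static void example_drain_eventfd(struct eventfd_ctx *ctx)
{
	__u64 cnt;

	/* no_wait != 0: return -EAGAIN instead of sleeping when the
	 * counter is zero; on success cnt holds the value read. */
	if (eventfd_ctx_read(ctx, 1, &cnt) == 0)
		pr_info("eventfd drained, counter was %llu\n",
			(unsigned long long)cnt);
}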
static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos)
diff --git a/fs/exec.c b/fs/exec.c
index 632b02e34ec7..e95c692ef0e4 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -571,6 +571,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
struct vm_area_struct *prev = NULL;
unsigned long vm_flags;
unsigned long stack_base;
+ unsigned long stack_size;
+ unsigned long stack_expand;
+ unsigned long rlim_stack;
#ifdef CONFIG_STACK_GROWSUP
/* Limit stack size to 1GB */
@@ -627,10 +630,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
goto out_unlock;
}
+ stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ stack_size = vma->vm_end - vma->vm_start;
+ /*
+ * Align this down to a page boundary as expand_stack
+ * will align it up.
+ */
+ rlim_stack = rlimit(RLIMIT_STACK) & PAGE_MASK;
+ rlim_stack = min(rlim_stack, stack_size);
#ifdef CONFIG_STACK_GROWSUP
- stack_base = vma->vm_end + EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ if (stack_size + stack_expand > rlim_stack)
+ stack_base = vma->vm_start + rlim_stack;
+ else
+ stack_base = vma->vm_end + stack_expand;
#else
- stack_base = vma->vm_start - EXTRA_STACK_VM_PAGES * PAGE_SIZE;
+ if (stack_size + stack_expand > rlim_stack)
+ stack_base = vma->vm_end - rlim_stack;
+ else
+ stack_base = vma->vm_start - stack_expand;
#endif
ret = expand_stack(vma, stack_base);
if (ret)
@@ -941,9 +958,7 @@ void set_task_comm(struct task_struct *tsk, char *buf)
int flush_old_exec(struct linux_binprm * bprm)
{
- char * name;
- int i, ch, retval;
- char tcomm[sizeof(current->comm)];
+ int retval;
/*
* Make sure we have a private signal table and that
@@ -964,6 +979,25 @@ int flush_old_exec(struct linux_binprm * bprm)
bprm->mm = NULL; /* We're using it now */
+ current->flags &= ~PF_RANDOMIZE;
+ flush_thread();
+ current->personality &= ~bprm->per_clear;
+
+ return 0;
+
+out:
+ return retval;
+}
+EXPORT_SYMBOL(flush_old_exec);
+
+void setup_new_exec(struct linux_binprm * bprm)
+{
+ int i, ch;
+ char * name;
+ char tcomm[sizeof(current->comm)];
+
+ arch_pick_mmap_layout(current->mm);
+
/* This is the point of no return */
current->sas_ss_sp = current->sas_ss_size = 0;
@@ -985,9 +1019,6 @@ int flush_old_exec(struct linux_binprm * bprm)
tcomm[i] = '\0';
set_task_comm(current, tcomm);
- current->flags &= ~PF_RANDOMIZE;
- flush_thread();
-
/* Set the new mm task size. We have to do that late because it may
* depend on TIF_32BIT which is only updated in flush_thread() on
* some architectures like powerpc
@@ -1003,8 +1034,6 @@ int flush_old_exec(struct linux_binprm * bprm)
set_dumpable(current->mm, suid_dumpable);
}
- current->personality &= ~bprm->per_clear;
-
/*
* Flush performance counters when crossing a
* security domain:
@@ -1019,14 +1048,8 @@ int flush_old_exec(struct linux_binprm * bprm)
flush_signal_handlers(current, 0);
flush_old_files(current->files);
-
- return 0;
-
-out:
- return retval;
}
-
-EXPORT_SYMBOL(flush_old_exec);
+EXPORT_SYMBOL(setup_new_exec);
/*
* Prepare credentials and lock ->cred_guard_mutex.
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 698a8636d39c..2afbcebeda71 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -738,13 +738,28 @@ static int exofs_write_begin_export(struct file *file,
fsdata);
}
+static int exofs_write_end(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len, unsigned copied,
+ struct page *page, void *fsdata)
+{
+ struct inode *inode = mapping->host;
+ /* According to comment in simple_write_end i_mutex is held */
+ loff_t i_size = inode->i_size;
+ int ret;
+
+	ret = simple_write_end(file, mapping, pos, len, copied, page, fsdata);
+ if (i_size != inode->i_size)
+ mark_inode_dirty(inode);
+ return ret;
+}
+
const struct address_space_operations exofs_aops = {
.readpage = exofs_readpage,
.readpages = exofs_readpages,
.writepage = exofs_writepage,
.writepages = exofs_writepages,
.write_begin = exofs_write_begin_export,
- .write_end = simple_write_end,
+ .write_end = exofs_write_end,
};
/******************************************************************************
diff --git a/fs/exofs/pnfs.h b/fs/exofs/pnfs.h
index 423033addd1f..c52e9888b8ab 100644
--- a/fs/exofs/pnfs.h
+++ b/fs/exofs/pnfs.h
@@ -15,13 +15,7 @@
#ifndef __EXOFS_PNFS_H__
#define __EXOFS_PNFS_H__
-#if defined(CONFIG_PNFS)
-
-
-/* FIXME: move this file to: linux/exportfs/pnfs_osd_xdr.h */
-#include "../nfs/objlayout/pnfs_osd_xdr.h"
-
-#else /* defined(CONFIG_PNFS) */
+#if ! defined(__PNFS_OSD_XDR_H__)
enum pnfs_iomode {
IOMODE_READ = 1,
@@ -46,6 +40,6 @@ struct pnfs_osd_data_map {
u32 odm_raid_algorithm;
};
-#endif /* else defined(CONFIG_PNFS) */
+#endif /* ! defined(__PNFS_OSD_XDR_H__) */
#endif /* __EXOFS_PNFS_H__ */
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 56f9271ee8cc..874d169a193e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -361,14 +361,11 @@ struct ext4_new_group_data {
so set the magic i_delalloc_reserve_flag after taking the
inode allocation semaphore for */
#define EXT4_GET_BLOCKS_DELALLOC_RESERVE 0x0004
- /* Call ext4_da_update_reserve_space() after successfully
- allocating the blocks */
-#define EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE 0x0008
/* caller is from the direct IO path; request creation of an
uninitialized extent if not allocated, split the uninitialized
extent if blocks have been preallocated already */
-#define EXT4_GET_BLOCKS_DIO 0x0010
-#define EXT4_GET_BLOCKS_CONVERT 0x0020
+#define EXT4_GET_BLOCKS_DIO 0x0008
+#define EXT4_GET_BLOCKS_CONVERT 0x0010
#define EXT4_GET_BLOCKS_DIO_CREATE_EXT (EXT4_GET_BLOCKS_DIO|\
EXT4_GET_BLOCKS_CREATE_UNINIT_EXT)
/* Convert extent to initialized after direct IO complete */
@@ -699,6 +696,8 @@ struct ext4_inode_info {
unsigned int i_reserved_meta_blocks;
unsigned int i_allocated_meta_blocks;
unsigned short i_delalloc_reserved_flag;
+ sector_t i_da_metadata_calc_last_lblock;
+ int i_da_metadata_calc_len;
/* on-disk additional length */
__u16 i_extra_isize;
@@ -1441,6 +1440,8 @@ extern int ext4_block_truncate_page(handle_t *handle,
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
extern qsize_t *ext4_get_reserved_space(struct inode *inode);
extern int flush_aio_dio_completed_IO(struct inode *inode);
+extern void ext4_da_update_reserve_space(struct inode *inode,
+ int used, int quota_claim);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 2ca686454e87..bdb6ce7e2eb4 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -225,7 +225,8 @@ static inline void ext4_ext_mark_initialized(struct ext4_extent *ext)
ext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(ext));
}
-extern int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks);
+extern int ext4_ext_calc_metadata_amount(struct inode *inode,
+ sector_t lblocks);
extern ext4_fsblk_t ext_pblock(struct ext4_extent *ex);
extern ext4_fsblk_t idx_pblock(struct ext4_extent_idx *);
extern void ext4_ext_store_pblock(struct ext4_extent *, ext4_fsblk_t);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 91ae46098ea4..765a4826b118 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -296,29 +296,44 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
* to allocate @blocks
* Worst case is one block per extent
*/
-int ext4_ext_calc_metadata_amount(struct inode *inode, int blocks)
+int ext4_ext_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
- int lcap, icap, rcap, leafs, idxs, num;
- int newextents = blocks;
-
- rcap = ext4_ext_space_root_idx(inode, 0);
- lcap = ext4_ext_space_block(inode, 0);
- icap = ext4_ext_space_block_idx(inode, 0);
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ int idxs, num = 0;
- /* number of new leaf blocks needed */
- num = leafs = (newextents + lcap - 1) / lcap;
+ idxs = ((inode->i_sb->s_blocksize - sizeof(struct ext4_extent_header))
+ / sizeof(struct ext4_extent_idx));
/*
- * Worse case, we need separate index block(s)
- * to link all new leaf blocks
+ * If the new delayed allocation block is contiguous with the
+ * previous da block, it can share index blocks with the
+ * previous block, so we only need to allocate a new index
+	 * block every idxs leaf blocks.  At idxs**2 blocks, we need
+	 * an additional index block, and at idxs**3 blocks, yet
+	 * another index block.
*/
- idxs = (leafs + icap - 1) / icap;
- do {
- num += idxs;
- idxs = (idxs + icap - 1) / icap;
- } while (idxs > rcap);
+ if (ei->i_da_metadata_calc_len &&
+ ei->i_da_metadata_calc_last_lblock+1 == lblock) {
+ if ((ei->i_da_metadata_calc_len % idxs) == 0)
+ num++;
+ if ((ei->i_da_metadata_calc_len % (idxs*idxs)) == 0)
+ num++;
+ if ((ei->i_da_metadata_calc_len % (idxs*idxs*idxs)) == 0) {
+ num++;
+ ei->i_da_metadata_calc_len = 0;
+ } else
+ ei->i_da_metadata_calc_len++;
+ ei->i_da_metadata_calc_last_lblock++;
+ return num;
+ }
- return num;
+ /*
+ * In the worst case we need a new set of index blocks at
+ * every level of the inode's extent tree.
+ */
+ ei->i_da_metadata_calc_len = 1;
+ ei->i_da_metadata_calc_last_lblock = lblock;
+ return ext_depth(inode) + 1;
}
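To make the contiguous-case estimate concrete, here is a rough worked example; the 4 KiB block size and the 12-byte on-disk sizes of struct ext4_extent_header and struct ext4_extent_idx are assumptions, not stated in this hunk:

	idxs = (4096 - 12) / 12 = 340

so a run of contiguous delayed-allocation blocks is charged one extra index block only about every 340 blocks (plus one more at each idxs**2 and idxs**3 boundary), rather than an estimate that grows with the number of blocks on every call.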
static int
@@ -3117,7 +3132,19 @@ out:
unmap_underlying_metadata_blocks(inode->i_sb->s_bdev,
newblock + max_blocks,
allocated - max_blocks);
+ allocated = max_blocks;
}
+
+ /*
+ * If we have done fallocate with the offset that is already
+ * delayed allocated, we would have block reservation
+ * and quota reservation done in the delayed write path.
+ * But fallocate would have already updated quota and block
+	 * count for this offset. So cancel these reservations
+ */
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+ ext4_da_update_reserve_space(inode, allocated, 0);
+
map_out:
set_buffer_mapped(bh_result);
out1:
@@ -3353,9 +3380,18 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
/* previous routine could use block we allocated */
newblock = ext_pblock(&newex);
allocated = ext4_ext_get_actual_len(&newex);
+ if (allocated > max_blocks)
+ allocated = max_blocks;
set_buffer_new(bh_result);
/*
+ * Update reserved blocks/metadata blocks after successful
+ * block allocation which had been deferred till now.
+ */
+ if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
+ ext4_da_update_reserve_space(inode, allocated, 1);
+
+ /*
* Cache the extent and update transaction to commit on fdatasync only
* when it is _not_ an uninitialized extent.
*/
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 84eeb8f515a3..e11952404e02 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1009,49 +1009,56 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
return &EXT4_I(inode)->i_reserved_quota;
}
#endif
+
/*
* Calculate the number of metadata blocks needed to reserve
- * to allocate @blocks for non extent file based file
+ * to allocate a new block at @lblock for a non-extent-based file
*/
-static int ext4_indirect_calc_metadata_amount(struct inode *inode, int blocks)
+static int ext4_indirect_calc_metadata_amount(struct inode *inode,
+ sector_t lblock)
{
- int icap = EXT4_ADDR_PER_BLOCK(inode->i_sb);
- int ind_blks, dind_blks, tind_blks;
-
- /* number of new indirect blocks needed */
- ind_blks = (blocks + icap - 1) / icap;
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ int dind_mask = EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1;
+ int blk_bits;
- dind_blks = (ind_blks + icap - 1) / icap;
+ if (lblock < EXT4_NDIR_BLOCKS)
+ return 0;
- tind_blks = 1;
+ lblock -= EXT4_NDIR_BLOCKS;
- return ind_blks + dind_blks + tind_blks;
+ if (ei->i_da_metadata_calc_len &&
+ (lblock & dind_mask) == ei->i_da_metadata_calc_last_lblock) {
+ ei->i_da_metadata_calc_len++;
+ return 0;
+ }
+ ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
+ ei->i_da_metadata_calc_len = 1;
+ blk_bits = roundup_pow_of_two(lblock + 1);
+ return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
/*
* Calculate the number of metadata blocks needed to reserve
- * to allocate given number of blocks
+ * to allocate a block located at @lblock
*/
-static int ext4_calc_metadata_amount(struct inode *inode, int blocks)
+static int ext4_calc_metadata_amount(struct inode *inode, sector_t lblock)
{
- if (!blocks)
- return 0;
-
if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)
- return ext4_ext_calc_metadata_amount(inode, blocks);
+ return ext4_ext_calc_metadata_amount(inode, lblock);
- return ext4_indirect_calc_metadata_amount(inode, blocks);
+ return ext4_indirect_calc_metadata_amount(inode, lblock);
}
/*
* Called with i_data_sem down, which is important since we can call
* ext4_discard_preallocations() from here.
*/
-static void ext4_da_update_reserve_space(struct inode *inode, int used)
+void ext4_da_update_reserve_space(struct inode *inode,
+ int used, int quota_claim)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
- int mdb_free = 0;
+ int mdb_free = 0, allocated_meta_blocks = 0;
spin_lock(&ei->i_block_reservation_lock);
if (unlikely(used > ei->i_reserved_data_blocks)) {
@@ -1067,6 +1074,7 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
ei->i_reserved_data_blocks -= used;
used += ei->i_allocated_meta_blocks;
ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
+ allocated_meta_blocks = ei->i_allocated_meta_blocks;
ei->i_allocated_meta_blocks = 0;
percpu_counter_sub(&sbi->s_dirtyblocks_counter, used);
@@ -1076,16 +1084,31 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
* only when we have written all of the delayed
* allocation blocks.
*/
- mdb_free = ei->i_allocated_meta_blocks;
+ mdb_free = ei->i_reserved_meta_blocks;
+ ei->i_reserved_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free);
- ei->i_allocated_meta_blocks = 0;
}
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
/* Update quota subsystem */
- vfs_dq_claim_block(inode, used);
- if (mdb_free)
- vfs_dq_release_reservation_block(inode, mdb_free);
+ if (quota_claim) {
+ vfs_dq_claim_block(inode, used);
+ if (mdb_free)
+ vfs_dq_release_reservation_block(inode, mdb_free);
+ } else {
+ /*
+ * We did fallocate with an offset that is already delayed
+ * allocated. So on delayed allocated writeback we should
+ * not update the quota for allocated blocks. But then
+ * converting an fallocate region to initialized region would
+ * have caused a metadata allocation. So claim quota for
+ * that
+ */
+ if (allocated_meta_blocks)
+ vfs_dq_claim_block(inode, allocated_meta_blocks);
+ vfs_dq_release_reservation_block(inode, mdb_free + used);
+ }
/*
* If we have done all the pending block allocations and if
@@ -1285,18 +1308,20 @@ int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
*/
EXT4_I(inode)->i_state &= ~EXT4_STATE_EXT_MIGRATE;
}
- }
+ /*
+ * Update reserved blocks/metadata blocks after successful
+ * block allocation which had been deferred till now. We don't
+ * support fallocate for non extent files. So we can update
+ * reserve space here.
+ */
+ if ((retval > 0) &&
+ (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
+ ext4_da_update_reserve_space(inode, retval, 1);
+ }
if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
EXT4_I(inode)->i_delalloc_reserved_flag = 0;
- /*
- * Update reserved blocks/metadata blocks after successful
- * block allocation which had been deferred till now.
- */
- if ((retval > 0) && (flags & EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE))
- ext4_da_update_reserve_space(inode, retval);
-
up_write((&EXT4_I(inode)->i_data_sem));
if (retval > 0 && buffer_mapped(bh)) {
int ret = check_block_validity(inode, "file system "
@@ -1802,12 +1827,15 @@ static int ext4_journalled_write_end(struct file *file,
return ret ? ret : copied;
}
-static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
+/*
+ * Reserve a single block located at lblock
+ */
+static int ext4_da_reserve_space(struct inode *inode, sector_t lblock)
{
int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
- unsigned long md_needed, md_reserved, total = 0;
+ unsigned long md_needed, md_reserved;
/*
* recalculate the amount of metadata blocks to reserve
@@ -1817,8 +1845,7 @@ static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
repeat:
spin_lock(&ei->i_block_reservation_lock);
md_reserved = ei->i_reserved_meta_blocks;
- md_needed = ext4_calc_metadata_amount(inode, nrblocks);
- total = md_needed + nrblocks;
+ md_needed = ext4_calc_metadata_amount(inode, lblock);
spin_unlock(&ei->i_block_reservation_lock);
/*
@@ -1826,31 +1853,19 @@ repeat:
* later. Real quota accounting is done at pages writeout
* time.
*/
- if (vfs_dq_reserve_block(inode, total)) {
- /*
- * We tend to badly over-estimate the amount of
- * metadata blocks which are needed, so if we have
- * reserved any metadata blocks, try to force out the
- * inode and see if we have any better luck.
- */
- if (md_reserved && retries++ <= 3)
- goto retry;
+ if (vfs_dq_reserve_block(inode, md_needed + 1))
return -EDQUOT;
- }
- if (ext4_claim_free_blocks(sbi, total)) {
- vfs_dq_release_reservation_block(inode, total);
+ if (ext4_claim_free_blocks(sbi, md_needed + 1)) {
+ vfs_dq_release_reservation_block(inode, md_needed + 1);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- retry:
- if (md_reserved)
- write_inode_now(inode, (retries == 3));
yield();
goto repeat;
}
return -ENOSPC;
}
spin_lock(&ei->i_block_reservation_lock);
- ei->i_reserved_data_blocks += nrblocks;
+ ei->i_reserved_data_blocks++;
ei->i_reserved_meta_blocks += md_needed;
spin_unlock(&ei->i_block_reservation_lock);
@@ -1889,8 +1904,9 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
* only when we have written all of the delayed
* allocation blocks.
*/
- to_free += ei->i_allocated_meta_blocks;
- ei->i_allocated_meta_blocks = 0;
+ to_free += ei->i_reserved_meta_blocks;
+ ei->i_reserved_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
}
/* update fs dirty blocks counter */
@@ -2203,10 +2219,10 @@ static int mpage_da_map_blocks(struct mpage_da_data *mpd)
* variables are updated after the blocks have been allocated.
*/
new.b_state = 0;
- get_blocks_flags = (EXT4_GET_BLOCKS_CREATE |
- EXT4_GET_BLOCKS_DELALLOC_RESERVE);
+ get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
if (mpd->b_state & (1 << BH_Delay))
- get_blocks_flags |= EXT4_GET_BLOCKS_UPDATE_RESERVE_SPACE;
+ get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
+
blks = ext4_get_blocks(handle, mpd->inode, next, max_blocks,
&new, get_blocks_flags);
if (blks < 0) {
@@ -2504,7 +2520,7 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
* XXX: __block_prepare_write() unmaps passed block,
* is it OK?
*/
- ret = ext4_da_reserve_space(inode, 1);
+ ret = ext4_da_reserve_space(inode, iblock);
if (ret)
/* not enough space to reserve */
return ret;
@@ -3022,7 +3038,7 @@ static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
- int ret, retries = 0;
+ int ret, retries = 0, quota_retries = 0;
struct page *page;
pgoff_t index;
unsigned from, to;
@@ -3081,6 +3097,22 @@ retry:
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry;
+
+ if ((ret == -EDQUOT) &&
+ EXT4_I(inode)->i_reserved_meta_blocks &&
+ (quota_retries++ < 3)) {
+ /*
+ * Since we often over-estimate the number of meta
+ * data blocks required, we may sometimes get a
+		 * spurious out of quota error even though there would
+ * be enough space once we write the data blocks and
+ * find out how many meta data blocks were _really_
+ * required. So try forcing the inode write to see if
+ * that helps.
+ */
+ write_inode_now(inode, (quota_retries == 3));
+ goto retry;
+ }
out:
return ret;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 7cccb35c0f4d..735c20d5fd56 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -702,6 +702,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_reserved_data_blocks = 0;
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
+ ei->i_da_metadata_calc_len = 0;
ei->i_delalloc_reserved_flag = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 2cf93ec40a67..97e01dc0d95f 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -618,60 +618,90 @@ static DEFINE_RWLOCK(fasync_lock);
static struct kmem_cache *fasync_cache __read_mostly;
/*
- * fasync_helper() is used by almost all character device drivers
- * to set up the fasync queue. It returns negative on error, 0 if it did
- * no changes and positive if it added/deleted the entry.
+ * Remove a fasync entry. If successfully removed, return
+ * positive and clear the FASYNC flag. If no entry exists,
+ * do nothing and return 0.
+ *
+ * NOTE! It is very important that the FASYNC flag always
+ * match the state "is the filp on a fasync list".
+ *
+ * We always take the 'filp->f_lock' first, since the fasync_lock
+ * needs to be irq-safe.
*/
-int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
+static int fasync_remove_entry(struct file *filp, struct fasync_struct **fapp)
{
struct fasync_struct *fa, **fp;
- struct fasync_struct *new = NULL;
int result = 0;
- if (on) {
- new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
- if (!new)
- return -ENOMEM;
+ spin_lock(&filp->f_lock);
+ write_lock_irq(&fasync_lock);
+ for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
+ if (fa->fa_file != filp)
+ continue;
+ *fp = fa->fa_next;
+ kmem_cache_free(fasync_cache, fa);
+ filp->f_flags &= ~FASYNC;
+ result = 1;
+ break;
}
+ write_unlock_irq(&fasync_lock);
+ spin_unlock(&filp->f_lock);
+ return result;
+}
+
+/*
+ * Add a fasync entry. Return negative on error, positive if
+ * added, and zero if it did nothing but change an existing one.
+ *
+ * NOTE! It is very important that the FASYNC flag always
+ * match the state "is the filp on a fasync list".
+ */
+static int fasync_add_entry(int fd, struct file *filp, struct fasync_struct **fapp)
+{
+ struct fasync_struct *new, *fa, **fp;
+ int result = 0;
+
+ new = kmem_cache_alloc(fasync_cache, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
- /*
- * We need to take f_lock first since it's not an IRQ-safe
- * lock.
- */
spin_lock(&filp->f_lock);
write_lock_irq(&fasync_lock);
for (fp = fapp; (fa = *fp) != NULL; fp = &fa->fa_next) {
- if (fa->fa_file == filp) {
- if(on) {
- fa->fa_fd = fd;
- kmem_cache_free(fasync_cache, new);
- } else {
- *fp = fa->fa_next;
- kmem_cache_free(fasync_cache, fa);
- result = 1;
- }
- goto out;
- }
+ if (fa->fa_file != filp)
+ continue;
+ fa->fa_fd = fd;
+ kmem_cache_free(fasync_cache, new);
+ goto out;
}
- if (on) {
- new->magic = FASYNC_MAGIC;
- new->fa_file = filp;
- new->fa_fd = fd;
- new->fa_next = *fapp;
- *fapp = new;
- result = 1;
- }
+ new->magic = FASYNC_MAGIC;
+ new->fa_file = filp;
+ new->fa_fd = fd;
+ new->fa_next = *fapp;
+ *fapp = new;
+ result = 1;
+ filp->f_flags |= FASYNC;
+
out:
- if (on)
- filp->f_flags |= FASYNC;
- else
- filp->f_flags &= ~FASYNC;
write_unlock_irq(&fasync_lock);
spin_unlock(&filp->f_lock);
return result;
}
+/*
+ * fasync_helper() is used by almost all character device drivers
+ * to set up the fasync queue, and for regular files by the file
+ * lease code. It returns negative on error, 0 if it made no changes
+ * and positive if it added/deleted the entry.
+ */
+int fasync_helper(int fd, struct file * filp, int on, struct fasync_struct **fapp)
+{
+ if (!on)
+ return fasync_remove_entry(filp, fapp);
+ return fasync_add_entry(fd, filp, fapp);
+}
+
EXPORT_SYMBOL(fasync_helper);
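(Illustration, not part of this patch: the split into fasync_remove_entry()/fasync_add_entry() does not change how drivers call in; a character driver's ->fasync method still simply delegates to fasync_helper(). The mydev_* names and the async_queue field below are hypothetical.)

/* Hypothetical driver glue; filp->private_data points at our device. */
struct mydev {
	struct fasync_struct *async_queue;
};

static int mydev_fasync(int fd, struct file *filp, int on)
{
	/* on != 0 takes the fasync_add_entry() path, on == 0 the
	 * fasync_remove_entry() path; both keep FASYNC in
	 * filp->f_flags consistent with list membership. */
	struct mydev *dev = filp->private_data;

	return fasync_helper(fd, filp, on, &dev->async_queue);
}

The driver would then notify owners with kill_fasync(&dev->async_queue, SIGIO, POLL_IN) when new data becomes available.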
void __kill_fasync(struct fasync_struct *fa, int sig, int band)
diff --git a/fs/file_table.c b/fs/file_table.c
index 69652c5bd5f0..b98404b54383 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -253,6 +253,7 @@ void __fput(struct file *file)
if (file->f_op && file->f_op->release)
file->f_op->release(inode, file);
security_file_free(file);
+ ima_file_free(file);
if (unlikely(S_ISCHR(inode->i_mode) && inode->i_cdev != NULL))
cdev_put(inode->i_cdev);
fops_put(file->f_op);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f6c2155e0026..1a7c42c64ff4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -242,6 +242,7 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi,
/**
* bdi_start_writeback - start writeback
* @bdi: the backing device to write from
+ * @sb: write inodes from this super_block
* @nr_pages: the number of pages to write
*
* Description:
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c18913a777ae..a9f5e137f1d3 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -828,6 +828,9 @@ static ssize_t fuse_fill_write_pages(struct fuse_req *req,
if (!page)
break;
+ if (mapping_writably_mapped(mapping))
+ flush_dcache_page(page);
+
pagefault_disable();
tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
pagefault_enable();
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 6d47379e794b..583e823307ae 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -541,7 +541,7 @@ static int gfs2_bmap_alloc(struct inode *inode, const sector_t lblock,
*ptr++ = cpu_to_be64(bn++);
break;
}
- } while (state != ALLOC_DATA);
+ } while ((state != ALLOC_DATA) || !dblock);
ip->i_height = height;
gfs2_add_inode_blocks(&ip->i_inode, alloced);
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4eb308aa3234..a6abbae8a278 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -569,6 +569,40 @@ static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
return ret;
}
+/**
+ * gfs2_file_aio_write - Perform a write to a file
+ * @iocb: The io context
+ * @iov: The data to write
+ * @nr_segs: Number of @iov segments
+ * @pos: The file position
+ *
+ * We have to do a lock/unlock here to refresh the inode size for
+ * O_APPEND writes, otherwise we can end up writing at the wrong
+ * offset. There is still a race, but provided the app is using its
+ * own file locking, this will make O_APPEND work as expected.
+ *
+ */
+
+static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+
+ if (file->f_flags & O_APPEND) {
+ struct dentry *dentry = file->f_dentry;
+ struct gfs2_inode *ip = GFS2_I(dentry->d_inode);
+ struct gfs2_holder gh;
+ int ret;
+
+ ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
+ if (ret)
+ return ret;
+ gfs2_glock_dq_uninit(&gh);
+ }
+
+ return generic_file_aio_write(iocb, iov, nr_segs, pos);
+}
+
#ifdef CONFIG_GFS2_FS_LOCKING_DLM
/**
@@ -711,7 +745,7 @@ const struct file_operations gfs2_file_fops = {
.read = do_sync_read,
.aio_read = generic_file_aio_read,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .aio_write = gfs2_file_aio_write,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
@@ -741,7 +775,7 @@ const struct file_operations gfs2_file_fops_nolock = {
.read = do_sync_read,
.aio_read = generic_file_aio_read,
.write = do_sync_write,
- .aio_write = generic_file_aio_write,
+ .aio_write = gfs2_file_aio_write,
.unlocked_ioctl = gfs2_ioctl,
.mmap = gfs2_mmap,
.open = gfs2_open,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index f455a03a09e2..f42663325931 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -769,6 +769,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
if (!gl)
return -ENOMEM;
+ atomic_inc(&sdp->sd_glock_disposal);
gl->gl_flags = 0;
gl->gl_name = name;
atomic_set(&gl->gl_ref, 1);
@@ -1538,6 +1539,9 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
up_write(&gfs2_umount_flush_sem);
msleep(10);
}
+ flush_workqueue(glock_workqueue);
+ wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
+ gfs2_dump_lockstate(sdp);
}
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 13f0bd228132..c0262faf4725 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -123,7 +123,7 @@ struct lm_lockops {
int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
void (*lm_unmount) (struct gfs2_sbd *sdp);
void (*lm_withdraw) (struct gfs2_sbd *sdp);
- void (*lm_put_lock) (struct kmem_cache *cachep, void *gl);
+ void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
unsigned int (*lm_lock) (struct gfs2_glock *gl,
unsigned int req_state, unsigned int flags);
void (*lm_cancel) (struct gfs2_glock *gl);
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 4792200978c8..bc0ad158e6b4 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -544,6 +544,8 @@ struct gfs2_sbd {
struct gfs2_holder sd_live_gh;
struct gfs2_glock *sd_rename_gl;
struct gfs2_glock *sd_trans_gl;
+ wait_queue_head_t sd_glock_wait;
+ atomic_t sd_glock_disposal;
/* Inode Stuff */
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 46df988323bc..0e5e0e7022e5 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -21,6 +21,7 @@ static void gdlm_ast(void *arg)
{
struct gfs2_glock *gl = arg;
unsigned ret = gl->gl_state;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
@@ -30,6 +31,8 @@ static void gdlm_ast(void *arg)
switch (gl->gl_lksb.sb_status) {
case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
kmem_cache_free(gfs2_glock_cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
return;
case -DLM_ECANCEL: /* Cancel while getting lock */
ret |= LM_OUT_CANCELED;
@@ -164,14 +167,16 @@ static unsigned int gdlm_lock(struct gfs2_glock *gl,
return LM_OUT_ASYNC;
}
-static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr)
+static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
{
- struct gfs2_glock *gl = ptr;
- struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
int error;
if (gl->gl_lksb.sb_lkid == 0) {
kmem_cache_free(cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
return;
}
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index cb8d7a93d5ec..6f68a5f18eb8 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -121,7 +121,7 @@ struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
if (aspace) {
mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
aspace->i_mapping->a_ops = &aspace_aops;
- aspace->i_size = ~0ULL;
+ aspace->i_size = MAX_LFS_FILESIZE;
ip = GFS2_I(aspace);
clear_bit(GIF_USER, &ip->i_flags);
insert_inode_hash(aspace);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index edfee24f3636..a86ed6381566 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -82,6 +82,8 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
gfs2_tune_init(&sdp->sd_tune);
+ init_waitqueue_head(&sdp->sd_glock_wait);
+ atomic_set(&sdp->sd_glock_disposal, 0);
spin_lock_init(&sdp->sd_statfs_spin);
spin_lock_init(&sdp->sd_rindex_spin);
@@ -723,7 +725,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
goto fail;
}
- error = -EINVAL;
+ error = -EUSERS;
if (!gfs2_jindex_size(sdp)) {
fs_err(sdp, "no journals!\n");
goto fail_jindex;
@@ -983,9 +985,17 @@ static const match_table_t nolock_tokens = {
{ Opt_err, NULL },
};
+static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ kmem_cache_free(cachep, gl);
+ if (atomic_dec_and_test(&sdp->sd_glock_disposal))
+ wake_up(&sdp->sd_glock_wait);
+}
+
static const struct lm_lockops nolock_ops = {
.lm_proto_name = "lock_nolock",
- .lm_put_lock = kmem_cache_free,
+ .lm_put_lock = nolock_put_lock,
.lm_tokens = &nolock_tokens,
};
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 247436c10deb..84350e1be66d 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -748,7 +748,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
struct gfs2_rgrpd *nrgd;
unsigned int num_gh;
int dir_rename = 0;
- int alloc_required;
+ int alloc_required = 0;
unsigned int x;
int error;
@@ -867,7 +867,9 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
goto out_gunlock;
}
- alloc_required = error = gfs2_diradd_alloc_required(ndir, &ndentry->d_name);
+ if (nip == NULL)
+ alloc_required = gfs2_diradd_alloc_required(ndir, &ndentry->d_name);
+ error = alloc_required;
if (error < 0)
goto out_gunlock;
error = 0;
@@ -1086,7 +1088,8 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
error = vfs_follow_link(nd, buf);
if (buf != array)
kfree(buf);
- }
+ } else
+ path_put(&nd->path);
return ERR_PTR(error);
}
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 0608f490c295..503b842f3ba2 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -591,11 +591,7 @@ static int gfs2_ri_update(struct gfs2_inode *ip)
u64 rgrp_count = ip->i_disksize;
int error;
- if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
- gfs2_consist_inode(ip);
- return -EIO;
- }
-
+ do_div(rgrp_count, sizeof(struct gfs2_rindex));
clear_rgrpdi(sdp);
file_ra_state_init(&ra_state, inode->i_mapping);
@@ -915,7 +911,7 @@ void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
BUG_ON(ip->i_alloc != NULL);
- ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_KERNEL);
+ ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_NOFS);
return ip->i_alloc;
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index c282ad41f3d1..b9dd3da22c0a 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -21,6 +21,7 @@
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
+#include <linux/wait.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 8a04108e0c22..c2ebdf2c01d4 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -1296,6 +1296,7 @@ fail:
int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
{
+ struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct gfs2_ea_location el;
struct buffer_head *dibh;
int error;
@@ -1305,16 +1306,17 @@ int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
return error;
if (GFS2_EA_IS_STUFFED(el.el_ea)) {
- error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
- if (error)
- return error;
-
- gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
- memcpy(GFS2_EA2DATA(el.el_ea), data,
- GFS2_EA_DATA_LEN(el.el_ea));
- } else
+ error = gfs2_trans_begin(sdp, RES_DINODE + RES_EATTR, 0);
+ if (error == 0) {
+ gfs2_trans_add_bh(ip->i_gl, el.el_bh, 1);
+ memcpy(GFS2_EA2DATA(el.el_ea), data,
+ GFS2_EA_DATA_LEN(el.el_ea));
+ }
+ } else {
error = ea_acl_chmod_unstuffed(ip, el.el_ea, data);
+ }
+ brelse(el.el_bh);
if (error)
return error;
@@ -1327,8 +1329,7 @@ int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data)
brelse(dibh);
}
- gfs2_trans_end(GFS2_SB(&ip->i_inode));
-
+ gfs2_trans_end(sdp);
return error;
}
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index a5089a6dd67a..7239efc690d8 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -646,22 +646,27 @@ static const struct super_operations hppfs_sbops = {
static int hppfs_readlink(struct dentry *dentry, char __user *buffer,
int buflen)
{
- struct dentry *proc_dentry;
-
- proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+ struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
return proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer,
buflen);
}
static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct dentry *proc_dentry;
-
- proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+ struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
return proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd);
}
+static void hppfs_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ struct dentry *proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry;
+
+ if (proc_dentry->d_inode->i_op->put_link)
+ proc_dentry->d_inode->i_op->put_link(proc_dentry, nd, cookie);
+}
+
static const struct inode_operations hppfs_dir_iops = {
.lookup = hppfs_lookup,
};
@@ -669,6 +674,7 @@ static const struct inode_operations hppfs_dir_iops = {
static const struct inode_operations hppfs_link_iops = {
.readlink = hppfs_readlink,
.follow_link = hppfs_follow_link,
+ .put_link = hppfs_put_link,
};
static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
diff --git a/fs/namei.c b/fs/namei.c
index b55440baf7ab..a4855af776a8 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -561,6 +561,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
dget(dentry);
}
mntget(path->mnt);
+ nd->last_type = LAST_BIND;
cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
error = PTR_ERR(cookie);
if (!IS_ERR(cookie)) {
@@ -822,6 +823,17 @@ fail:
}
/*
+ * This is a temporary kludge to deal with "automount" symlinks; proper
+ * solution is to trigger them on follow_mount(), so that do_lookup()
+ * would DTRT. To be killed before 2.6.34-final.
+ */
+static inline int follow_on_final(struct inode *inode, unsigned lookup_flags)
+{
+ return inode && unlikely(inode->i_op->follow_link) &&
+ ((lookup_flags & LOOKUP_FOLLOW) || S_ISDIR(inode->i_mode));
+}
+
+/*
* Name resolution.
* This is the basic name resolution function, turning a pathname into
* the final dentry. We expect 'base' to be positive and a directory.
@@ -941,8 +953,7 @@ last_component:
if (err)
break;
inode = next.dentry->d_inode;
- if ((lookup_flags & LOOKUP_FOLLOW)
- && inode && inode->i_op->follow_link) {
+ if (follow_on_final(inode, lookup_flags)) {
err = do_follow_link(&next, nd);
if (err)
goto return_err;
@@ -1603,11 +1614,12 @@ struct file *do_filp_open(int dfd, const char *pathname,
struct file *filp;
struct nameidata nd;
int error;
- struct path path, save;
+ struct path path;
struct dentry *dir;
int count = 0;
int will_truncate;
int flag = open_to_namei_flags(open_flag);
+ int force_reval = 0;
/*
* O_SYNC is implemented as __O_SYNC|O_DSYNC. As many places only
@@ -1619,7 +1631,7 @@ struct file *do_filp_open(int dfd, const char *pathname,
open_flag |= O_DSYNC;
if (!acc_mode)
- acc_mode = MAY_OPEN | ACC_MODE(flag);
+ acc_mode = MAY_OPEN | ACC_MODE(open_flag);
/* O_TRUNC implies we need access checks for write permissions */
if (flag & O_TRUNC)
@@ -1659,9 +1671,12 @@ struct file *do_filp_open(int dfd, const char *pathname,
/*
* Create - we need to know the parent.
*/
+reval:
error = path_init(dfd, pathname, LOOKUP_PARENT, &nd);
if (error)
return ERR_PTR(error);
+ if (force_reval)
+ nd.flags |= LOOKUP_REVAL;
error = path_walk(pathname, &nd);
if (error) {
if (nd.root.mnt)
@@ -1731,8 +1746,7 @@ do_last:
if (nd.root.mnt)
path_put(&nd.root);
if (!IS_ERR(filp)) {
- error = ima_path_check(&filp->f_path, filp->f_mode &
- (MAY_READ | MAY_WRITE | MAY_EXEC));
+ error = ima_file_check(filp, acc_mode);
if (error) {
fput(filp);
filp = ERR_PTR(error);
@@ -1792,8 +1806,7 @@ ok:
}
filp = nameidata_to_filp(&nd);
if (!IS_ERR(filp)) {
- error = ima_path_check(&filp->f_path, filp->f_mode &
- (MAY_READ | MAY_WRITE | MAY_EXEC));
+ error = ima_file_check(filp, acc_mode);
if (error) {
fput(filp);
filp = ERR_PTR(error);
@@ -1853,17 +1866,7 @@ do_link:
error = security_inode_follow_link(path.dentry, &nd);
if (error)
goto exit_dput;
- save = nd.path;
- path_get(&save);
error = __do_follow_link(&path, &nd);
- if (error == -ESTALE) {
- /* nd.path had been dropped */
- nd.path = save;
- path_get(&nd.path);
- nd.flags |= LOOKUP_REVAL;
- error = __do_follow_link(&path, &nd);
- }
- path_put(&save);
path_put(&path);
if (error) {
/* Does someone understand code flow here? Or it is only
@@ -1873,6 +1876,10 @@ do_link:
release_open_intent(&nd);
if (nd.root.mnt)
path_put(&nd.root);
+ if (error == -ESTALE && !force_reval) {
+ force_reval = 1;
+ goto reval;
+ }
return ERR_PTR(error);
}
nd.flags &= ~LOOKUP_PARENT;
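The do_filp_open() rework above drops the saved-path dance in favour of a single retry: if the symlink walk fails with -ESTALE on the first attempt, force_reval is set, LOOKUP_REVAL is added, and resolution restarts from path_init(). A rough userspace sketch of the same retry-once-with-revalidation shape, where lookup_once() is a hypothetical stand-in for the path walk:

#include <errno.h>
#include <stdio.h>

#define LOOKUP_REVAL 0x1	/* illustrative flag, not the kernel's value */

/* Hypothetical lookup; fails with -ESTALE on the first, cached attempt. */
static int lookup_once(const char *name, unsigned flags)
{
	static int calls;

	if (++calls == 1 && !(flags & LOOKUP_REVAL))
		return -ESTALE;
	printf("resolved %s (flags=%#x)\n", name, flags);
	return 0;
}

static int open_with_retry(const char *name)
{
	int force_reval = 0;
	unsigned flags;
	int err;

reval:
	flags = force_reval ? LOOKUP_REVAL : 0;
	err = lookup_once(name, flags);
	if (err == -ESTALE && !force_reval) {
		force_reval = 1;	/* retry exactly once, revalidating */
		goto reval;
	}
	return err;
}

int main(void)
{
	return open_with_retry("/mnt/nfs/file") ? 1 : 0;
}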
diff --git a/fs/namespace.c b/fs/namespace.c
index 7d70d63ceb29..c768f733c8d6 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -965,10 +965,12 @@ EXPORT_SYMBOL(may_umount_tree);
int may_umount(struct vfsmount *mnt)
{
int ret = 1;
+ down_read(&namespace_sem);
spin_lock(&vfsmount_lock);
if (propagate_mount_busy(mnt, 2))
ret = 0;
spin_unlock(&vfsmount_lock);
+ up_read(&namespace_sem);
return ret;
}
@@ -1352,12 +1354,12 @@ static int attach_recursive_mnt(struct vfsmount *source_mnt,
if (err)
goto out_cleanup_ids;
+ spin_lock(&vfsmount_lock);
+
if (IS_MNT_SHARED(dest_mnt)) {
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
set_mnt_shared(p);
}
-
- spin_lock(&vfsmount_lock);
if (parent_path) {
detach_mnt(source_mnt, parent_path);
attach_mnt(source_mnt, path);
@@ -1534,8 +1536,12 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
err = change_mount_flags(path->mnt, flags);
else
err = do_remount_sb(sb, flags, data, 0);
- if (!err)
+ if (!err) {
+ spin_lock(&vfsmount_lock);
+ mnt_flags |= path->mnt->mnt_flags & MNT_PNODE_MASK;
path->mnt->mnt_flags = mnt_flags;
+ spin_unlock(&vfsmount_lock);
+ }
up_write(&sb->s_umount);
if (!err) {
security_sb_post_remount(path->mnt, flags, data);
@@ -1665,6 +1671,8 @@ int do_add_mount(struct vfsmount *newmnt, struct path *path,
{
int err;
+ mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD);
+
down_write(&namespace_sem);
/* Something was mounted here while we slept */
while (d_mountpoint(path->dentry) &&
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 2c5ace4f00a7..3c7f03b669fb 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1615,6 +1615,7 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
goto out;
new_dentry = dentry;
+ rehash = NULL;
new_inode = NULL;
}
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e1d415e97849..0d289823e856 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -342,6 +342,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->res.fattr = &data->fattr;
data->res.eof = 0;
data->res.count = bytes;
+ nfs_fattr_init(&data->fattr);
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
@@ -575,6 +576,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
NFS_PROTO(data->inode)->commit_setup(data, &msg);
@@ -766,6 +768,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->res.fattr = &data->fattr;
data->res.count = bytes;
data->res.verf = &data->verf;
+ nfs_fattr_init(&data->fattr);
task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
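All three hunks in fs/nfs/direct.c add an nfs_fattr_init() call before the RPC is set up, so attribute state left over from an earlier use of the buffer cannot be mistaken for fresh post-op attributes. A tiny sketch of the underlying habit of clearing a reused result structure before every request, using an invented struct fattr rather than the real nfs_fattr:

#include <stdio.h>
#include <string.h>
#include <time.h>

/* Invented result structure; 'valid' says which fields the reply filled in. */
struct fattr {
	unsigned valid;
	long size;
	time_t request_start;
};

static void fattr_init(struct fattr *f)
{
	memset(f, 0, sizeof(*f));	/* nothing valid yet */
	f->request_start = time(NULL);	/* timestamp the request */
}

static void issue_request(struct fattr *result, int reply_has_attrs)
{
	fattr_init(result);		/* reset before each reuse */
	if (reply_has_attrs) {		/* pretend the reply carried a size */
		result->valid |= 1;
		result->size = 4096;
	}
}

int main(void)
{
	struct fattr res;

	issue_request(&res, 1);
	issue_request(&res, 0);		/* second reply had no attributes ... */
	printf("valid=%u size=%ld\n", res.valid, res.size);	/* ... none leak */
	return 0;
}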
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 6b891328f332..63f2071d6445 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -486,6 +486,8 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
{
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+ if (gfp & __GFP_WAIT)
+ nfs_wb_page(page->mapping->host, page);
/* If PagePrivate() is set, then the page is not freeable */
if (PagePrivate(page))
return 0;
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index fa588006588d..237874f1af23 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -354,12 +354,11 @@ void nfs_fscache_reset_inode_cookie(struct inode *inode)
*/
int nfs_fscache_release_page(struct page *page, gfp_t gfp)
{
- struct nfs_inode *nfsi = NFS_I(page->mapping->host);
- struct fscache_cookie *cookie = nfsi->fscache;
-
- BUG_ON(!cookie);
-
if (PageFsCache(page)) {
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+ struct fscache_cookie *cookie = nfsi->fscache;
+
+ BUG_ON(!cookie);
dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
cookie, page, nfsi);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index faa091865ad0..f141bde7756a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1261,8 +1261,10 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (fattr->valid & NFS_ATTR_FATTR_MODE) {
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) {
+ umode_t newmode = inode->i_mode & S_IFMT;
+ newmode |= fattr->mode & S_IALLUGO;
+ inode->i_mode = newmode;
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
- inode->i_mode = fattr->mode;
}
} else if (server->caps & NFS_CAP_MODE)
invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
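The nfs_update_inode() hunk merges the server's permission bits into i_mode without disturbing the file-type bits: the S_IFMT portion is kept from the existing mode and only the 07777 permission bits (S_IALLUGO in kernel terms) come from the fattr. The same masking, reduced to plain C; S_IALLUGO is kernel-only, so the 07777 mask is spelled out here:

#include <stdio.h>
#include <sys/stat.h>

#define PERM_MASK 07777		/* what the kernel calls S_IALLUGO */

/* Keep the file type, take the permission bits from the server-supplied mode. */
static mode_t merge_mode(mode_t current, mode_t from_server)
{
	return (current & S_IFMT) | (from_server & PERM_MASK);
}

int main(void)
{
	mode_t cur = S_IFREG | 0644;	/* regular file, rw-r--r-- */
	mode_t srv = 0755;		/* server reports new permissions */
	mode_t merged = merge_mode(cur, srv);

	printf("merged=%o, still a regular file: %d\n",
	       (unsigned)merged, S_ISREG(merged) != 0);
	return 0;
}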
diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
index 0adefc40cc89..59047f8d7d72 100644
--- a/fs/nfs/mount_clnt.c
+++ b/fs/nfs/mount_clnt.c
@@ -120,7 +120,7 @@ static struct {
{ .status = MNT3ERR_INVAL, .errno = -EINVAL, },
{ .status = MNT3ERR_NAMETOOLONG, .errno = -ENAMETOOLONG, },
{ .status = MNT3ERR_NOTSUPP, .errno = -ENOTSUPP, },
- { .status = MNT3ERR_SERVERFAULT, .errno = -ESERVERFAULT, },
+ { .status = MNT3ERR_SERVERFAULT, .errno = -EREMOTEIO, },
};
struct mountres {
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 5e078b222b4e..7bc2da8efd4a 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -699,7 +699,7 @@ static struct {
{ NFSERR_BAD_COOKIE, -EBADCOOKIE },
{ NFSERR_NOTSUPP, -ENOTSUPP },
{ NFSERR_TOOSMALL, -ETOOSMALL },
- { NFSERR_SERVERFAULT, -ESERVERFAULT },
+ { NFSERR_SERVERFAULT, -EREMOTEIO },
{ NFSERR_BADTYPE, -EBADTYPE },
{ NFSERR_JUKEBOX, -EJUKEBOX },
{ -1, -EIO }
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 865265bdca03..0c6fda33d66e 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -146,6 +146,7 @@ enum {
NFS_O_RDWR_STATE, /* OPEN stateid has read/write state */
NFS_STATE_RECLAIM_REBOOT, /* OPEN stateid server rebooted */
NFS_STATE_RECLAIM_NOGRACE, /* OPEN stateid needs to recover state */
+ NFS_STATE_POSIX_LOCKS, /* Posix locks are supported */
};
struct nfs4_state {
@@ -277,6 +278,7 @@ extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
extern void nfs4_schedule_state_recovery(struct nfs_client *);
extern void nfs4_schedule_state_manager(struct nfs_client *);
extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state);
+extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 198d51d17c13..375f0fae2c6a 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -249,19 +249,15 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
if (state == NULL)
break;
nfs4_state_mark_reclaim_nograce(clp, state);
- case -NFS4ERR_STALE_CLIENTID:
+ goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
- case -NFS4ERR_EXPIRED:
- nfs4_schedule_state_recovery(clp);
- ret = nfs4_wait_clnt_recover(clp);
- if (ret == 0)
- exception->retry = 1;
-#if !defined(CONFIG_NFS_V4_1)
- break;
-#else /* !defined(CONFIG_NFS_V4_1) */
- if (!nfs4_has_session(server->nfs_client))
+ if (state == NULL)
break;
- /* FALLTHROUGH */
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ case -NFS4ERR_STALE_CLIENTID:
+ case -NFS4ERR_EXPIRED:
+ goto do_state_recovery;
+#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
@@ -274,7 +270,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
nfs4_schedule_state_recovery(clp);
exception->retry = 1;
break;
-#endif /* !defined(CONFIG_NFS_V4_1) */
+#endif /* defined(CONFIG_NFS_V4_1) */
case -NFS4ERR_FILE_OPEN:
if (exception->timeout > HZ) {
/* We have retried a decent amount, time to
@@ -293,6 +289,12 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
}
/* We failed to handle the error */
return nfs4_map_errors(ret);
+do_state_recovery:
+ nfs4_schedule_state_recovery(clp);
+ ret = nfs4_wait_clnt_recover(clp);
+ if (ret == 0)
+ exception->retry = 1;
+ return ret;
}
@@ -1658,6 +1660,8 @@ static int _nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, in
status = PTR_ERR(state);
if (IS_ERR(state))
goto err_opendata_put;
+ if ((opendata->o_res.rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) != 0)
+ set_bit(NFS_STATE_POSIX_LOCKS, &state->flags);
nfs4_opendata_put(opendata);
nfs4_put_state_owner(sp);
*res = state;
@@ -3422,15 +3426,14 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
if (state == NULL)
break;
nfs4_state_mark_reclaim_nograce(clp, state);
- case -NFS4ERR_STALE_CLIENTID:
+ goto do_state_recovery;
case -NFS4ERR_STALE_STATEID:
+ if (state == NULL)
+ break;
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_EXPIRED:
- rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
- nfs4_schedule_state_recovery(clp);
- if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
- rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
- task->tk_status = 0;
- return -EAGAIN;
+ goto do_state_recovery;
#if defined(CONFIG_NFS_V4_1)
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
@@ -3458,6 +3461,13 @@ _nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
}
task->tk_status = nfs4_map_errors(task->tk_status);
return 0;
+do_state_recovery:
+ rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
+ nfs4_schedule_state_recovery(clp);
+ if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
+ rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
+ task->tk_status = 0;
+ return -EAGAIN;
}
static int
@@ -4088,6 +4098,28 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
.rpc_release = nfs4_lock_release,
};
+static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
+{
+ struct nfs_client *clp = server->nfs_client;
+ struct nfs4_state *state = lsp->ls_state;
+
+ switch (error) {
+ case -NFS4ERR_ADMIN_REVOKED:
+ case -NFS4ERR_BAD_STATEID:
+ case -NFS4ERR_EXPIRED:
+ if (new_lock_owner != 0 ||
+ (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+ nfs4_state_mark_reclaim_nograce(clp, state);
+ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+ break;
+ case -NFS4ERR_STALE_STATEID:
+ if (new_lock_owner != 0 ||
+ (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
+ nfs4_state_mark_reclaim_reboot(clp, state);
+ lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
+ };
+}
+
static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *fl, int recovery_type)
{
struct nfs4_lockdata *data;
@@ -4126,6 +4158,9 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
ret = nfs4_wait_for_completion_rpc_task(task);
if (ret == 0) {
ret = data->rpc_status;
+ if (ret)
+ nfs4_handle_setlk_error(data->server, data->lsp,
+ data->arg.new_lock_owner, ret);
} else
data->cancelled = 1;
rpc_put_task(task);
@@ -4181,8 +4216,11 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
{
struct nfs_inode *nfsi = NFS_I(state->inode);
unsigned char fl_flags = request->fl_flags;
- int status;
+ int status = -ENOLCK;
+ if ((fl_flags & FL_POSIX) &&
+ !test_bit(NFS_STATE_POSIX_LOCKS, &state->flags))
+ goto out;
/* Is this a delegated open? */
status = nfs4_set_lock_state(state, request);
if (status != 0)
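Both nfs4_handle_exception() and _nfs4_async_handle_error() are reshaped above so the stateid/clientid error cases fall through to one do_state_recovery label instead of repeating the schedule-and-wait sequence in each case. A small, generic illustration of that refactor, factoring the shared recovery handling behind a single label; the error codes and helper here are made up:

#include <stdio.h>

enum err { ERR_STALE_STATEID = 1, ERR_STALE_CLIENTID, ERR_EXPIRED, ERR_OTHER };

static void schedule_recovery(void) { puts("recovery scheduled"); }

/* Returns 1 if the caller should retry after recovery, 0 otherwise. */
static int handle_error(enum err e, int have_state)
{
	switch (e) {
	case ERR_STALE_STATEID:
		if (!have_state)
			break;			/* nothing to reclaim */
		/* fall through */
	case ERR_STALE_CLIENTID:
	case ERR_EXPIRED:
		goto do_state_recovery;		/* one shared recovery path */
	default:
		break;
	}
	return 0;

do_state_recovery:
	schedule_recovery();
	return 1;
}

int main(void)
{
	printf("retry=%d\n", handle_error(ERR_EXPIRED, 1));
	printf("retry=%d\n", handle_error(ERR_STALE_STATEID, 0));
	return 0;
}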
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 6d263ed79e92..c1e2733f4fa4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -901,7 +901,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
nfs4_schedule_state_manager(clp);
}
-static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
+int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
{
set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index e437fd6a819f..5cd5184b56db 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -4631,7 +4631,7 @@ static int decode_sequence(struct xdr_stream *xdr,
* If the server returns different values for sessionID, slotID or
* sequence number, the server is looney tunes.
*/
- status = -ESERVERFAULT;
+ status = -EREMOTEIO;
if (memcmp(id.data, res->sr_session->sess_id.data,
NFS4_MAX_SESSIONID_LEN)) {
@@ -5774,7 +5774,7 @@ static struct {
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
- { NFS4ERR_SERVERFAULT, -ESERVERFAULT },
+ { NFS4ERR_SERVERFAULT, -EREMOTEIO },
{ NFS4ERR_BADTYPE, -EBADTYPE },
{ NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_SYMLINK, -ELOOP },
@@ -5801,7 +5801,7 @@ nfs4_stat_to_errno(int stat)
}
if (stat <= 10000 || stat > 10100) {
/* The server is looney tunes. */
- return -ESERVERFAULT;
+ return -EREMOTEIO;
}
/* If we cannot translate the error, the recovery routines should
* handle it.
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index e2975939126a..a12c45b65dd4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -176,6 +176,12 @@ void nfs_release_request(struct nfs_page *req)
kref_put(&req->wb_kref, nfs_free_request);
}
+static int nfs_wait_bit_uninterruptible(void *word)
+{
+ io_schedule();
+ return 0;
+}
+
/**
* nfs_wait_on_request - Wait for a request to complete.
* @req: request to wait upon.
@@ -186,14 +192,9 @@ void nfs_release_request(struct nfs_page *req)
int
nfs_wait_on_request(struct nfs_page *req)
{
- int ret = 0;
-
- if (!test_bit(PG_BUSY, &req->wb_flags))
- goto out;
- ret = out_of_line_wait_on_bit(&req->wb_flags, PG_BUSY,
- nfs_wait_bit_killable, TASK_KILLABLE);
-out:
- return ret;
+ return wait_on_bit(&req->wb_flags, PG_BUSY,
+ nfs_wait_bit_uninterruptible,
+ TASK_UNINTERRUPTIBLE);
}
/**
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ce907efc5508..f1afee4eea77 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -243,6 +243,7 @@ static int nfs_show_stats(struct seq_file *, struct vfsmount *);
static int nfs_get_sb(struct file_system_type *, int, const char *, void *, struct vfsmount *);
static int nfs_xdev_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *raw_data, struct vfsmount *mnt);
+static void nfs_put_super(struct super_block *);
static void nfs_kill_super(struct super_block *);
static int nfs_remount(struct super_block *sb, int *flags, char *raw_data);
@@ -266,6 +267,7 @@ static const struct super_operations nfs_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
+ .put_super = nfs_put_super,
.statfs = nfs_statfs,
.clear_inode = nfs_clear_inode,
.umount_begin = nfs_umount_begin,
@@ -335,6 +337,7 @@ static const struct super_operations nfs4_sops = {
.alloc_inode = nfs_alloc_inode,
.destroy_inode = nfs_destroy_inode,
.write_inode = nfs_write_inode,
+ .put_super = nfs_put_super,
.statfs = nfs_statfs,
.clear_inode = nfs4_clear_inode,
.umount_begin = nfs_umount_begin,
@@ -2258,6 +2261,17 @@ error_splat_super:
}
/*
+ * Ensure that we unregister the bdi before kill_anon_super
+ * releases the device name
+ */
+static void nfs_put_super(struct super_block *s)
+{
+ struct nfs_server *server = NFS_SB(s);
+
+ bdi_unregister(&server->backing_dev_info);
+}
+
+/*
* Destroy an NFS2/3 superblock
*/
static void nfs_kill_super(struct super_block *s)
@@ -2265,7 +2279,6 @@ static void nfs_kill_super(struct super_block *s)
struct nfs_server *server = NFS_SB(s);
kill_anon_super(s);
- bdi_unregister(&server->backing_dev_info);
nfs_fscache_release_super_cookie(s);
nfs_free_server(server);
}
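The new nfs_put_super() exists purely for ordering: the backing_dev_info was registered under the device name, so it must be unregistered while ->put_super still runs, before kill_anon_super() releases that name. The same use-then-free discipline in miniature, with a registry that keeps a borrowed pointer into a name owned by a superblock-like object; all names here are invented:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The 'registry' keeps a borrowed pointer into the owner's name. */
static const char *registered_name;

static void register_dev(const char *name)	{ registered_name = name; }
static void unregister_dev(void)		{ registered_name = NULL; }

struct superblock_like {
	char *dev_name;		/* owned; released on teardown */
};

static void kill_super(struct superblock_like *s)
{
	free(s->dev_name);	/* after this, any borrowed pointer is dangling */
	s->dev_name = NULL;
}

int main(void)
{
	struct superblock_like sb = { .dev_name = strdup("server:/export") };

	register_dev(sb.dev_name);

	/* Teardown: unregister first (the ->put_super step) ... */
	unregister_dev();
	/* ... and only then release the name (the kill_anon_super step). */
	kill_super(&sb);

	printf("still registered? %s\n", registered_name ? "yes" : "no");
	return 0;
}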
diff --git a/fs/nfs/sysctl.c b/fs/nfs/sysctl.c
index 70e1fbbaaeab..ad4d2e787b20 100644
--- a/fs/nfs/sysctl.c
+++ b/fs/nfs/sysctl.c
@@ -15,8 +15,10 @@
#include "callback.h"
+#ifdef CONFIG_NFS_V4
static const int nfs_set_port_min = 0;
static const int nfs_set_port_max = 65535;
+#endif
static struct ctl_table_header *nfs_callback_sysctl_table;
static ctl_table nfs_cb_sysctls[] = {
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d171696017f4..d63d964a0392 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1233,7 +1233,7 @@ int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
-void nfs_commitdata_release(void *data)
+static void nfs_commitdata_release(void *data)
{
struct nfs_write_data *wdata = data;
@@ -1541,6 +1541,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
break;
}
ret = nfs_wait_on_request(req);
+ nfs_release_request(req);
if (ret < 0)
goto out;
}
@@ -1597,8 +1598,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
struct nfs_page *req;
int ret;
- if (PageFsCache(page))
- nfs_fscache_release_page(page, GFP_KERNEL);
+ nfs_fscache_release_page(page, GFP_KERNEL);
req = nfs_find_and_lock_request(page);
ret = PTR_ERR(req);
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index c487810a2366..a0c4016413f1 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -1316,19 +1316,11 @@ rqst_exp_parent(struct svc_rqst *rqstp, struct path *path)
static struct svc_export *find_fsidzero_export(struct svc_rqst *rqstp)
{
- struct svc_export *exp;
u32 fsidv[2];
mk_fsid(FSID_NUM, fsidv, 0, 0, 0, NULL);
- exp = rqst_exp_find(rqstp, FSID_NUM, fsidv);
- /*
- * We shouldn't have accepting an nfsv4 request at all if we
- * don't have a pseudoexport!:
- */
- if (IS_ERR(exp) && PTR_ERR(exp) == -ENOENT)
- exp = ERR_PTR(-ESERVERFAULT);
- return exp;
+ return rqst_exp_find(rqstp, FSID_NUM, fsidv);
}
/*
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 7c2e337d05af..8715d194561a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -752,6 +752,8 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
flags, current_cred());
if (IS_ERR(*filp))
host_err = PTR_ERR(*filp);
+ else
+ host_err = ima_file_check(*filp, access);
out_nfserr:
err = nfserrno(host_err);
out:
@@ -780,12 +782,9 @@ static inline int nfsd_dosync(struct file *filp, struct dentry *dp,
int (*fsync) (struct file *, struct dentry *, int);
int err;
- err = filemap_fdatawrite(inode->i_mapping);
+ err = filemap_write_and_wait(inode->i_mapping);
if (err == 0 && fop && (fsync = fop->fsync))
err = fsync(filp, dp, 0);
- if (err == 0)
- err = filemap_fdatawait(inode->i_mapping);
-
return err;
}
@@ -2130,7 +2129,6 @@ nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
*/
path.mnt = exp->ex_path.mnt;
path.dentry = dentry;
- err = ima_path_check(&path, acc & (MAY_READ | MAY_WRITE | MAY_EXEC));
nfsd_out:
return err? nfserrno(err) : 0;
}
diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c
index f4a14ea2ed9c..effdbdbe6c11 100644
--- a/fs/nilfs2/bmap.c
+++ b/fs/nilfs2/bmap.c
@@ -417,8 +417,8 @@ __u64 nilfs_bmap_data_get_key(const struct nilfs_bmap *bmap,
key = page_index(bh->b_page) << (PAGE_CACHE_SHIFT -
bmap->b_inode->i_blkbits);
- for (pbh = page_buffers(bh->b_page); pbh != bh;
- pbh = pbh->b_this_page, key++);
+ for (pbh = page_buffers(bh->b_page); pbh != bh; pbh = pbh->b_this_page)
+ key++;
return key;
}
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index d5ad54e204a5..18737818db63 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -328,19 +328,24 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
tnicps += nicps;
nilfs_mdt_mark_buffer_dirty(cp_bh);
nilfs_mdt_mark_dirty(cpfile);
- if (!nilfs_cpfile_is_in_first(cpfile, cno) &&
- (count = nilfs_cpfile_block_sub_valid_checkpoints(
- cpfile, cp_bh, kaddr, nicps)) == 0) {
- /* make hole */
- kunmap_atomic(kaddr, KM_USER0);
- brelse(cp_bh);
- ret = nilfs_cpfile_delete_checkpoint_block(
- cpfile, cno);
- if (ret == 0)
- continue;
- printk(KERN_ERR "%s: cannot delete block\n",
- __func__);
- break;
+ if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
+ count =
+ nilfs_cpfile_block_sub_valid_checkpoints(
+ cpfile, cp_bh, kaddr, nicps);
+ if (count == 0) {
+ /* make hole */
+ kunmap_atomic(kaddr, KM_USER0);
+ brelse(cp_bh);
+ ret =
+ nilfs_cpfile_delete_checkpoint_block(
+ cpfile, cno);
+ if (ret == 0)
+ continue;
+ printk(KERN_ERR
+ "%s: cannot delete block\n",
+ __func__);
+ break;
+ }
}
}
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index d369ac718277..236753df5cdf 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -51,11 +51,11 @@ static int nilfs_direct_lookup(const struct nilfs_bmap *bmap,
struct nilfs_direct *direct;
__u64 ptr;
- direct = (struct nilfs_direct *)bmap;
- if ((key > NILFS_DIRECT_KEY_MAX) ||
- (level != 1) || /* XXX: use macro for level 1 */
- ((ptr = nilfs_direct_get_ptr(direct, key)) ==
- NILFS_BMAP_INVALID_PTR))
+ direct = (struct nilfs_direct *)bmap; /* XXX: use macro for level 1 */
+ if (key > NILFS_DIRECT_KEY_MAX || level != 1)
+ return -ENOENT;
+ ptr = nilfs_direct_get_ptr(direct, key);
+ if (ptr == NILFS_BMAP_INVALID_PTR)
return -ENOENT;
if (ptrp != NULL)
@@ -73,9 +73,10 @@ static int nilfs_direct_lookup_contig(const struct nilfs_bmap *bmap,
sector_t blocknr;
int ret, cnt;
- if (key > NILFS_DIRECT_KEY_MAX ||
- (ptr = nilfs_direct_get_ptr(direct, key)) ==
- NILFS_BMAP_INVALID_PTR)
+ if (key > NILFS_DIRECT_KEY_MAX)
+ return -ENOENT;
+ ptr = nilfs_direct_get_ptr(direct, key);
+ if (ptr == NILFS_BMAP_INVALID_PTR)
return -ENOENT;
if (NILFS_BMAP_USE_VBN(bmap)) {
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index f6af76042d80..d6b2b83de363 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
unsigned int cmd, void __user *argp)
{
struct nilfs_argv argv[5];
- const static size_t argsz[5] = {
+ static const size_t argsz[5] = {
sizeof(struct nilfs_vdesc),
sizeof(struct nilfs_period),
sizeof(__u64),
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 17584c524486..105b508b47a8 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2829,7 +2829,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|| sci->sc_seq_request != sci->sc_seq_done);
spin_unlock(&sci->sc_state_lock);
- if (flag || nilfs_segctor_confirm(sci))
+ if (flag || !nilfs_segctor_confirm(sci))
nilfs_segctor_write_out(sci);
WARN_ON(!list_empty(&sci->sc_copied_buffers));
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index c9ee67b442e1..1afb0a10229f 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -121,7 +121,7 @@ static int idr_callback(int id, void *p, void *data)
if (warned)
return 0;
- warned = false;
+ warned = true;
entry = p;
ientry = container_of(entry, struct inotify_inode_mark_entry, fsn_entry);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 8271cf05c957..a94e8bd8eb1f 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -552,7 +552,7 @@ retry:
spin_lock(&group->inotify_data.idr_lock);
ret = idr_get_new_above(&group->inotify_data.idr, &tmp_ientry->fsn_entry,
- group->inotify_data.last_wd,
+ group->inotify_data.last_wd+1,
&tmp_ientry->wd);
spin_unlock(&group->inotify_data.idr_lock);
if (ret) {
@@ -632,7 +632,7 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign
spin_lock_init(&group->inotify_data.idr_lock);
idr_init(&group->inotify_data.idr);
- group->inotify_data.last_wd = 1;
+ group->inotify_data.last_wd = 0;
group->inotify_data.user = user;
group->inotify_data.fa = NULL;
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 3dae4a13f6e4..7e9df11260f4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -599,7 +599,7 @@ bail:
return ret;
}
-/*
+/*
* ocfs2_dio_end_io is called by the dio core when a dio is finished. We're
* particularly interested in the aio/dio case. Like the core uses
* i_alloc_sem, we use the rw_lock DLM lock to protect io on one node from
@@ -670,7 +670,7 @@ static ssize_t ocfs2_direct_IO(int rw,
ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
inode->i_sb->s_bdev, iov, offset,
- nr_segs,
+ nr_segs,
ocfs2_direct_IO_get_blocks,
ocfs2_dio_end_io);
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index d43d34a1dd31..21c808f752d8 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -368,7 +368,7 @@ int ocfs2_read_blocks(struct ocfs2_caching_info *ci, u64 block, int nr,
}
ocfs2_metadata_cache_io_unlock(ci);
- mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
+ mlog(ML_BH_IO, "block=(%llu), nr=(%d), cached=%s, flags=0x%x\n",
(unsigned long long)block, nr,
((flags & OCFS2_BH_IGNORE_CACHE) || ignore_cache) ? "no" : "yes",
flags);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index eda5b8bcddd5..5c9890006708 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -78,7 +78,7 @@ static struct o2hb_callback *hbcall_from_type(enum o2hb_callback_type type);
unsigned int o2hb_dead_threshold = O2HB_DEFAULT_DEAD_THRESHOLD;
-/* Only sets a new threshold if there are no active regions.
+/* Only sets a new threshold if there are no active regions.
*
* No locking or otherwise interesting code is required for reading
* o2hb_dead_threshold as it can't change once regions are active and
@@ -170,7 +170,7 @@ static void o2hb_write_timeout(struct work_struct *work)
mlog(ML_ERROR, "Heartbeat write timeout to device %s after %u "
"milliseconds\n", reg->hr_dev_name,
- jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
+ jiffies_to_msecs(jiffies - reg->hr_last_timeout_start));
o2quo_disk_timeout();
}
@@ -624,7 +624,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
"seq %llu last %llu changed %u equal %u\n",
slot->ds_node_num, (long long)slot->ds_last_generation,
le32_to_cpu(hb_block->hb_cksum),
- (unsigned long long)le64_to_cpu(hb_block->hb_seq),
+ (unsigned long long)le64_to_cpu(hb_block->hb_seq),
(unsigned long long)slot->ds_last_time, slot->ds_changed_samples,
slot->ds_equal_samples);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 334f231a422c..d8d0c65ac03c 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -485,7 +485,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
}
if (was_valid && !valid) {
- printk(KERN_INFO "o2net: no longer connected to "
+ printk(KERN_NOTICE "o2net: no longer connected to "
SC_NODEF_FMT "\n", SC_NODEF_ARGS(old_sc));
o2net_complete_nodes_nsw(nn);
}
@@ -493,7 +493,7 @@ static void o2net_set_nn_state(struct o2net_node *nn,
if (!was_valid && valid) {
o2quo_conn_up(o2net_num_from_nn(nn));
cancel_delayed_work(&nn->nn_connect_expired);
- printk(KERN_INFO "o2net: %s " SC_NODEF_FMT "\n",
+ printk(KERN_NOTICE "o2net: %s " SC_NODEF_FMT "\n",
o2nm_this_node() > sc->sc_node->nd_num ?
"connected to" : "accepted connection from",
SC_NODEF_ARGS(sc));
@@ -930,7 +930,7 @@ static void o2net_sendpage(struct o2net_sock_container *sc,
cond_resched();
continue;
}
- mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
+ mlog(ML_ERROR, "sendpage of size %zu to " SC_NODEF_FMT
" failed with %zd\n", size, SC_NODEF_ARGS(sc), ret);
o2net_ensure_shutdown(nn, sc, 0);
break;
@@ -1476,14 +1476,14 @@ static void o2net_idle_timer(unsigned long data)
do_gettimeofday(&now);
- printk(KERN_INFO "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
+ printk(KERN_NOTICE "o2net: connection to " SC_NODEF_FMT " has been idle for %u.%u "
"seconds, shutting it down.\n", SC_NODEF_ARGS(sc),
o2net_idle_timeout() / 1000,
o2net_idle_timeout() % 1000);
mlog(ML_NOTICE, "here are some times that might help debug the "
"situation: (tmr %ld.%ld now %ld.%ld dr %ld.%ld adv "
"%ld.%ld:%ld.%ld func (%08x:%u) %ld.%ld:%ld.%ld)\n",
- sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
+ sc->sc_tv_timer.tv_sec, (long) sc->sc_tv_timer.tv_usec,
now.tv_sec, (long) now.tv_usec,
sc->sc_tv_data_ready.tv_sec, (long) sc->sc_tv_data_ready.tv_usec,
sc->sc_tv_advance_start.tv_sec,
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 8d58cfe410b1..96fa7ebc530c 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -32,10 +32,10 @@
* on their number */
#define O2NET_QUORUM_DELAY_MS ((o2hb_dead_threshold + 2) * O2HB_REGION_TIMEOUT_MS)
-/*
+/*
* This version number represents quite a lot, unfortunately. It not
* only represents the raw network message protocol on the wire but also
- * locking semantics of the file system using the protocol. It should
+ * locking semantics of the file system using the protocol. It should
* be somewhere else, I'm sure, but right now it isn't.
*
* With version 11, we separate out the filesystem locking portion. The
diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h
index b5786a787fab..3cfa114aa391 100644
--- a/fs/ocfs2/dlm/dlmapi.h
+++ b/fs/ocfs2/dlm/dlmapi.h
@@ -95,7 +95,7 @@ const char *dlm_errname(enum dlm_status err);
mlog(ML_ERROR, "dlm status = %s\n", dlm_errname((st))); \
} while (0)
-#define DLM_LKSB_UNUSED1 0x01
+#define DLM_LKSB_UNUSED1 0x01
#define DLM_LKSB_PUT_LVB 0x02
#define DLM_LKSB_GET_LVB 0x04
#define DLM_LKSB_UNUSED2 0x08
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 01cf8cc3d286..dccc439fa087 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -123,7 +123,7 @@ static void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
dlm_lock_put(lock);
/* free up the reserved bast that we are cancelling.
* guaranteed that this will not be the last reserved
- * ast because *both* an ast and a bast were reserved
+ * ast because *both* an ast and a bast were reserved
* to get to this point. the res->spinlock will not be
* taken here */
dlm_lockres_release_ast(dlm, res);
diff --git a/fs/ocfs2/dlm/dlmconvert.c b/fs/ocfs2/dlm/dlmconvert.c
index ca96bce50e18..f283bce776b4 100644
--- a/fs/ocfs2/dlm/dlmconvert.c
+++ b/fs/ocfs2/dlm/dlmconvert.c
@@ -396,7 +396,7 @@ static enum dlm_status dlm_send_remote_convert_request(struct dlm_ctxt *dlm,
/* instead of logging the same network error over
* and over, sleep here and wait for the heartbeat
* to notice the node is dead. times out after 5s. */
- dlm_wait_for_node_death(dlm, res->owner,
+ dlm_wait_for_node_death(dlm, res->owner,
DLM_NODE_DEATH_WAIT_MAX);
ret = DLM_RECOVERING;
mlog(0, "node %u died so returning DLM_RECOVERING "
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c
index 42b0bad7a612..0cd24cf54396 100644
--- a/fs/ocfs2/dlm/dlmdebug.c
+++ b/fs/ocfs2/dlm/dlmdebug.c
@@ -102,7 +102,7 @@ void __dlm_print_one_lock_resource(struct dlm_lock_resource *res)
assert_spin_locked(&res->spinlock);
stringify_lockname(res->lockname.name, res->lockname.len,
- buf, sizeof(buf) - 1);
+ buf, sizeof(buf));
printk("lockres: %s, owner=%u, state=%u\n",
buf, res->owner, res->state);
printk(" last used: %lu, refcnt: %u, on purge list: %s\n",
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 0334000676d3..988c9055fd4e 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -816,7 +816,7 @@ static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
}
/* Once the dlm ctxt is marked as leaving then we don't want
- * to be put in someone's domain map.
+ * to be put in someone's domain map.
* Also, explicitly disallow joining at certain troublesome
* times (ie. during recovery). */
if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
diff --git a/fs/ocfs2/dlm/dlmlock.c b/fs/ocfs2/dlm/dlmlock.c
index 437698e9465f..733337772671 100644
--- a/fs/ocfs2/dlm/dlmlock.c
+++ b/fs/ocfs2/dlm/dlmlock.c
@@ -269,7 +269,7 @@ static enum dlm_status dlmlock_remote(struct dlm_ctxt *dlm,
}
dlm_revert_pending_lock(res, lock);
dlm_lock_put(lock);
- } else if (dlm_is_recovery_lock(res->lockname.name,
+ } else if (dlm_is_recovery_lock(res->lockname.name,
res->lockname.len)) {
/* special case for the $RECOVERY lock.
* there will never be an AST delivered to put
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 03ccf9a7b1f4..a659606dcb95 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -366,7 +366,7 @@ void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
struct dlm_master_list_entry *mle;
assert_spin_locked(&dlm->spinlock);
-
+
list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
if (node_up)
dlm_mle_node_up(dlm, mle, NULL, idx);
@@ -833,7 +833,7 @@ lookup:
__dlm_insert_mle(dlm, mle);
/* still holding the dlm spinlock, check the recovery map
- * to see if there are any nodes that still need to be
+ * to see if there are any nodes that still need to be
* considered. these will not appear in the mle nodemap
* but they might own this lockres. wait on them. */
bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
@@ -883,7 +883,7 @@ redo_request:
msleep(500);
}
continue;
- }
+ }
dlm_kick_recovery_thread(dlm);
msleep(1000);
@@ -939,8 +939,8 @@ wait:
res->lockname.name, blocked);
if (++tries > 20) {
mlog(ML_ERROR, "%s:%.*s: spinning on "
- "dlm_wait_for_lock_mastery, blocked=%d\n",
- dlm->name, res->lockname.len,
+ "dlm_wait_for_lock_mastery, blocked=%d\n",
+ dlm->name, res->lockname.len,
res->lockname.name, blocked);
dlm_print_one_lock_resource(res);
dlm_print_one_mle(mle);
@@ -1029,7 +1029,7 @@ recheck:
ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
b = (mle->type == DLM_MLE_BLOCK);
if ((*blocked && !b) || (!*blocked && b)) {
- mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
+ mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
dlm->name, res->lockname.len, res->lockname.name,
*blocked, b);
*blocked = b;
@@ -1602,7 +1602,7 @@ send_response:
}
mlog(0, "%u is the owner of %.*s, cleaning everyone else\n",
dlm->node_num, res->lockname.len, res->lockname.name);
- ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
+ ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx,
DLM_ASSERT_MASTER_MLE_CLEANUP);
if (ret < 0) {
mlog(ML_ERROR, "failed to dispatch assert master work\n");
@@ -1701,7 +1701,7 @@ again:
if (r & DLM_ASSERT_RESPONSE_REASSERT) {
mlog(0, "%.*s: node %u create mles on other "
- "nodes and requests a re-assert\n",
+ "nodes and requests a re-assert\n",
namelen, lockname, to);
reassert = 1;
}
@@ -1812,7 +1812,7 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
spin_unlock(&dlm->master_lock);
spin_unlock(&dlm->spinlock);
goto done;
- }
+ }
}
}
spin_unlock(&dlm->master_lock);
@@ -1883,7 +1883,7 @@ ok:
int extra_ref = 0;
int nn = -1;
int rr, err = 0;
-
+
spin_lock(&mle->spinlock);
if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION)
extra_ref = 1;
@@ -1891,7 +1891,7 @@ ok:
/* MASTER mle: if any bits set in the response map
* then the calling node needs to re-assert to clear
* up nodes that this node contacted */
- while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
+ while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,
nn+1)) < O2NM_MAX_NODES) {
if (nn != dlm->node_num && nn != assert->node_idx)
master_request = 1;
@@ -2002,7 +2002,7 @@ kill:
__dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
spin_unlock(&dlm->spinlock);
- *ret_data = (void *)res;
+ *ret_data = (void *)res;
dlm_put(dlm);
return -EINVAL;
}
@@ -2040,10 +2040,10 @@ int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
item->u.am.request_from = request_from;
item->u.am.flags = flags;
- if (ignore_higher)
- mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
+ if (ignore_higher)
+ mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len,
res->lockname.name);
-
+
spin_lock(&dlm->work_lock);
list_add_tail(&item->list, &dlm->work_list);
spin_unlock(&dlm->work_lock);
@@ -2133,7 +2133,7 @@ put:
* think that $RECOVERY is currently mastered by a dead node. If so,
* we wait a short time to allow that node to get notified by its own
* heartbeat stack, then check again. All $RECOVERY lock resources
- * mastered by dead nodes are purged when the hearbeat callback is
+ * mastered by dead nodes are purged when the hearbeat callback is
* fired, so we can know for sure that it is safe to continue once
* the node returns a live node or no node. */
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
@@ -2174,7 +2174,7 @@ static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
ret = -EAGAIN;
}
spin_unlock(&dlm->spinlock);
- mlog(0, "%s: reco lock master is %u\n", dlm->name,
+ mlog(0, "%s: reco lock master is %u\n", dlm->name,
master);
break;
}
@@ -2602,7 +2602,7 @@ fail:
mlog(0, "%s:%.*s: timed out during migration\n",
dlm->name, res->lockname.len, res->lockname.name);
- /* avoid hang during shutdown when migrating lockres
+ /* avoid hang during shutdown when migrating lockres
* to a node which also goes down */
if (dlm_is_node_dead(dlm, target)) {
mlog(0, "%s:%.*s: expected migration "
@@ -2738,7 +2738,7 @@ static int dlm_migration_can_proceed(struct dlm_ctxt *dlm,
can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING);
spin_unlock(&res->spinlock);
- /* target has died, so make the caller break out of the
+ /* target has died, so make the caller break out of the
* wait_event, but caller must recheck the domain_map */
spin_lock(&dlm->spinlock);
if (!test_bit(mig_target, dlm->domain_map))
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 2f9e4e19a4f2..344bcf90cbf4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1050,7 +1050,7 @@ static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
if (lock->ml.node == dead_node) {
mlog(0, "AHA! there was "
"a $RECOVERY lock for dead "
- "node %u (%s)!\n",
+ "node %u (%s)!\n",
dead_node, dlm->name);
list_del_init(&lock->list);
dlm_lock_put(lock);
@@ -1164,6 +1164,39 @@ static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
mres->master = master;
}
+static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
+ struct dlm_migratable_lockres *mres,
+ int queue)
+{
+ if (!lock->lksb)
+ return;
+
+ /* Ignore lvb in all locks in the blocked list */
+ if (queue == DLM_BLOCKED_LIST)
+ return;
+
+ /* Only consider lvbs in locks with granted EX or PR lock levels */
+ if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
+ return;
+
+ if (dlm_lvb_is_empty(mres->lvb)) {
+ memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
+ return;
+ }
+
+ /* Ensure the lvb copied for migration matches in other valid locks */
+ if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
+ return;
+
+ mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
+ "node=%u\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
+ lock->lockres->lockname.len, lock->lockres->lockname.name,
+ lock->ml.node);
+ dlm_print_one_lock_resource(lock->lockres);
+ BUG();
+}
/* returns 1 if this lock fills the network structure,
* 0 otherwise */
@@ -1181,20 +1214,7 @@ static int dlm_add_lock_to_array(struct dlm_lock *lock,
ml->list = queue;
if (lock->lksb) {
ml->flags = lock->lksb->flags;
- /* send our current lvb */
- if (ml->type == LKM_EXMODE ||
- ml->type == LKM_PRMODE) {
- /* if it is already set, this had better be a PR
- * and it has to match */
- if (!dlm_lvb_is_empty(mres->lvb) &&
- (ml->type == LKM_EXMODE ||
- memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))) {
- mlog(ML_ERROR, "mismatched lvbs!\n");
- dlm_print_one_lock_resource(lock->lockres);
- BUG();
- }
- memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
- }
+ dlm_prepare_lvb_for_migration(lock, mres, queue);
}
ml->node = lock->ml.node;
mres->num_locks++;
@@ -1730,6 +1750,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
struct dlm_lock *lock = NULL;
u8 from = O2NM_MAX_NODES;
unsigned int added = 0;
+ __be64 c;
mlog(0, "running %d locks for this lockres\n", mres->num_locks);
for (i=0; i<mres->num_locks; i++) {
@@ -1777,19 +1798,48 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
/* lock is always created locally first, and
* destroyed locally last. it must be on the list */
if (!lock) {
- __be64 c = ml->cookie;
- mlog(ML_ERROR, "could not find local lock "
- "with cookie %u:%llu!\n",
+ c = ml->cookie;
+ mlog(ML_ERROR, "Could not find local lock "
+ "with cookie %u:%llu, node %u, "
+ "list %u, flags 0x%x, type %d, "
+ "conv %d, highest blocked %d\n",
dlm_get_lock_cookie_node(be64_to_cpu(c)),
- dlm_get_lock_cookie_seq(be64_to_cpu(c)));
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ ml->node, ml->list, ml->flags, ml->type,
+ ml->convert_type, ml->highest_blocked);
+ __dlm_print_one_lock_resource(res);
+ BUG();
+ }
+
+ if (lock->ml.node != ml->node) {
+ c = lock->ml.cookie;
+ mlog(ML_ERROR, "Mismatched node# in lock "
+ "cookie %u:%llu, name %.*s, node %u\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ res->lockname.len, res->lockname.name,
+ lock->ml.node);
+ c = ml->cookie;
+ mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
+ "node %u, list %u, flags 0x%x, type %d, "
+ "conv %d, highest blocked %d\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ ml->node, ml->list, ml->flags, ml->type,
+ ml->convert_type, ml->highest_blocked);
__dlm_print_one_lock_resource(res);
BUG();
}
- BUG_ON(lock->ml.node != ml->node);
if (tmpq != queue) {
- mlog(0, "lock was on %u instead of %u for %.*s\n",
- j, ml->list, res->lockname.len, res->lockname.name);
+ c = ml->cookie;
+ mlog(0, "Lock cookie %u:%llu was on list %u "
+ "instead of list %u for %.*s\n",
+ dlm_get_lock_cookie_node(be64_to_cpu(c)),
+ dlm_get_lock_cookie_seq(be64_to_cpu(c)),
+ j, ml->list, res->lockname.len,
+ res->lockname.name);
+ __dlm_print_one_lock_resource(res);
spin_unlock(&res->spinlock);
continue;
}
@@ -1839,7 +1889,7 @@ static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
* the lvb. */
memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
} else {
- /* otherwise, the node is sending its
+ /* otherwise, the node is sending its
* most recent valid lvb info */
BUG_ON(ml->type != LKM_EXMODE &&
ml->type != LKM_PRMODE);
@@ -1886,7 +1936,7 @@ skip_lvb:
spin_lock(&res->spinlock);
list_for_each_entry(lock, queue, list) {
if (lock->ml.cookie == ml->cookie) {
- __be64 c = lock->ml.cookie;
+ c = lock->ml.cookie;
mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
"exists on this lockres!\n", dlm->name,
res->lockname.len, res->lockname.name,
@@ -2114,7 +2164,7 @@ static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
assert_spin_locked(&res->spinlock);
if (res->owner == dlm->node_num)
- /* if this node owned the lockres, and if the dead node
+ /* if this node owned the lockres, and if the dead node
* had an EX when he died, blank out the lvb */
search_node = dead_node;
else {
@@ -2152,7 +2202,7 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
/* this node is the lockres master:
* 1) remove any stale locks for the dead node
- * 2) if the dead node had an EX when he died, blank out the lvb
+ * 2) if the dead node had an EX when he died, blank out the lvb
*/
assert_spin_locked(&dlm->spinlock);
assert_spin_locked(&res->spinlock);
@@ -2193,7 +2243,12 @@ static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
"dropping ref from lockres\n", dlm->name,
res->lockname.len, res->lockname.name, freed, dead_node);
- BUG_ON(!test_bit(dead_node, res->refmap));
+ if(!test_bit(dead_node, res->refmap)) {
+ mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
+ "but ref was not set\n", dlm->name,
+ res->lockname.len, res->lockname.name, freed, dead_node);
+ __dlm_print_one_lock_resource(res);
+ }
dlm_lockres_clear_refmap_bit(dead_node, res);
} else if (test_bit(dead_node, res->refmap)) {
mlog(0, "%s:%.*s: dead node %u had a ref, but had "
@@ -2260,7 +2315,7 @@ static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
}
spin_unlock(&res->spinlock);
continue;
- }
+ }
spin_lock(&res->spinlock);
/* zero the lvb if necessary */
dlm_revalidate_lvb(dlm, res, dead_node);
@@ -2411,7 +2466,7 @@ static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
* this function on each node racing to become the recovery
* master will not stop attempting this until either:
* a) this node gets the EX (and becomes the recovery master),
- * or b) dlm->reco.new_master gets set to some nodenum
+ * or b) dlm->reco.new_master gets set to some nodenum
* != O2NM_INVALID_NODE_NUM (another node will do the reco).
* so each time a recovery master is needed, the entire cluster
* will sync at this point. if the new master dies, that will
@@ -2424,7 +2479,7 @@ static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
-again:
+again:
memset(&lksb, 0, sizeof(lksb));
ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
@@ -2437,8 +2492,8 @@ again:
if (ret == DLM_NORMAL) {
mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
dlm->name, dlm->node_num);
-
- /* got the EX lock. check to see if another node
+
+ /* got the EX lock. check to see if another node
* just became the reco master */
if (dlm_reco_master_ready(dlm)) {
mlog(0, "%s: got reco EX lock, but %u will "
@@ -2451,12 +2506,12 @@ again:
/* see if recovery was already finished elsewhere */
spin_lock(&dlm->spinlock);
if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
- status = -EINVAL;
+ status = -EINVAL;
mlog(0, "%s: got reco EX lock, but "
"node got recovered already\n", dlm->name);
if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
mlog(ML_ERROR, "%s: new master is %u "
- "but no dead node!\n",
+ "but no dead node!\n",
dlm->name, dlm->reco.new_master);
BUG();
}
@@ -2468,7 +2523,7 @@ again:
* set the master and send the messages to begin recovery */
if (!status) {
mlog(0, "%s: dead=%u, this=%u, sending "
- "begin_reco now\n", dlm->name,
+ "begin_reco now\n", dlm->name,
dlm->reco.dead_node, dlm->node_num);
status = dlm_send_begin_reco_message(dlm,
dlm->reco.dead_node);
@@ -2501,7 +2556,7 @@ again:
mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
dlm->name, dlm->node_num);
/* another node is master. wait on
- * reco.new_master != O2NM_INVALID_NODE_NUM
+ * reco.new_master != O2NM_INVALID_NODE_NUM
* for at most one second */
wait_event_timeout(dlm->dlm_reco_thread_wq,
dlm_reco_master_ready(dlm),
@@ -2589,7 +2644,13 @@ retry:
"begin reco msg (%d)\n", dlm->name, nodenum, ret);
ret = 0;
}
- if (ret == -EAGAIN) {
+
+ /*
+ * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
+ * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
+ * We are handling both for compatibility reasons.
+ */
+ if (ret == -EAGAIN || ret == EAGAIN) {
mlog(0, "%s: trying to start recovery of node "
"%u, but node %u is waiting for last recovery "
"to complete, backoff for a bit\n", dlm->name,
@@ -2599,7 +2660,7 @@ retry:
}
if (ret < 0) {
struct dlm_lock_resource *res;
- /* this is now a serious problem, possibly ENOMEM
+ /* this is now a serious problem, possibly ENOMEM
* in the network stack. must retry */
mlog_errno(ret);
mlog(ML_ERROR, "begin reco of dlm %s to node %u "
@@ -2612,7 +2673,7 @@ retry:
} else {
mlog(ML_ERROR, "recovery lock not found\n");
}
- /* sleep for a bit in hopes that we can avoid
+ /* sleep for a bit in hopes that we can avoid
* another ENOMEM */
msleep(100);
goto retry;
@@ -2664,7 +2725,7 @@ int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
}
if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) {
mlog(ML_NOTICE, "%s: dead_node previously set to %u, "
- "node %u changing it to %u\n", dlm->name,
+ "node %u changing it to %u\n", dlm->name,
dlm->reco.dead_node, br->node_idx, br->dead_node);
}
dlm_set_reco_master(dlm, br->node_idx);
@@ -2730,8 +2791,8 @@ stage2:
if (ret < 0) {
mlog_errno(ret);
if (dlm_is_host_down(ret)) {
- /* this has no effect on this recovery
- * session, so set the status to zero to
+ /* this has no effect on this recovery
+ * session, so set the status to zero to
* finish out the last recovery */
mlog(ML_ERROR, "node %u went down after this "
"node finished recovery.\n", nodenum);
@@ -2768,7 +2829,7 @@ int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data,
mlog(0, "%s: node %u finalizing recovery stage%d of "
"node %u (%u:%u)\n", dlm->name, fr->node_idx, stage,
fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master);
-
+
spin_lock(&dlm->spinlock);
if (dlm->reco.new_master != fr->node_idx) {
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 00f53b2aea76..49e29ecd0201 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -190,8 +190,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
actions &= ~(DLM_UNLOCK_REMOVE_LOCK|
DLM_UNLOCK_REGRANT_LOCK|
DLM_UNLOCK_CLEAR_CONVERT_TYPE);
- } else if (status == DLM_RECOVERING ||
- status == DLM_MIGRATING ||
+ } else if (status == DLM_RECOVERING ||
+ status == DLM_MIGRATING ||
status == DLM_FORWARD) {
/* must clear the actions because this unlock
* is about to be retried. cannot free or do
@@ -661,14 +661,14 @@ retry:
if (call_ast) {
mlog(0, "calling unlockast(%p, %d)\n", data, status);
if (is_master) {
- /* it is possible that there is one last bast
+ /* it is possible that there is one last bast
* pending. make sure it is flushed, then
* call the unlockast.
* not an issue if this is a mastered remotely,
* since this lock has been removed from the
* lockres queues and cannot be found. */
dlm_kick_thread(dlm, NULL);
- wait_event(dlm->ast_wq,
+ wait_event(dlm->ast_wq,
dlm_lock_basts_flushed(dlm, lock));
}
(*unlockast)(data, status);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index c5e4a49e3a12..e044019cb3b1 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -875,6 +875,14 @@ static inline void ocfs2_generic_handle_convert_action(struct ocfs2_lock_res *lo
lockres_or_flags(lockres, OCFS2_LOCK_NEEDS_REFRESH);
lockres->l_level = lockres->l_requested;
+
+ /*
+ * We set the OCFS2_LOCK_UPCONVERT_FINISHING flag before clearing
+ * the OCFS2_LOCK_BUSY flag to prevent the dc thread from
+ * downconverting the lock before the upconvert has fully completed.
+ */
+ lockres_or_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
mlog_exit_void();
@@ -907,8 +915,6 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
assert_spin_locked(&lockres->l_lock);
- lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
-
if (level > lockres->l_blocking) {
/* only schedule a downconvert if we haven't already scheduled
* one that goes low enough to satisfy the level we're
@@ -921,6 +927,9 @@ static int ocfs2_generic_handle_bast(struct ocfs2_lock_res *lockres,
lockres->l_blocking = level;
}
+ if (needs_downconvert)
+ lockres_or_flags(lockres, OCFS2_LOCK_BLOCKED);
+
mlog_exit(needs_downconvert);
return needs_downconvert;
}
@@ -1133,6 +1142,7 @@ static inline void ocfs2_recover_from_dlm_error(struct ocfs2_lock_res *lockres,
mlog_entry_void();
spin_lock_irqsave(&lockres->l_lock, flags);
lockres_clear_flags(lockres, OCFS2_LOCK_BUSY);
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
if (convert)
lockres->l_action = OCFS2_AST_INVALID;
else
@@ -1323,13 +1333,13 @@ static int __ocfs2_cluster_lock(struct ocfs2_super *osb,
again:
wait = 0;
+ spin_lock_irqsave(&lockres->l_lock, flags);
+
if (catch_signals && signal_pending(current)) {
ret = -ERESTARTSYS;
- goto out;
+ goto unlock;
}
- spin_lock_irqsave(&lockres->l_lock, flags);
-
mlog_bug_on_msg(lockres->l_flags & OCFS2_LOCK_FREEING,
"Cluster lock called on freeing lockres %s! flags "
"0x%lx\n", lockres->l_name, lockres->l_flags);
@@ -1346,6 +1356,25 @@ again:
goto unlock;
}
+ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) {
+ /*
+ * We've upconverted. If the lock now has a level we can
+ * work with, we take it. If, however, the lock is not at the
+ * required level, we go thru the full cycle. One way this could
+ * happen is if a process requesting an upconvert to PR is
+ * closely followed by another requesting upconvert to an EX.
+ * If the process requesting EX lands here, we want it to
+ * continue attempting to upconvert and let the process
+ * requesting PR take the lock.
+ * If multiple processes request upconvert to PR, the first one
+ * here will take the lock. The others will have to go thru the
+ * OCFS2_LOCK_BLOCKED check to ensure that there is no pending
+ * downconvert request.
+ */
+ if (level <= lockres->l_level)
+ goto update_holders;
+ }
+
if (lockres->l_flags & OCFS2_LOCK_BLOCKED &&
!ocfs2_may_continue_on_blocked_lock(lockres, level)) {
/* is the lock is currently blocked on behalf of
@@ -1416,11 +1445,14 @@ again:
goto again;
}
+update_holders:
/* Ok, if we get here then we're good to go. */
ocfs2_inc_holders(lockres, level);
ret = 0;
unlock:
+ lockres_clear_flags(lockres, OCFS2_LOCK_UPCONVERT_FINISHING);
+
spin_unlock_irqrestore(&lockres->l_lock, flags);
out:
/*
@@ -3155,7 +3187,7 @@ out:
/* Mark the lockres as being dropped. It will no longer be
* queued if blocking, but we still may have to wait on it
* being dequeued from the downconvert thread before we can consider
- * it safe to drop.
+ * it safe to drop.
*
* You can *not* attempt to call cluster_lock on this lockres anymore. */
void ocfs2_mark_lockres_freeing(struct ocfs2_lock_res *lockres)
@@ -3352,6 +3384,7 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
unsigned long flags;
int blocking;
int new_level;
+ int level;
int ret = 0;
int set_lvb = 0;
unsigned int gen;
@@ -3360,9 +3393,17 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
spin_lock_irqsave(&lockres->l_lock, flags);
- BUG_ON(!(lockres->l_flags & OCFS2_LOCK_BLOCKED));
-
recheck:
+ /*
+ * Is it still blocking? If not, we have no more work to do.
+ */
+ if (!(lockres->l_flags & OCFS2_LOCK_BLOCKED)) {
+ BUG_ON(lockres->l_blocking != DLM_LOCK_NL);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ ret = 0;
+ goto leave;
+ }
+
if (lockres->l_flags & OCFS2_LOCK_BUSY) {
/* XXX
* This is a *big* race. The OCFS2_LOCK_PENDING flag
@@ -3401,6 +3442,31 @@ recheck:
goto leave;
}
+ /*
+ * This prevents livelocks. OCFS2_LOCK_UPCONVERT_FINISHING flag is
+ * set when the ast is received for an upconvert just before the
+ * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
+ * on the heels of the ast, we want to delay the downconvert just
+ * enough to allow the up requestor to do its task. Because this
+ * lock is in the blocked queue, the lock will be downconverted
+ * as soon as the requestor is done with the lock.
+ */
+ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
+ goto leave_requeue;
+
+ /*
+ * How can we block and yet be at NL? We were trying to upconvert
+ * from NL and got canceled. The code comes back here, and now
+ * we notice and clear BLOCKING.
+ */
+ if (lockres->l_level == DLM_LOCK_NL) {
+ BUG_ON(lockres->l_ex_holders || lockres->l_ro_holders);
+ lockres->l_blocking = DLM_LOCK_NL;
+ lockres_clear_flags(lockres, OCFS2_LOCK_BLOCKED);
+ spin_unlock_irqrestore(&lockres->l_lock, flags);
+ goto leave;
+ }
+
/* if we're blocking an exclusive and we have *any* holders,
* then requeue. */
if ((lockres->l_blocking == DLM_LOCK_EX)
@@ -3438,6 +3504,7 @@ recheck:
* may sleep, so we save off a copy of what we're blocking as
* it may change while we're not holding the spin lock. */
blocking = lockres->l_blocking;
+ level = lockres->l_level;
spin_unlock_irqrestore(&lockres->l_lock, flags);
ctl->unblock_action = lockres->l_ops->downconvert_worker(lockres, blocking);
@@ -3446,7 +3513,7 @@ recheck:
goto leave;
spin_lock_irqsave(&lockres->l_lock, flags);
- if (blocking != lockres->l_blocking) {
+ if ((blocking != lockres->l_blocking) || (level != lockres->l_level)) {
/* If this changed underneath us, then we can't drop
* it just yet. */
goto recheck;
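
The dlmglue hunks above act as a pair: the cluster lock path takes a fast exit when an upconvert has just finished and the resource already sits at a sufficient level, while ocfs2_unblock_lock() requeues the downconvert as long as OCFS2_LOCK_UPCONVERT_FINISHING is set. Below is a minimal userspace sketch of the fast-path half only; the names, levels and the pthread mutex are stand-ins, not OCFS2 code.

	#include <pthread.h>
	#include <stdio.h>

	enum level { LVL_NL = 0, LVL_PR = 3, LVL_EX = 5 };	/* loosely mimics DLM levels */

	struct lockres {
		pthread_mutex_t lock;		/* stand-in for lockres->l_lock */
		int level;			/* currently granted level */
		unsigned int flags;
		int ro_holders, ex_holders;
	};

	#define FL_UPCONVERT_FINISHING	0x1

	/*
	 * Returns 1 if the fast path satisfied the request, 0 if the caller
	 * would have to go through the full blocking request cycle.
	 */
	static int try_fast_path(struct lockres *res, int wanted)
	{
		int granted = 0;

		pthread_mutex_lock(&res->lock);
		if ((res->flags & FL_UPCONVERT_FINISHING) && wanted <= res->level) {
			if (wanted == LVL_EX)
				res->ex_holders++;
			else
				res->ro_holders++;
			granted = 1;
		}
		/* mirrors the unconditional clear at the unlock: label above */
		res->flags &= ~FL_UPCONVERT_FINISHING;
		pthread_mutex_unlock(&res->lock);
		return granted;
	}

	int main(void)
	{
		static struct lockres res = {
			PTHREAD_MUTEX_INITIALIZER, LVL_PR, FL_UPCONVERT_FINISHING, 0, 0
		};

		printf("PR via fast path: %d\n", try_fast_path(&res, LVL_PR));	/* 1 */
		printf("EX via fast path: %d\n", try_fast_path(&res, LVL_EX));	/* 0 */
		return 0;
	}
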
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 15713cbb865c..19ad145d2af3 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -239,7 +239,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
mlog(0, "Encoding parent: blkno: %llu, generation: %u\n",
(unsigned long long)blkno, generation);
}
-
+
*max_len = len;
bail:
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index d35a27f4523e..5328529e7fd2 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -192,7 +192,7 @@ static int ocfs2_try_to_merge_extent_map(struct ocfs2_extent_map_item *emi,
emi->ei_clusters += ins->ei_clusters;
return 1;
} else if ((ins->ei_phys + ins->ei_clusters) == emi->ei_phys &&
- (ins->ei_cpos + ins->ei_clusters) == emi->ei_phys &&
+ (ins->ei_cpos + ins->ei_clusters) == emi->ei_cpos &&
ins->ei_flags == emi->ei_flags) {
emi->ei_phys = ins->ei_phys;
emi->ei_cpos = ins->ei_cpos;
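
The one-character fix above matters: two cached extents may only be merged when both the logical (cpos) range and the physical (phys) range are contiguous, and the old test compared the logical end against the physical start. A small userspace illustration of the corrected test, using an invented struct rather than ocfs2_extent_map_item:

	#include <stdint.h>
	#include <stdio.h>

	struct ext { uint32_t cpos, phys, clusters; };

	/* Glue 'ins' in front of 'cur' if both ranges line up; returns 1 on merge. */
	static int merge_before(struct ext *cur, const struct ext *ins)
	{
		if (ins->phys + ins->clusters == cur->phys &&
		    ins->cpos + ins->clusters == cur->cpos) {
			cur->phys = ins->phys;
			cur->cpos = ins->cpos;
			cur->clusters += ins->clusters;
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		struct ext cur = { .cpos = 10, .phys = 110, .clusters = 4 };
		struct ext ins = { .cpos = 6,  .phys = 106, .clusters = 4 };

		printf("merged=%d cpos=%u phys=%u clusters=%u\n",
		       merge_before(&cur, &ins), cur.cpos, cur.phys, cur.clusters);
		return 0;
	}
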
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 3d30a1c974a8..558ce0312421 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -749,7 +749,7 @@ static int ocfs2_write_zero_page(struct inode *inode,
int ret;
offset = (size & (PAGE_CACHE_SIZE-1)); /* Within page */
- /* ugh. in prepare/commit_write, if from==to==start of block, we
+ /* ugh. in prepare/commit_write, if from==to==start of block, we
** skip the prepare. make sure we never send an offset for the start
** of a block
*/
@@ -1772,13 +1772,14 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
loff_t *ppos,
size_t count,
int appending,
- int *direct_io)
+ int *direct_io,
+ int *has_refcount)
{
int ret = 0, meta_level = 0;
struct inode *inode = dentry->d_inode;
loff_t saved_pos, end;
- /*
+ /*
* We start with a read level meta lock and only jump to an ex
* if we need to make modifications here.
*/
@@ -1833,6 +1834,8 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
saved_pos,
count,
&meta_level);
+ if (has_refcount)
+ *has_refcount = 1;
}
if (ret < 0) {
@@ -1856,6 +1859,10 @@ static int ocfs2_prepare_inode_for_write(struct dentry *dentry,
break;
}
+ if (has_refcount && *has_refcount == 1) {
+ *direct_io = 0;
+ break;
+ }
/*
* Allowing concurrent direct writes means
* i_size changes wouldn't be synchronized, so
@@ -1899,7 +1906,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb,
loff_t pos)
{
int ret, direct_io, appending, rw_level, have_alloc_sem = 0;
- int can_do_direct;
+ int can_do_direct, has_refcount = 0;
ssize_t written = 0;
size_t ocount; /* original count */
size_t count; /* after file limit checks */
@@ -1942,7 +1949,7 @@ relock:
can_do_direct = direct_io;
ret = ocfs2_prepare_inode_for_write(file->f_path.dentry, ppos,
iocb->ki_left, appending,
- &can_do_direct);
+ &can_do_direct, &has_refcount);
if (ret < 0) {
mlog_errno(ret);
goto out;
@@ -2006,14 +2013,16 @@ out_dio:
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(file->f_flags & O_DIRECT));
- if ((file->f_flags & O_DSYNC && !direct_io) || IS_SYNC(inode)) {
+ if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
+ ((file->f_flags & O_DIRECT) && has_refcount)) {
ret = filemap_fdatawrite_range(file->f_mapping, pos,
pos + count - 1);
if (ret < 0)
written = ret;
if (!ret && (old_size != i_size_read(inode) ||
- old_clusters != OCFS2_I(inode)->ip_clusters)) {
+ old_clusters != OCFS2_I(inode)->ip_clusters ||
+ has_refcount)) {
ret = jbd2_journal_force_commit(osb->journal->j_journal);
if (ret < 0)
written = ret;
@@ -2024,7 +2033,7 @@ out_dio:
pos + count - 1);
}
- /*
+ /*
* deep in g_f_a_w_n()->ocfs2_direct_IO we pass in a ocfs2_dio_end_io
* function pointer which is called when o_direct io completes so that
* it can unlock our rw lock. (it's the clustered equivalent of
@@ -2062,7 +2071,7 @@ static int ocfs2_splice_to_file(struct pipe_inode_info *pipe,
int ret;
ret = ocfs2_prepare_inode_for_write(out->f_path.dentry, &sd->pos,
- sd->total_len, 0, NULL);
+ sd->total_len, 0, NULL, NULL);
if (ret < 0) {
mlog_errno(ret);
return ret;
@@ -2189,7 +2198,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
goto bail;
}
- /*
+ /*
* buffered reads protect themselves in ->readpage(). O_DIRECT reads
* need locks to protect pending reads from racing with truncate.
*/
@@ -2211,10 +2220,10 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
* We're fine letting folks race truncates and extending
* writes with read across the cluster, just like they can
* locally. Hence no rw_lock during read.
- *
+ *
* Take and drop the meta data lock to update inode fields
* like i_size. This allows the checks down below
- * generic_file_aio_read() a chance of actually working.
+ * generic_file_aio_read() a chance of actually working.
*/
ret = ocfs2_inode_lock_atime(inode, filp->f_vfsmnt, &lock_level);
if (ret < 0) {
@@ -2239,7 +2248,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
bail:
if (have_alloc_sem)
up_read(&inode->i_alloc_sem);
- if (rw_level != -1)
+ if (rw_level != -1)
ocfs2_rw_unlock(inode, rw_level);
mlog_exit(ret);
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 0297fb8982b8..88459bdd1ff3 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -475,7 +475,7 @@ static int ocfs2_read_locked_inode(struct inode *inode,
if (args->fi_flags & OCFS2_FI_FLAG_ORPHAN_RECOVERY) {
status = ocfs2_try_open_lock(inode, 0);
if (status) {
- make_bad_inode(inode);
+ make_bad_inode(inode);
return status;
}
}
@@ -684,7 +684,7 @@ bail:
return status;
}
-/*
+/*
* Serialize with orphan dir recovery. If the process doing
* recovery on this orphan dir does an iget() with the dir
* i_mutex held, we'll deadlock here. Instead we detect this
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index 31fbb0619510..7d9d9c132cef 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -7,6 +7,7 @@
#include <linux/fs.h>
#include <linux/mount.h>
+#include <linux/compat.h>
#define MLOG_MASK_PREFIX ML_INODE
#include <cluster/masklog.h>
@@ -181,6 +182,10 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
#ifdef CONFIG_COMPAT
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
+ bool preserve;
+ struct reflink_arguments args;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
switch (cmd) {
case OCFS2_IOC32_GETFLAGS:
cmd = OCFS2_IOC_GETFLAGS;
@@ -195,8 +200,15 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
case OCFS2_IOC_GROUP_EXTEND:
case OCFS2_IOC_GROUP_ADD:
case OCFS2_IOC_GROUP_ADD64:
- case OCFS2_IOC_REFLINK:
break;
+ case OCFS2_IOC_REFLINK:
+ if (copy_from_user(&args, (struct reflink_arguments *)arg,
+ sizeof(args)))
+ return -EFAULT;
+ preserve = (args.preserve != 0);
+
+ return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
+ compat_ptr(args.new_path), preserve);
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index bf34c491ae96..9336c60e3a36 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -2034,7 +2034,7 @@ static int ocfs2_queue_orphans(struct ocfs2_super *osb,
status = -ENOENT;
mlog_errno(status);
return status;
- }
+ }
mutex_lock(&orphan_dir_inode->i_mutex);
status = ocfs2_inode_lock(orphan_dir_inode, NULL, 0);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 9362eea7424b..740f448041e2 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -136,6 +136,10 @@ enum ocfs2_unlock_action {
#define OCFS2_LOCK_PENDING (0x00000400) /* This lockres is pending a
call to dlm_lock. Only
exists with BUSY set. */
+#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
+ * from downconverting
+ * before the upconvert
+ * has completed */
struct ocfs2_lock_res_ops;
diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h
index 1a1a679e51b5..7638a38c32bc 100644
--- a/fs/ocfs2/ocfs2_fs.h
+++ b/fs/ocfs2/ocfs2_fs.h
@@ -1417,9 +1417,16 @@ static inline int ocfs2_fast_symlink_chars(int blocksize)
return blocksize - offsetof(struct ocfs2_dinode, id2.i_symlink);
}
-static inline int ocfs2_max_inline_data(int blocksize)
+static inline int ocfs2_max_inline_data_with_xattr(int blocksize,
+ struct ocfs2_dinode *di)
{
- return blocksize - offsetof(struct ocfs2_dinode, id2.i_data.id_data);
+ if (di && (di->i_dyn_features & OCFS2_INLINE_XATTR_FL))
+ return blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_data.id_data) -
+ di->i_xattr_inline_size;
+ else
+ return blocksize -
+ offsetof(struct ocfs2_dinode, id2.i_data.id_data);
}
static inline int ocfs2_extent_recs_per_inode(int blocksize)
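
The new helper trims the inline-data capacity by the inline xattr area whenever OCFS2_INLINE_XATTR_FL is set on the dinode. A userspace sketch of the same calculation with a made-up dinode layout (the real struct ocfs2_dinode is larger and endian-annotated on disk):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define INLINE_XATTR_FL 0x1

	struct fake_dinode {
		uint8_t  header[200];		/* pretend fixed metadata */
		uint16_t i_dyn_features;
		uint16_t i_xattr_inline_size;
		uint8_t  id_data[];		/* inline file data starts here */
	};

	static int max_inline_data(int blocksize, const struct fake_dinode *di)
	{
		int space = blocksize - (int)offsetof(struct fake_dinode, id_data);

		if (di && (di->i_dyn_features & INLINE_XATTR_FL))
			space -= di->i_xattr_inline_size;
		return space;
	}

	int main(void)
	{
		struct fake_dinode di = { .i_dyn_features = INLINE_XATTR_FL,
					  .i_xattr_inline_size = 256 };

		printf("without xattr: %d bytes\n", max_inline_data(4096, NULL));
		printf("with inline xattr: %d bytes\n", max_inline_data(4096, &di));
		return 0;
	}
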
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 74db2be75dd6..8ae65c9c020c 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -2945,7 +2945,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
while (offset < end) {
page_index = offset >> PAGE_CACHE_SHIFT;
- map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+ map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
if (map_end > end)
map_end = end;
@@ -2957,8 +2957,12 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle,
page = grab_cache_page(mapping, page_index);
- /* This page can't be dirtied before we CoW it out. */
- BUG_ON(PageDirty(page));
+ /*
+ * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, This page
+ * In case PAGE_CACHE_SIZE <= CLUSTER_SIZE, this page
+ * can't be dirtied before we CoW it out.
+ */
+ if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize)
+ BUG_ON(PageDirty(page));
if (!PageUptodate(page)) {
ret = block_read_full_page(page, ocfs2_get_block);
@@ -3170,7 +3174,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb,
while (offset < end) {
page_index = offset >> PAGE_CACHE_SHIFT;
- map_end = (page_index + 1) << PAGE_CACHE_SHIFT;
+ map_end = ((loff_t)page_index + 1) << PAGE_CACHE_SHIFT;
if (map_end > end)
map_end = end;
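
Both casts above exist because, on 32-bit systems, page_index is a 32-bit quantity and the shift would otherwise be performed in 32-bit arithmetic, wrapping for file offsets at or beyond 4 GiB. A compilable demonstration of the wraparound:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		uint32_t page_index = 0x100000;		/* page at the 4 GiB mark */

		int64_t wrong = (int64_t)((page_index + 1) << PAGE_SHIFT);
		int64_t right = ((int64_t)page_index + 1) << PAGE_SHIFT;

		printf("32-bit shift: %lld\n", (long long)wrong);	/* 4096: wrapped */
		printf("64-bit shift: %lld\n", (long long)right);	/* 4294971392    */
		return 0;
	}
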
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index e49c41050264..3038c92af493 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -277,7 +277,7 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
u32 dlm_key;
struct dlm_ctxt *dlm;
struct o2dlm_private *priv;
- struct dlm_protocol_version dlm_version;
+ struct dlm_protocol_version fs_version;
BUG_ON(conn == NULL);
BUG_ON(o2cb_stack.sp_proto == NULL);
@@ -304,18 +304,18 @@ static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn)
/* used by the dlm code to make message headers unique, each
* node in this domain must agree on this. */
dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen);
- dlm_version.pv_major = conn->cc_version.pv_major;
- dlm_version.pv_minor = conn->cc_version.pv_minor;
+ fs_version.pv_major = conn->cc_version.pv_major;
+ fs_version.pv_minor = conn->cc_version.pv_minor;
- dlm = dlm_register_domain(conn->cc_name, dlm_key, &dlm_version);
+ dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version);
if (IS_ERR(dlm)) {
rc = PTR_ERR(dlm);
mlog_errno(rc);
goto out_free;
}
- conn->cc_version.pv_major = dlm_version.pv_major;
- conn->cc_version.pv_minor = dlm_version.pv_minor;
+ conn->cc_version.pv_major = fs_version.pv_major;
+ conn->cc_version.pv_minor = fs_version.pv_minor;
conn->cc_lockspace = dlm;
dlm_register_eviction_cb(dlm, &priv->op_eviction_cb);
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 26069917a9f5..755cd49a5ef3 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1062,7 +1062,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
"file system, but write access is "
"unavailable.\n");
else
- mlog_errno(status);
+ mlog_errno(status);
goto read_super_error;
}
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 49b133ccbf11..32499d213fc4 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -137,20 +137,20 @@ static void *ocfs2_fast_follow_link(struct dentry *dentry,
}
memcpy(link, target, len);
- nd_set_link(nd, link);
bail:
+ nd_set_link(nd, status ? ERR_PTR(status) : link);
brelse(bh);
mlog_exit(status);
- return status ? ERR_PTR(status) : link;
+ return NULL;
}
static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
- char *link = cookie;
-
- kfree(link);
+ char *link = nd_get_link(nd);
+ if (!IS_ERR(link))
+ kfree(link);
}
const struct inode_operations ocfs2_symlink_inode_operations = {
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c
index c61369342a27..a0a120e82b97 100644
--- a/fs/ocfs2/uptodate.c
+++ b/fs/ocfs2/uptodate.c
@@ -267,8 +267,8 @@ static int ocfs2_buffer_cached(struct ocfs2_caching_info *ci,
}
/* Warning: even if it returns true, this does *not* guarantee that
- * the block is stored in our inode metadata cache.
- *
+ * the block is stored in our inode metadata cache.
+ *
* This can be called under lock_buffer()
*/
int ocfs2_buffer_uptodate(struct ocfs2_caching_info *ci,
diff --git a/fs/proc/array.c b/fs/proc/array.c
index f560325c444f..13b5d0708175 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -327,94 +327,6 @@ static inline void task_context_switch_counts(struct seq_file *m,
p->nivcsw);
}
-#ifdef CONFIG_MMU
-
-struct stack_stats {
- struct vm_area_struct *vma;
- unsigned long startpage;
- unsigned long usage;
-};
-
-static int stack_usage_pte_range(pmd_t *pmd, unsigned long addr,
- unsigned long end, struct mm_walk *walk)
-{
- struct stack_stats *ss = walk->private;
- struct vm_area_struct *vma = ss->vma;
- pte_t *pte, ptent;
- spinlock_t *ptl;
- int ret = 0;
-
- pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
- for (; addr != end; pte++, addr += PAGE_SIZE) {
- ptent = *pte;
-
-#ifdef CONFIG_STACK_GROWSUP
- if (pte_present(ptent) || is_swap_pte(ptent))
- ss->usage = addr - ss->startpage + PAGE_SIZE;
-#else
- if (pte_present(ptent) || is_swap_pte(ptent)) {
- ss->usage = ss->startpage - addr + PAGE_SIZE;
- pte++;
- ret = 1;
- break;
- }
-#endif
- }
- pte_unmap_unlock(pte - 1, ptl);
- cond_resched();
- return ret;
-}
-
-static inline unsigned long get_stack_usage_in_bytes(struct vm_area_struct *vma,
- struct task_struct *task)
-{
- struct stack_stats ss;
- struct mm_walk stack_walk = {
- .pmd_entry = stack_usage_pte_range,
- .mm = vma->vm_mm,
- .private = &ss,
- };
-
- if (!vma->vm_mm || is_vm_hugetlb_page(vma))
- return 0;
-
- ss.vma = vma;
- ss.startpage = task->stack_start & PAGE_MASK;
- ss.usage = 0;
-
-#ifdef CONFIG_STACK_GROWSUP
- walk_page_range(KSTK_ESP(task) & PAGE_MASK, vma->vm_end,
- &stack_walk);
-#else
- walk_page_range(vma->vm_start, (KSTK_ESP(task) & PAGE_MASK) + PAGE_SIZE,
- &stack_walk);
-#endif
- return ss.usage;
-}
-
-static inline void task_show_stack_usage(struct seq_file *m,
- struct task_struct *task)
-{
- struct vm_area_struct *vma;
- struct mm_struct *mm = get_task_mm(task);
-
- if (mm) {
- down_read(&mm->mmap_sem);
- vma = find_vma(mm, task->stack_start);
- if (vma)
- seq_printf(m, "Stack usage:\t%lu kB\n",
- get_stack_usage_in_bytes(vma, task) >> 10);
-
- up_read(&mm->mmap_sem);
- mmput(mm);
- }
-}
-#else
-static void task_show_stack_usage(struct seq_file *m, struct task_struct *task)
-{
-}
-#endif /* CONFIG_MMU */
-
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Cpus_allowed:\t");
@@ -445,7 +357,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
task_show_regs(m, task);
#endif
task_context_switch_counts(m, task);
- task_show_stack_usage(m, task);
return 0;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 18d5cc62d8ed..58324c299165 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1419,7 +1419,6 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
goto out;
error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
- nd->last_type = LAST_BIND;
out:
return ERR_PTR(error);
}
@@ -2370,16 +2369,30 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct pid_namespace *ns = dentry->d_sb->s_fs_info;
pid_t tgid = task_tgid_nr_ns(current, ns);
- char tmp[PROC_NUMBUF];
- if (!tgid)
- return ERR_PTR(-ENOENT);
- sprintf(tmp, "%d", task_tgid_nr_ns(current, ns));
- return ERR_PTR(vfs_follow_link(nd,tmp));
+ char *name = ERR_PTR(-ENOENT);
+ if (tgid) {
+ name = __getname();
+ if (!name)
+ name = ERR_PTR(-ENOMEM);
+ else
+ sprintf(name, "%d", tgid);
+ }
+ nd_set_link(nd, name);
+ return NULL;
+}
+
+static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
+ void *cookie)
+{
+ char *s = nd_get_link(nd);
+ if (!IS_ERR(s))
+ __putname(s);
}
static const struct inode_operations proc_self_inode_operations = {
.readlink = proc_self_readlink,
.follow_link = proc_self_follow_link,
+ .put_link = proc_self_put_link,
};
/*
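
Both the ocfs2 symlink change earlier in this diff and the /proc/self change above move to the same convention: ->follow_link() stores either the heap-allocated target or an ERR_PTR-encoded error with nd_set_link() and returns NULL, and the paired ->put_link() frees only non-error cookies. Below is a self-contained userspace imitation of that pairing; the helpers only mimic ERR_PTR/IS_ERR and nameidata and are not the VFS API.

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>
	#include <errno.h>

	static inline void *err_ptr(long err)     { return (void *)err; }
	static inline int   is_err(const void *p) { return (uintptr_t)p >= (uintptr_t)-4095; }

	struct nd { char *link; };		/* stand-in for nameidata */

	static void follow_self(struct nd *nd, int tgid)
	{
		char *name = err_ptr(-ENOENT);

		if (tgid) {
			name = malloc(16);
			if (!name)
				name = err_ptr(-ENOMEM);
			else
				snprintf(name, 16, "%d", tgid);
		}
		nd->link = name;		/* nd_set_link() equivalent */
	}

	static void put_self(struct nd *nd)
	{
		if (!is_err(nd->link))
			free(nd->link);		/* __putname() equivalent */
	}

	int main(void)
	{
		struct nd nd;

		follow_self(&nd, 1234);
		if (!is_err(nd.link))
			printf("self -> %s\n", nd.link);
		put_self(&nd);
		return 0;
	}
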
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 47c03f4336b8..f277c4a111cb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -361,12 +361,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
if (!pte_present(ptent))
continue;
- mss->resident += PAGE_SIZE;
-
page = vm_normal_page(vma, addr, ptent);
if (!page)
continue;
+ mss->resident += PAGE_SIZE;
/* Accumulate the size in pages that have been accessed. */
if (pte_young(ptent) || PageReferenced(page))
mss->referenced += PAGE_SIZE;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index dea86abdf2e7..3fc62b097bed 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -1377,6 +1377,9 @@ static void inode_sub_rsv_space(struct inode *inode, qsize_t number)
static qsize_t inode_get_rsv_space(struct inode *inode)
{
qsize_t ret;
+
+ if (!inode->i_sb->dq_op->get_reserved_space)
+ return 0;
spin_lock(&inode->i_lock);
ret = *inode_reserved_space(inode);
spin_unlock(&inode->i_lock);
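
The guard added above is the usual pattern for optional hooks in an operations table: when the filesystem did not provide ->get_reserved_space, the reserved-space bookkeeping simply does not exist, and the caller must treat the feature as absent and report zero. A trivial userspace version with invented names:

	#include <stdio.h>
	#include <stddef.h>

	struct quota_ops {
		long (*get_reserved_space)(const void *inode);	/* optional hook */
	};

	static long reserved_space(const struct quota_ops *ops, const void *inode)
	{
		if (!ops->get_reserved_space)	/* filesystem doesn't reserve space */
			return 0;
		return ops->get_reserved_space(inode);
	}

	static long always_64k(const void *inode) { (void)inode; return 65536; }

	int main(void)
	{
		struct quota_ops none = { NULL };
		struct quota_ops some = { always_64k };

		printf("%ld %ld\n", reserved_space(&none, NULL),
		       reserved_space(&some, NULL));
		return 0;
	}
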
diff --git a/fs/ramfs/file-nommu.c b/fs/ramfs/file-nommu.c
index 2efc57173fd7..1739a4aba25f 100644
--- a/fs/ramfs/file-nommu.c
+++ b/fs/ramfs/file-nommu.c
@@ -123,30 +123,6 @@ add_error:
/*****************************************************************************/
/*
- * check that file shrinkage doesn't leave any VMAs dangling in midair
- */
-static int ramfs_nommu_check_mappings(struct inode *inode,
- size_t newsize, size_t size)
-{
- struct vm_area_struct *vma;
- struct prio_tree_iter iter;
-
- /* search for VMAs that fall within the dead zone */
- vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
- newsize >> PAGE_SHIFT,
- (size + PAGE_SIZE - 1) >> PAGE_SHIFT
- ) {
- /* found one - only interested if it's shared out of the page
- * cache */
- if (vma->vm_flags & VM_SHARED)
- return -ETXTBSY; /* not quite true, but near enough */
- }
-
- return 0;
-}
-
-/*****************************************************************************/
-/*
*
*/
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
@@ -164,7 +140,7 @@ static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
/* check that a decrease in size doesn't cut off any shared mappings */
if (newsize < size) {
- ret = ramfs_nommu_check_mappings(inode, newsize, size);
+ ret = nommu_shrink_inode_mappings(inode, size, newsize);
if (ret < 0)
return ret;
}
diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c
index 685495707181..65c872761177 100644
--- a/fs/reiserfs/bitmap.c
+++ b/fs/reiserfs/bitmap.c
@@ -1277,7 +1277,10 @@ int reiserfs_init_bitmap_cache(struct super_block *sb)
struct reiserfs_bitmap_info *bitmap;
unsigned int bmap_nr = reiserfs_bmap_count(sb);
+ /* Avoid lock recursion in fault case */
+ reiserfs_write_unlock(sb);
bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
+ reiserfs_write_lock(sb);
if (bitmap == NULL)
return -ENOMEM;
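
The pattern above - drop the superblock write lock around vmalloc() and re-take it before looking at the result - recurs throughout the reiserfs hunks in this diff: a sleeping allocation can re-enter the filesystem through reclaim, so holding the lock across it risks recursion and lockdep RECLAIM-FS warnings. A userspace sketch of the shape of that pattern, with malloc() standing in for vmalloc() and a plain mutex for the reiserfs write lock:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	static pthread_mutex_t write_lock = PTHREAD_MUTEX_INITIALIZER;

	static void *alloc_bitmap(size_t n)
	{
		void *p;

		pthread_mutex_unlock(&write_lock);	/* don't hold the lock while sleeping */
		p = malloc(n);
		pthread_mutex_lock(&write_lock);	/* re-acquire before touching fs state */
		return p;
	}

	int main(void)
	{
		void *bitmap;

		pthread_mutex_lock(&write_lock);
		bitmap = alloc_bitmap(1 << 20);
		printf("bitmap %sallocated\n", bitmap ? "" : "not ");
		pthread_mutex_unlock(&write_lock);
		free(bitmap);
		return 0;
	}
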
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 290ae38fca8a..2df0f5c7c60b 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -31,11 +31,12 @@ void reiserfs_delete_inode(struct inode *inode)
JOURNAL_PER_BALANCE_CNT * 2 +
2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb);
struct reiserfs_transaction_handle th;
+ int depth;
int err;
truncate_inode_pages(&inode->i_data, 0);
- reiserfs_write_lock(inode->i_sb);
+ depth = reiserfs_write_lock_once(inode->i_sb);
/* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */
if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */
@@ -74,7 +75,7 @@ void reiserfs_delete_inode(struct inode *inode)
out:
clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
inode->i_blocks = 0;
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, depth);
}
static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
@@ -1496,9 +1497,11 @@ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key)
args.objectid = key->on_disk_key.k_objectid;
args.dirid = key->on_disk_key.k_dir_id;
+ reiserfs_write_unlock(s);
inode = iget5_locked(s, key->on_disk_key.k_objectid,
reiserfs_find_actor, reiserfs_init_locked_inode,
(void *)(&args));
+ reiserfs_write_lock(s);
if (!inode)
return ERR_PTR(-ENOMEM);
@@ -3061,13 +3064,14 @@ static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
- int error;
unsigned int ia_valid;
+ int depth;
+ int error;
/* must be turned off for recursive notify_change calls */
ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
- reiserfs_write_lock(inode->i_sb);
+ depth = reiserfs_write_lock_once(inode->i_sb);
if (attr->ia_valid & ATTR_SIZE) {
/* version 2 items will be caught by the s_maxbytes check
** done for us in vmtruncate
@@ -3148,8 +3152,17 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
journal_end(&th, inode->i_sb, jbegin_count);
}
}
- if (!error)
+ if (!error) {
+ /*
+ * Relax the lock here, as it might truncate the
+ * inode pages and wait for inode pages locks.
+ * inode pages and wait for inode page locks.
+ * To release such page lock, the owner needs the
+ * reiserfs lock
+ */
+ reiserfs_write_unlock_once(inode->i_sb, depth);
error = inode_setattr(inode, attr);
+ depth = reiserfs_write_lock_once(inode->i_sb);
+ }
}
if (!error && reiserfs_posixacl(inode->i_sb)) {
@@ -3158,7 +3171,8 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
}
out:
- reiserfs_write_unlock(inode->i_sb);
+ reiserfs_write_unlock_once(inode->i_sb, depth);
+
return error;
}
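
reiserfs_write_lock_once()/reiserfs_write_unlock_once(), used in this and several following hunks, let a path that may already hold the write lock take it again without deadlocking: only the outermost caller really acquires and releases the lock, nested callers just record a depth. A rough userspace model using a thread-local depth (GCC __thread); the real implementation keeps the depth and owner in the reiserfs superblock info:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
	static __thread int lock_depth = -1;	/* -1: this thread holds nothing */

	static int write_lock_once(void)
	{
		if (lock_depth == -1)
			pthread_mutex_lock(&big_lock);
		return lock_depth++;		/* value to pass to unlock_once() */
	}

	static void write_unlock_once(int depth)
	{
		lock_depth = depth;
		if (lock_depth == -1)
			pthread_mutex_unlock(&big_lock);
	}

	static void nested_helper(void)
	{
		int depth = write_lock_once();	/* recursion: no deadlock */
		printf("helper at depth %d\n", lock_depth);
		write_unlock_once(depth);
	}

	int main(void)
	{
		int depth = write_lock_once();
		nested_helper();
		write_unlock_once(depth);
		printf("released: depth=%d\n", lock_depth);
		return 0;
	}
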
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index ace77451ceb1..f53505de0712 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -104,9 +104,10 @@ setflags_out:
err = put_user(inode->i_generation, (int __user *)arg);
break;
case REISERFS_IOC_SETVERSION:
- if (!is_owner_or_cap(inode))
+ if (!is_owner_or_cap(inode)) {
err = -EPERM;
break;
+ }
err = mnt_want_write(filp->f_path.mnt);
if (err)
break;
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index 2f8a7e7b8dab..ba98546fabbd 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2009,10 +2009,11 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
destroy_workqueue(commit_wq);
commit_wq = NULL;
}
- reiserfs_write_lock(sb);
free_journal_ram(sb);
+ reiserfs_write_lock(sb);
+
return 0;
}
@@ -2758,11 +2759,18 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
struct reiserfs_journal *journal;
struct reiserfs_journal_list *jl;
char b[BDEVNAME_SIZE];
+ int ret;
+ /*
+ * Unlock here to avoid various RECLAIM-FS-ON <-> IN-RECLAIM-FS
+ * dependency inversion warnings.
+ */
+ reiserfs_write_unlock(sb);
journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal));
if (!journal) {
reiserfs_warning(sb, "journal-1256",
"unable to get memory for journal structure");
+ reiserfs_write_lock(sb);
return 1;
}
memset(journal, 0, sizeof(struct reiserfs_journal));
@@ -2771,10 +2779,12 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
INIT_LIST_HEAD(&journal->j_working_list);
INIT_LIST_HEAD(&journal->j_journal_list);
journal->j_persistent_trans = 0;
- if (reiserfs_allocate_list_bitmaps(sb,
- journal->j_list_bitmap,
- reiserfs_bmap_count(sb)))
+ ret = reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap,
+ reiserfs_bmap_count(sb));
+ reiserfs_write_lock(sb);
+ if (ret)
goto free_and_return;
+
allocate_bitmap_nodes(sb);
/* reserved for journal area support */
@@ -2903,7 +2913,9 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
journal->j_mount_id = 10;
journal->j_state = 0;
atomic_set(&(journal->j_jlock), 0);
+ reiserfs_write_unlock(sb);
journal->j_cnode_free_list = allocate_cnodes(num_cnodes);
+ reiserfs_write_lock(sb);
journal->j_cnode_free_orig = journal->j_cnode_free_list;
journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0;
journal->j_cnode_used = 0;
diff --git a/fs/reiserfs/lock.c b/fs/reiserfs/lock.c
index ee2cfc0fd8a7..b87aa2c1afc1 100644
--- a/fs/reiserfs/lock.c
+++ b/fs/reiserfs/lock.c
@@ -86,3 +86,12 @@ void reiserfs_check_lock_depth(struct super_block *sb, char *caller)
reiserfs_panic(sb, "%s called without kernel lock held %d",
caller);
}
+
+#ifdef CONFIG_REISERFS_CHECK
+void reiserfs_lock_check_recursive(struct super_block *sb)
+{
+ struct reiserfs_sb_info *sb_i = REISERFS_SB(sb);
+
+ WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n");
+}
+#endif
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index e296ff72a6cc..9d4dcf0b07cb 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -921,6 +921,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
struct reiserfs_transaction_handle th;
int jbegin_count;
unsigned long savelink;
+ int depth;
inode = dentry->d_inode;
@@ -932,7 +933,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
JOURNAL_PER_BALANCE_CNT * 2 + 2 +
4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb);
- reiserfs_write_lock(dir->i_sb);
+ depth = reiserfs_write_lock_once(dir->i_sb);
retval = journal_begin(&th, dir->i_sb, jbegin_count);
if (retval)
goto out_unlink;
@@ -993,7 +994,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
retval = journal_end(&th, dir->i_sb, jbegin_count);
reiserfs_check_path(&path);
- reiserfs_write_unlock(dir->i_sb);
+ reiserfs_write_unlock_once(dir->i_sb, depth);
return retval;
end_unlink:
@@ -1003,7 +1004,7 @@ static int reiserfs_unlink(struct inode *dir, struct dentry *dentry)
if (err)
retval = err;
out_unlink:
- reiserfs_write_unlock(dir->i_sb);
+ reiserfs_write_unlock_once(dir->i_sb, depth);
return retval;
}
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 8c7033a8b67e..81f09fab8ae4 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -83,7 +83,8 @@ static int xattr_unlink(struct inode *dir, struct dentry *dentry)
BUG_ON(!mutex_is_locked(&dir->i_mutex));
vfs_dq_init(dir);
- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+ reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
+ I_MUTEX_CHILD, dir->i_sb);
error = dir->i_op->unlink(dir, dentry);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -98,7 +99,8 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
BUG_ON(!mutex_is_locked(&dir->i_mutex));
vfs_dq_init(dir);
- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+ reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex,
+ I_MUTEX_CHILD, dir->i_sb);
dentry_unhash(dentry);
error = dir->i_op->rmdir(dir, dentry);
if (!error)
@@ -235,16 +237,22 @@ static int reiserfs_for_each_xattr(struct inode *inode,
if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
return 0;
+ reiserfs_write_unlock(inode->i_sb);
dir = open_xa_dir(inode, XATTR_REPLACE);
if (IS_ERR(dir)) {
err = PTR_ERR(dir);
+ reiserfs_write_lock(inode->i_sb);
goto out;
} else if (!dir->d_inode) {
err = 0;
+ reiserfs_write_lock(inode->i_sb);
goto out_dir;
}
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+
+ reiserfs_write_lock(inode->i_sb);
+
buf.xadir = dir;
err = reiserfs_readdir_dentry(dir, &buf, fill_with_dentries, &pos);
while ((err == 0 || err == -ENOSPC) && buf.count) {
@@ -283,8 +291,9 @@ static int reiserfs_for_each_xattr(struct inode *inode,
err = journal_begin(&th, inode->i_sb, blocks);
if (!err) {
int jerror;
- mutex_lock_nested(&dir->d_parent->d_inode->i_mutex,
- I_MUTEX_XATTR);
+ reiserfs_mutex_lock_nested_safe(
+ &dir->d_parent->d_inode->i_mutex,
+ I_MUTEX_XATTR, inode->i_sb);
err = action(dir, data);
jerror = journal_end(&th, inode->i_sb, blocks);
mutex_unlock(&dir->d_parent->d_inode->i_mutex);
@@ -443,7 +452,9 @@ static int lookup_and_delete_xattr(struct inode *inode, const char *name)
}
if (dentry->d_inode) {
+ reiserfs_write_lock(inode->i_sb);
err = xattr_unlink(xadir->d_inode, dentry);
+ reiserfs_write_unlock(inode->i_sb);
update_ctime(inode);
}
@@ -477,15 +488,24 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
if (get_inode_sd_version(inode) == STAT_DATA_V1)
return -EOPNOTSUPP;
- if (!buffer)
- return lookup_and_delete_xattr(inode, name);
+ reiserfs_write_unlock(inode->i_sb);
+
+ if (!buffer) {
+ err = lookup_and_delete_xattr(inode, name);
+ reiserfs_write_lock(inode->i_sb);
+ return err;
+ }
dentry = xattr_lookup(inode, name, flags);
- if (IS_ERR(dentry))
+ if (IS_ERR(dentry)) {
+ reiserfs_write_lock(inode->i_sb);
return PTR_ERR(dentry);
+ }
down_write(&REISERFS_I(inode)->i_xattr_sem);
+ reiserfs_write_lock(inode->i_sb);
+
xahash = xattr_hash(buffer, buffer_size);
while (buffer_pos < buffer_size || buffer_pos == 0) {
size_t chunk;
@@ -540,8 +560,12 @@ reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
.ia_size = buffer_size,
.ia_valid = ATTR_SIZE | ATTR_CTIME,
};
+
+ reiserfs_write_unlock(inode->i_sb);
mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
down_write(&dentry->d_inode->i_alloc_sem);
+ reiserfs_write_lock(inode->i_sb);
+
err = reiserfs_setattr(dentry, &newattrs);
up_write(&dentry->d_inode->i_alloc_sem);
mutex_unlock(&dentry->d_inode->i_mutex);
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index cc32e6ada67b..dd20a7883f0f 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -455,7 +455,9 @@ int reiserfs_acl_chmod(struct inode *inode)
return 0;
}
+ reiserfs_write_unlock(inode->i_sb);
acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+ reiserfs_write_lock(inode->i_sb);
if (!acl)
return 0;
if (IS_ERR(acl))
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index c117fa80d1e9..42d213546894 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -544,6 +544,7 @@ error:
error_rsb_inval:
ret = -EINVAL;
error_rsb:
+ kfree(rsb);
return ret;
}
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index f05f2303a8b8..699f371b9f12 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -106,8 +106,10 @@ static struct sysfs_dirent *sysfs_get_active(struct sysfs_dirent *sd)
return NULL;
t = atomic_cmpxchg(&sd->s_active, v, v + 1);
- if (likely(t == v))
+ if (likely(t == v)) {
+ rwsem_acquire_read(&sd->dep_map, 0, 1, _RET_IP_);
return sd;
+ }
if (t < 0)
return NULL;
@@ -130,6 +132,7 @@ static void sysfs_put_active(struct sysfs_dirent *sd)
if (unlikely(!sd))
return;
+ rwsem_release(&sd->dep_map, 1, _RET_IP_);
v = atomic_dec_return(&sd->s_active);
if (likely(v != SD_DEACTIVATED_BIAS))
return;
@@ -194,15 +197,21 @@ static void sysfs_deactivate(struct sysfs_dirent *sd)
BUG_ON(sd->s_sibling || !(sd->s_flags & SYSFS_FLAG_REMOVED));
sd->s_sibling = (void *)&wait;
+ rwsem_acquire(&sd->dep_map, 0, 0, _RET_IP_);
/* atomic_add_return() is a mb(), put_active() will always see
* the updated sd->s_sibling.
*/
v = atomic_add_return(SD_DEACTIVATED_BIAS, &sd->s_active);
- if (v != SD_DEACTIVATED_BIAS)
+ if (v != SD_DEACTIVATED_BIAS) {
+ lock_contended(&sd->dep_map, _RET_IP_);
wait_for_completion(&wait);
+ }
sd->s_sibling = NULL;
+
+ lock_acquired(&sd->dep_map, _RET_IP_);
+ rwsem_release(&sd->dep_map, 1, _RET_IP_);
}
static int sysfs_alloc_ino(ino_t *pino)
@@ -345,6 +354,7 @@ struct sysfs_dirent *sysfs_new_dirent(const char *name, umode_t mode, int type)
atomic_set(&sd->s_count, 1);
atomic_set(&sd->s_active, 0);
+ sysfs_dirent_init_lockdep(sd);
sd->s_name = name;
sd->s_mode = mode;
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 220b758523ae..6a06a1d1ea7b 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -81,24 +81,23 @@ int sysfs_sd_setattr(struct sysfs_dirent *sd, struct iattr * iattr)
if (!sd_attrs)
return -ENOMEM;
sd->s_iattr = sd_attrs;
- } else {
- /* attributes were changed at least once in past */
- iattrs = &sd_attrs->ia_iattr;
-
- if (ia_valid & ATTR_UID)
- iattrs->ia_uid = iattr->ia_uid;
- if (ia_valid & ATTR_GID)
- iattrs->ia_gid = iattr->ia_gid;
- if (ia_valid & ATTR_ATIME)
- iattrs->ia_atime = iattr->ia_atime;
- if (ia_valid & ATTR_MTIME)
- iattrs->ia_mtime = iattr->ia_mtime;
- if (ia_valid & ATTR_CTIME)
- iattrs->ia_ctime = iattr->ia_ctime;
- if (ia_valid & ATTR_MODE) {
- umode_t mode = iattr->ia_mode;
- iattrs->ia_mode = sd->s_mode = mode;
- }
+ }
+ /* attributes were changed at least once in past */
+ iattrs = &sd_attrs->ia_iattr;
+
+ if (ia_valid & ATTR_UID)
+ iattrs->ia_uid = iattr->ia_uid;
+ if (ia_valid & ATTR_GID)
+ iattrs->ia_gid = iattr->ia_gid;
+ if (ia_valid & ATTR_ATIME)
+ iattrs->ia_atime = iattr->ia_atime;
+ if (ia_valid & ATTR_MTIME)
+ iattrs->ia_mtime = iattr->ia_mtime;
+ if (ia_valid & ATTR_CTIME)
+ iattrs->ia_ctime = iattr->ia_ctime;
+ if (ia_valid & ATTR_MODE) {
+ umode_t mode = iattr->ia_mode;
+ iattrs->ia_mode = sd->s_mode = mode;
}
return 0;
}
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index ca52e7b9d8f8..cdd9377a6e06 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -8,6 +8,7 @@
* This file is released under the GPLv2.
*/
+#include <linux/lockdep.h>
#include <linux/fs.h>
struct sysfs_open_dirent;
@@ -50,6 +51,9 @@ struct sysfs_inode_attrs {
struct sysfs_dirent {
atomic_t s_count;
atomic_t s_active;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
struct sysfs_dirent *s_parent;
struct sysfs_dirent *s_sibling;
const char *s_name;
@@ -84,6 +88,17 @@ static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
return sd->s_flags & SYSFS_TYPE_MASK;
}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define sysfs_dirent_init_lockdep(sd) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ lockdep_init_map(&sd->dep_map, "s_active", &__key, 0); \
+} while(0)
+#else
+#define sysfs_dirent_init_lockdep(sd) do {} while(0)
+#endif
+
/*
* Context structure to be used while adding/removing nodes.
*/
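
sysfs_dirent_init_lockdep() above follows the standard compile-away pattern: with CONFIG_DEBUG_LOCK_ALLOC off, both the dep_map field and the initializer macro vanish, so production builds pay nothing. A generic userspace illustration of the same shape; DEBUG_ACTIVE is a made-up stand-in for the config option and the debug field is only a name, not a lockdep map:

	#include <stdio.h>

	#define DEBUG_ACTIVE 1

	struct dirent_like {
		int refcount;
	#if DEBUG_ACTIVE
		const char *dbg_name;		/* only exists in debug builds */
	#endif
	};

	#if DEBUG_ACTIVE
	#define dirent_init_debug(sd, name_str)		\
	do {						\
		(sd)->dbg_name = (name_str);		\
	} while (0)
	#else
	#define dirent_init_debug(sd, name_str) do { } while (0)
	#endif

	int main(void)
	{
		struct dirent_like sd = { .refcount = 1 };

		dirent_init_debug(&sd, "s_active");
	#if DEBUG_ACTIVE
		printf("debug map: %s\n", sd.dbg_name);
	#endif
		return 0;
	}
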
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 618c2701d3a7..e5a3d8e96bb7 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -54,6 +54,7 @@
*/
#include <linux/pagemap.h>
+#include <linux/list_sort.h>
#include "ubifs.h"
/*
@@ -108,101 +109,6 @@ static int switch_gc_head(struct ubifs_info *c)
}
/**
- * list_sort - sort a list.
- * @priv: private data, passed to @cmp
- * @head: the list to sort
- * @cmp: the elements comparison function
- *
- * This function has been implemented by Mark J Roberts <mjr@znex.org>. It
- * implements "merge sort" which has O(nlog(n)) complexity. The list is sorted
- * in ascending order.
- *
- * The comparison function @cmp is supposed to return a negative value if @a is
- * less than @b, and a positive value if @a is greater than @b. If @a and @b are
- * equivalent, then it does not matter what this function returns.
- */
-static void list_sort(void *priv, struct list_head *head,
- int (*cmp)(void *priv, struct list_head *a,
- struct list_head *b))
-{
- struct list_head *p, *q, *e, *list, *tail, *oldhead;
- int insize, nmerges, psize, qsize, i;
-
- if (list_empty(head))
- return;
-
- list = head->next;
- list_del(head);
- insize = 1;
- for (;;) {
- p = oldhead = list;
- list = tail = NULL;
- nmerges = 0;
-
- while (p) {
- nmerges++;
- q = p;
- psize = 0;
- for (i = 0; i < insize; i++) {
- psize++;
- q = q->next == oldhead ? NULL : q->next;
- if (!q)
- break;
- }
-
- qsize = insize;
- while (psize > 0 || (qsize > 0 && q)) {
- if (!psize) {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- } else if (!qsize || !q) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else if (cmp(priv, p, q) <= 0) {
- e = p;
- p = p->next;
- psize--;
- if (p == oldhead)
- p = NULL;
- } else {
- e = q;
- q = q->next;
- qsize--;
- if (q == oldhead)
- q = NULL;
- }
- if (tail)
- tail->next = e;
- else
- list = e;
- e->prev = tail;
- tail = e;
- }
- p = q;
- }
-
- tail->next = list;
- list->prev = tail;
-
- if (nmerges <= 1)
- break;
-
- insize *= 2;
- }
-
- head->next = list;
- head->prev = list->prev;
- list->prev->next = head;
- list->prev = head;
-}
-
-/**
* data_nodes_cmp - compare 2 data nodes.
* @priv: UBIFS file-system description object
* @a: first data node
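
The merge sort deleted above was not lost: it was promoted to a generic library helper (lib/list_sort.c), and UBIFS now pulls it in through <linux/list_sort.h> while keeping the same comparison-callback convention. For readers who want the algorithm in a standalone form, here is a compact userspace merge sort over a singly linked list with the same callback idea; it is an illustration, not the kernel code:

	#include <stdio.h>

	struct node { int key; struct node *next; };

	static struct node *merge(struct node *a, struct node *b,
				  int (*cmp)(const struct node *, const struct node *))
	{
		struct node head = { 0, NULL }, *tail = &head;

		while (a && b) {
			if (cmp(a, b) <= 0) { tail->next = a; a = a->next; }
			else                { tail->next = b; b = b->next; }
			tail = tail->next;
		}
		tail->next = a ? a : b;
		return head.next;
	}

	static struct node *msort(struct node *list,
				  int (*cmp)(const struct node *, const struct node *))
	{
		struct node *slow, *fast, *half;

		if (!list || !list->next)
			return list;
		slow = list;
		fast = list->next;
		while (fast && fast->next) {	/* find the midpoint */
			slow = slow->next;
			fast = fast->next->next;
		}
		half = slow->next;
		slow->next = NULL;
		return merge(msort(list, cmp), msort(half, cmp), cmp);
	}

	static int by_key(const struct node *a, const struct node *b)
	{
		return a->key - b->key;
	}

	int main(void)
	{
		struct node n[5] = { {4}, {1}, {5}, {2}, {3} };
		struct node *p;
		int i;

		for (i = 0; i < 4; i++)
			n[i].next = &n[i + 1];
		n[4].next = NULL;

		for (p = msort(&n[0], by_key); p; p = p->next)
			printf("%d ", p->key);
		printf("\n");
		return 0;
	}
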
diff --git a/fs/xfs/linux-2.6/xfs_acl.c b/fs/xfs/linux-2.6/xfs_acl.c
index 2512125dfa7c..883ca5ab8af5 100644
--- a/fs/xfs/linux-2.6/xfs_acl.c
+++ b/fs/xfs/linux-2.6/xfs_acl.c
@@ -251,8 +251,9 @@ xfs_set_mode(struct inode *inode, mode_t mode)
if (mode != inode->i_mode) {
struct iattr iattr;
- iattr.ia_valid = ATTR_MODE;
+ iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
iattr.ia_mode = mode;
+ iattr.ia_ctime = current_fs_time(inode->i_sb);
error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 09783cc444ac..77414db10dc2 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -954,16 +954,14 @@ xfs_fs_destroy_inode(
ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_IRECLAIM));
/*
- * If we have nothing to flush with this inode then complete the
- * teardown now, otherwise delay the flush operation.
+ * We always use background reclaim here because even if the
+ * inode is clean, it still may be under IO and hence we have
+ * to take the flush lock. The background reclaim path handles
+ * this more efficiently than we can here, so simply let background
+ * reclaim tear down all inodes.
*/
- if (!xfs_inode_clean(ip)) {
- xfs_inode_set_reclaim_tag(ip);
- return;
- }
-
out_reclaim:
- xfs_ireclaim(ip);
+ xfs_inode_set_reclaim_tag(ip);
}
/*
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 6fed97a8cd3e..1f5e4bb5e970 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -65,7 +65,6 @@ xfs_inode_ag_lookup(
* as the tree is sparse and a gang lookup walks to find
* the number of objects requested.
*/
- read_lock(&pag->pag_ici_lock);
if (tag == XFS_ICI_NO_TAG) {
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
(void **)&ip, *first_index, 1);
@@ -74,7 +73,7 @@ xfs_inode_ag_lookup(
(void **)&ip, *first_index, 1, tag);
}
if (!nr_found)
- goto unlock;
+ return NULL;
/*
* Update the index for the next lookup. Catch overflows
@@ -84,13 +83,8 @@ xfs_inode_ag_lookup(
*/
*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
- goto unlock;
-
+ return NULL;
return ip;
-
-unlock:
- read_unlock(&pag->pag_ici_lock);
- return NULL;
}
STATIC int
@@ -100,7 +94,8 @@ xfs_inode_ag_walk(
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
int flags,
- int tag)
+ int tag,
+ int exclusive)
{
struct xfs_perag *pag = &mp->m_perag[ag];
uint32_t first_index;
@@ -114,10 +109,20 @@ restart:
int error = 0;
xfs_inode_t *ip;
+ if (exclusive)
+ write_lock(&pag->pag_ici_lock);
+ else
+ read_lock(&pag->pag_ici_lock);
ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
- if (!ip)
+ if (!ip) {
+ if (exclusive)
+ write_unlock(&pag->pag_ici_lock);
+ else
+ read_unlock(&pag->pag_ici_lock);
break;
+ }
+ /* execute releases pag->pag_ici_lock */
error = execute(ip, pag, flags);
if (error == EAGAIN) {
skipped++;
@@ -125,9 +130,8 @@ restart:
}
if (error)
last_error = error;
- /*
- * bail out if the filesystem is corrupted.
- */
+
+ /* bail out if the filesystem is corrupted. */
if (error == EFSCORRUPTED)
break;
@@ -148,7 +152,8 @@ xfs_inode_ag_iterator(
int (*execute)(struct xfs_inode *ip,
struct xfs_perag *pag, int flags),
int flags,
- int tag)
+ int tag,
+ int exclusive)
{
int error = 0;
int last_error = 0;
@@ -157,7 +162,8 @@ xfs_inode_ag_iterator(
for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
if (!mp->m_perag[ag].pag_ici_init)
continue;
- error = xfs_inode_ag_walk(mp, ag, execute, flags, tag);
+ error = xfs_inode_ag_walk(mp, ag, execute, flags, tag,
+ exclusive);
if (error) {
last_error = error;
if (error == EFSCORRUPTED)
@@ -174,30 +180,31 @@ xfs_sync_inode_valid(
struct xfs_perag *pag)
{
struct inode *inode = VFS_I(ip);
+ int error = EFSCORRUPTED;
/* nothing to sync during shutdown */
- if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- read_unlock(&pag->pag_ici_lock);
- return EFSCORRUPTED;
- }
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ goto out_unlock;
- /*
- * If we can't get a reference on the inode, it must be in reclaim.
- * Leave it for the reclaim code to flush. Also avoid inodes that
- * haven't been fully initialised.
- */
- if (!igrab(inode)) {
- read_unlock(&pag->pag_ici_lock);
- return ENOENT;
- }
- read_unlock(&pag->pag_ici_lock);
+ /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
+ error = ENOENT;
+ if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ goto out_unlock;
- if (is_bad_inode(inode) || xfs_iflags_test(ip, XFS_INEW)) {
+ /* If we can't grab the inode, it must be on its way to reclaim. */
+ if (!igrab(inode))
+ goto out_unlock;
+
+ if (is_bad_inode(inode)) {
IRELE(ip);
- return ENOENT;
+ goto out_unlock;
}
- return 0;
+ /* inode is valid */
+ error = 0;
+out_unlock:
+ read_unlock(&pag->pag_ici_lock);
+ return error;
}
STATIC int
@@ -282,7 +289,7 @@ xfs_sync_data(
ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);
error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
- XFS_ICI_NO_TAG);
+ XFS_ICI_NO_TAG, 0);
if (error)
return XFS_ERROR(error);
@@ -304,7 +311,7 @@ xfs_sync_attr(
ASSERT((flags & ~SYNC_WAIT) == 0);
return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
- XFS_ICI_NO_TAG);
+ XFS_ICI_NO_TAG, 0);
}
STATIC int
@@ -664,60 +671,6 @@ xfs_syncd_stop(
kthread_stop(mp->m_sync_task);
}
-STATIC int
-xfs_reclaim_inode(
- xfs_inode_t *ip,
- int sync_mode)
-{
- xfs_perag_t *pag = xfs_get_perag(ip->i_mount, ip->i_ino);
-
- /* The hash lock here protects a thread in xfs_iget_core from
- * racing with us on linking the inode back with a vnode.
- * Once we have the XFS_IRECLAIM flag set it will not touch
- * us.
- */
- write_lock(&pag->pag_ici_lock);
- spin_lock(&ip->i_flags_lock);
- if (__xfs_iflags_test(ip, XFS_IRECLAIM) ||
- !__xfs_iflags_test(ip, XFS_IRECLAIMABLE)) {
- spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
- return -EAGAIN;
- }
- __xfs_iflags_set(ip, XFS_IRECLAIM);
- spin_unlock(&ip->i_flags_lock);
- write_unlock(&pag->pag_ici_lock);
- xfs_put_perag(ip->i_mount, pag);
-
- /*
- * If the inode is still dirty, then flush it out. If the inode
- * is not in the AIL, then it will be OK to flush it delwri as
- * long as xfs_iflush() does not keep any references to the inode.
- * We leave that decision up to xfs_iflush() since it has the
- * knowledge of whether it's OK to simply do a delwri flush of
- * the inode or whether we need to wait until the inode is
- * pulled from the AIL.
- * We get the flush lock regardless, though, just to make sure
- * we don't free it while it is being flushed.
- */
- xfs_ilock(ip, XFS_ILOCK_EXCL);
- xfs_iflock(ip);
-
- /*
- * In the case of a forced shutdown we rely on xfs_iflush() to
- * wait for the inode to be unpinned before returning an error.
- */
- if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
- /* synchronize with xfs_iflush_done */
- xfs_iflock(ip);
- xfs_ifunlock(ip);
- }
-
- xfs_iunlock(ip, XFS_ILOCK_EXCL);
- xfs_ireclaim(ip);
- return 0;
-}
-
void
__xfs_inode_set_reclaim_tag(
struct xfs_perag *pag,
@@ -760,19 +713,55 @@ __xfs_inode_clear_reclaim_tag(
}
STATIC int
-xfs_reclaim_inode_now(
+xfs_reclaim_inode(
struct xfs_inode *ip,
struct xfs_perag *pag,
- int flags)
+ int sync_mode)
{
- /* ignore if already under reclaim */
- if (xfs_iflags_test(ip, XFS_IRECLAIM)) {
- read_unlock(&pag->pag_ici_lock);
+ /*
+ * The radix tree lock here protects a thread in xfs_iget from racing
+ * with us starting reclaim on the inode. Once we have the
+ * XFS_IRECLAIM flag set it will not touch us.
+ */
+ spin_lock(&ip->i_flags_lock);
+ ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
+ if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
+ /* ignore as it is already under reclaim */
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
return 0;
}
- read_unlock(&pag->pag_ici_lock);
+ __xfs_iflags_set(ip, XFS_IRECLAIM);
+ spin_unlock(&ip->i_flags_lock);
+ write_unlock(&pag->pag_ici_lock);
- return xfs_reclaim_inode(ip, flags);
+ /*
+ * If the inode is still dirty, then flush it out. If the inode
+ * is not in the AIL, then it will be OK to flush it delwri as
+ * long as xfs_iflush() does not keep any references to the inode.
+ * We leave that decision up to xfs_iflush() since it has the
+ * knowledge of whether it's OK to simply do a delwri flush of
+ * the inode or whether we need to wait until the inode is
+ * pulled from the AIL.
+ * We get the flush lock regardless, though, just to make sure
+ * we don't free it while it is being flushed.
+ */
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ xfs_iflock(ip);
+
+ /*
+ * In the case of a forced shutdown we rely on xfs_iflush() to
+ * wait for the inode to be unpinned before returning an error.
+ */
+ if (!is_bad_inode(VFS_I(ip)) && xfs_iflush(ip, sync_mode) == 0) {
+ /* synchronize with xfs_iflush_done */
+ xfs_iflock(ip);
+ xfs_ifunlock(ip);
+ }
+
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+ xfs_ireclaim(ip);
+ return 0;
}
int
@@ -780,6 +769,6 @@ xfs_reclaim_inodes(
xfs_mount_t *mp,
int mode)
{
- return xfs_inode_ag_iterator(mp, xfs_reclaim_inode_now, mode,
- XFS_ICI_RECLAIM_TAG);
+ return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
+ XFS_ICI_RECLAIM_TAG, 1);
}
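
The new 'exclusive' argument lets xfs_reclaim_inodes() walk the per-AG inode radix tree with the lock held exclusively while the sync paths keep taking it shared; in either case the per-inode callback is now responsible for dropping pag_ici_lock before doing anything that can sleep. A userspace sketch of that calling convention with a pthread rwlock and invented names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t ici_lock = PTHREAD_RWLOCK_INITIALIZER;

	/* The callback runs with ici_lock held and must release it itself. */
	static int ag_walk(int first_ino, int last_ino,
			   int (*execute)(int ino), int exclusive)
	{
		int ino, err, last_err = 0;

		for (ino = first_ino; ino <= last_ino; ino++) {
			if (exclusive)
				pthread_rwlock_wrlock(&ici_lock);
			else
				pthread_rwlock_rdlock(&ici_lock);

			err = execute(ino);	/* drops ici_lock */
			if (err)
				last_err = err;
		}
		return last_err;
	}

	static int visit_inode(int ino)
	{
		pthread_rwlock_unlock(&ici_lock);	/* drop before "slow" work */
		printf("visiting inode %d\n", ino);
		return 0;
	}

	int main(void)
	{
		ag_walk(1, 3, visit_inode, 0);	/* shared walk, e.g. xfs_sync_data */
		ag_walk(1, 3, visit_inode, 1);	/* exclusive walk, e.g. reclaim */
		return 0;
	}
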
diff --git a/fs/xfs/linux-2.6/xfs_sync.h b/fs/xfs/linux-2.6/xfs_sync.h
index a500b4d91835..ea932b43335d 100644
--- a/fs/xfs/linux-2.6/xfs_sync.h
+++ b/fs/xfs/linux-2.6/xfs_sync.h
@@ -54,6 +54,6 @@ void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp, struct xfs_perag *pag,
int xfs_sync_inode_valid(struct xfs_inode *ip, struct xfs_perag *pag);
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, struct xfs_perag *pag, int flags),
- int flags, int tag);
+ int flags, int tag, int write_lock);
#endif
diff --git a/fs/xfs/linux-2.6/xfs_trace.h b/fs/xfs/linux-2.6/xfs_trace.h
index c40834bdee58..c22a608321a3 100644
--- a/fs/xfs/linux-2.6/xfs_trace.h
+++ b/fs/xfs/linux-2.6/xfs_trace.h
@@ -33,51 +33,55 @@ struct xfs_dquot;
struct xlog_ticket;
struct log;
+DECLARE_EVENT_CLASS(xfs_attr_list_class,
+ TP_PROTO(struct xfs_attr_list_context *ctx),
+ TP_ARGS(ctx),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(u32, hashval)
+ __field(u32, blkno)
+ __field(u32, offset)
+ __field(void *, alist)
+ __field(int, bufsize)
+ __field(int, count)
+ __field(int, firstu)
+ __field(int, dupcnt)
+ __field(int, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev;
+ __entry->ino = ctx->dp->i_ino;
+ __entry->hashval = ctx->cursor->hashval;
+ __entry->blkno = ctx->cursor->blkno;
+ __entry->offset = ctx->cursor->offset;
+ __entry->alist = ctx->alist;
+ __entry->bufsize = ctx->bufsize;
+ __entry->count = ctx->count;
+ __entry->firstu = ctx->firstu;
+ __entry->flags = ctx->flags;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u "
+ "alist 0x%p size %u count %u firstu %u flags %d %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->hashval,
+ __entry->blkno,
+ __entry->offset,
+ __entry->dupcnt,
+ __entry->alist,
+ __entry->bufsize,
+ __entry->count,
+ __entry->firstu,
+ __entry->flags,
+ __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS)
+ )
+)
+
#define DEFINE_ATTR_LIST_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_attr_list_class, name, \
TP_PROTO(struct xfs_attr_list_context *ctx), \
- TP_ARGS(ctx), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(u32, hashval) \
- __field(u32, blkno) \
- __field(u32, offset) \
- __field(void *, alist) \
- __field(int, bufsize) \
- __field(int, count) \
- __field(int, firstu) \
- __field(int, dupcnt) \
- __field(int, flags) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \
- __entry->ino = ctx->dp->i_ino; \
- __entry->hashval = ctx->cursor->hashval; \
- __entry->blkno = ctx->cursor->blkno; \
- __entry->offset = ctx->cursor->offset; \
- __entry->alist = ctx->alist; \
- __entry->bufsize = ctx->bufsize; \
- __entry->count = ctx->count; \
- __entry->firstu = ctx->firstu; \
- __entry->flags = ctx->flags; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \
- "alist 0x%p size %u count %u firstu %u flags %d %s", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->hashval, \
- __entry->blkno, \
- __entry->offset, \
- __entry->dupcnt, \
- __entry->alist, \
- __entry->bufsize, \
- __entry->count, \
- __entry->firstu, \
- __entry->flags, \
- __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \
- ) \
-)
+ TP_ARGS(ctx))
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all);
DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf);
@@ -178,91 +182,99 @@ TRACE_EVENT(xfs_iext_insert,
(char *)__entry->caller_ip)
);
+DECLARE_EVENT_CLASS(xfs_bmap_class,
+ TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state,
+ unsigned long caller_ip),
+ TP_ARGS(ip, idx, state, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_extnum_t, idx)
+ __field(xfs_fileoff_t, startoff)
+ __field(xfs_fsblock_t, startblock)
+ __field(xfs_filblks_t, blockcount)
+ __field(xfs_exntst_t, state)
+ __field(int, bmap_state)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ?
+ ip->i_afp : &ip->i_df;
+ struct xfs_bmbt_irec r;
+
+ xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r);
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->idx = idx;
+ __entry->startoff = r.br_startoff;
+ __entry->startblock = r.br_startblock;
+ __entry->blockcount = r.br_blockcount;
+ __entry->state = r.br_state;
+ __entry->bmap_state = state;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx state %s idx %ld "
+ "offset %lld block %s count %lld flag %d caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS),
+ (long)__entry->idx,
+ __entry->startoff,
+ xfs_fmtfsblock(__entry->startblock),
+ __entry->blockcount,
+ __entry->state,
+ (char *)__entry->caller_ip)
+)
+
#define DEFINE_BMAP_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_bmap_class, name, \
TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \
unsigned long caller_ip), \
- TP_ARGS(ip, idx, state, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(xfs_extnum_t, idx) \
- __field(xfs_fileoff_t, startoff) \
- __field(xfs_fsblock_t, startblock) \
- __field(xfs_filblks_t, blockcount) \
- __field(xfs_exntst_t, state) \
- __field(int, bmap_state) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? \
- ip->i_afp : &ip->i_df; \
- struct xfs_bmbt_irec r; \
- \
- xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->idx = idx; \
- __entry->startoff = r.br_startoff; \
- __entry->startblock = r.br_startblock; \
- __entry->blockcount = r.br_blockcount; \
- __entry->state = r.br_state; \
- __entry->bmap_state = state; \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \
- "offset %lld block %s count %lld flag %d caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \
- (long)__entry->idx, \
- __entry->startoff, \
- xfs_fmtfsblock(__entry->startblock), \
- __entry->blockcount, \
- __entry->state, \
- (char *)__entry->caller_ip) \
-)
-
+ TP_ARGS(ip, idx, state, caller_ip))
DEFINE_BMAP_EVENT(xfs_iext_remove);
DEFINE_BMAP_EVENT(xfs_bmap_pre_update);
DEFINE_BMAP_EVENT(xfs_bmap_post_update);
DEFINE_BMAP_EVENT(xfs_extlist);
-#define DEFINE_BUF_EVENT(tname) \
-TRACE_EVENT(tname, \
- TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
- TP_ARGS(bp, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_daddr_t, bno) \
- __field(size_t, buffer_length) \
- __field(int, hold) \
- __field(int, pincount) \
- __field(unsigned, lockval) \
- __field(unsigned, flags) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = bp->b_target->bt_dev; \
- __entry->bno = bp->b_bn; \
- __entry->buffer_length = bp->b_buffer_length; \
- __entry->hold = atomic_read(&bp->b_hold); \
- __entry->pincount = atomic_read(&bp->b_pin_count); \
- __entry->lockval = xfs_buf_lock_value(bp); \
- __entry->flags = bp->b_flags; \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
- "lock %d flags %s caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- (unsigned long long)__entry->bno, \
- __entry->buffer_length, \
- __entry->hold, \
- __entry->pincount, \
- __entry->lockval, \
- __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
- (void *)__entry->caller_ip) \
+DECLARE_EVENT_CLASS(xfs_buf_class,
+ TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip),
+ TP_ARGS(bp, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_daddr_t, bno)
+ __field(size_t, buffer_length)
+ __field(int, hold)
+ __field(int, pincount)
+ __field(unsigned, lockval)
+ __field(unsigned, flags)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = bp->b_target->bt_dev;
+ __entry->bno = bp->b_bn;
+ __entry->buffer_length = bp->b_buffer_length;
+ __entry->hold = atomic_read(&bp->b_hold);
+ __entry->pincount = atomic_read(&bp->b_pin_count);
+ __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->flags = bp->b_flags;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ "lock %d flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->bno,
+ __entry->buffer_length,
+ __entry->hold,
+ __entry->pincount,
+ __entry->lockval,
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
+ (void *)__entry->caller_ip)
)
+
+#define DEFINE_BUF_EVENT(name) \
+DEFINE_EVENT(xfs_buf_class, name, \
+ TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \
+ TP_ARGS(bp, caller_ip))
DEFINE_BUF_EVENT(xfs_buf_init);
DEFINE_BUF_EVENT(xfs_buf_free);
DEFINE_BUF_EVENT(xfs_buf_hold);
@@ -299,41 +311,45 @@ DEFINE_BUF_EVENT(xfs_reset_dqcounts);
DEFINE_BUF_EVENT(xfs_inode_item_push);
/* pass flags explicitly */
-#define DEFINE_BUF_FLAGS_EVENT(tname) \
-TRACE_EVENT(tname, \
- TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
- TP_ARGS(bp, flags, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_daddr_t, bno) \
- __field(size_t, buffer_length) \
- __field(int, hold) \
- __field(int, pincount) \
- __field(unsigned, lockval) \
- __field(unsigned, flags) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = bp->b_target->bt_dev; \
- __entry->bno = bp->b_bn; \
- __entry->buffer_length = bp->b_buffer_length; \
- __entry->flags = flags; \
- __entry->hold = atomic_read(&bp->b_hold); \
- __entry->pincount = atomic_read(&bp->b_pin_count); \
- __entry->lockval = xfs_buf_lock_value(bp); \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
- "lock %d flags %s caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- (unsigned long long)__entry->bno, \
- __entry->buffer_length, \
- __entry->hold, \
- __entry->pincount, \
- __entry->lockval, \
- __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \
- (void *)__entry->caller_ip) \
+DECLARE_EVENT_CLASS(xfs_buf_flags_class,
+ TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip),
+ TP_ARGS(bp, flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_daddr_t, bno)
+ __field(size_t, buffer_length)
+ __field(int, hold)
+ __field(int, pincount)
+ __field(unsigned, lockval)
+ __field(unsigned, flags)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = bp->b_target->bt_dev;
+ __entry->bno = bp->b_bn;
+ __entry->buffer_length = bp->b_buffer_length;
+ __entry->flags = flags;
+ __entry->hold = atomic_read(&bp->b_hold);
+ __entry->pincount = atomic_read(&bp->b_pin_count);
+ __entry->lockval = xfs_buf_lock_value(bp);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ "lock %d flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->bno,
+ __entry->buffer_length,
+ __entry->hold,
+ __entry->pincount,
+ __entry->lockval,
+ __print_flags(__entry->flags, "|", XFS_BUF_FLAGS),
+ (void *)__entry->caller_ip)
)
+
+#define DEFINE_BUF_FLAGS_EVENT(name) \
+DEFINE_EVENT(xfs_buf_flags_class, name, \
+ TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \
+ TP_ARGS(bp, flags, caller_ip))
DEFINE_BUF_FLAGS_EVENT(xfs_buf_find);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_get);
DEFINE_BUF_FLAGS_EVENT(xfs_buf_read);
@@ -376,55 +392,58 @@ TRACE_EVENT(xfs_buf_ioerror,
(void *)__entry->caller_ip)
);
-#define DEFINE_BUF_ITEM_EVENT(tname) \
-TRACE_EVENT(tname, \
- TP_PROTO(struct xfs_buf_log_item *bip), \
- TP_ARGS(bip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_daddr_t, buf_bno) \
- __field(size_t, buf_len) \
- __field(int, buf_hold) \
- __field(int, buf_pincount) \
- __field(int, buf_lockval) \
- __field(unsigned, buf_flags) \
- __field(unsigned, bli_recur) \
- __field(int, bli_refcount) \
- __field(unsigned, bli_flags) \
- __field(void *, li_desc) \
- __field(unsigned, li_flags) \
- ), \
- TP_fast_assign( \
- __entry->dev = bip->bli_buf->b_target->bt_dev; \
- __entry->bli_flags = bip->bli_flags; \
- __entry->bli_recur = bip->bli_recur; \
- __entry->bli_refcount = atomic_read(&bip->bli_refcount); \
- __entry->buf_bno = bip->bli_buf->b_bn; \
- __entry->buf_len = bip->bli_buf->b_buffer_length; \
- __entry->buf_flags = bip->bli_buf->b_flags; \
- __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \
- __entry->buf_pincount = \
- atomic_read(&bip->bli_buf->b_pin_count); \
- __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \
- __entry->li_desc = bip->bli_item.li_desc; \
- __entry->li_flags = bip->bli_item.li_flags; \
- ), \
- TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \
- "lock %d flags %s recur %d refcount %d bliflags %s " \
- "lidesc 0x%p liflags %s", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- (unsigned long long)__entry->buf_bno, \
- __entry->buf_len, \
- __entry->buf_hold, \
- __entry->buf_pincount, \
- __entry->buf_lockval, \
- __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \
- __entry->bli_recur, \
- __entry->bli_refcount, \
- __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \
- __entry->li_desc, \
- __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \
+DECLARE_EVENT_CLASS(xfs_buf_item_class,
+ TP_PROTO(struct xfs_buf_log_item *bip),
+ TP_ARGS(bip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_daddr_t, buf_bno)
+ __field(size_t, buf_len)
+ __field(int, buf_hold)
+ __field(int, buf_pincount)
+ __field(int, buf_lockval)
+ __field(unsigned, buf_flags)
+ __field(unsigned, bli_recur)
+ __field(int, bli_refcount)
+ __field(unsigned, bli_flags)
+ __field(void *, li_desc)
+ __field(unsigned, li_flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = bip->bli_buf->b_target->bt_dev;
+ __entry->bli_flags = bip->bli_flags;
+ __entry->bli_recur = bip->bli_recur;
+ __entry->bli_refcount = atomic_read(&bip->bli_refcount);
+ __entry->buf_bno = bip->bli_buf->b_bn;
+ __entry->buf_len = bip->bli_buf->b_buffer_length;
+ __entry->buf_flags = bip->bli_buf->b_flags;
+ __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold);
+ __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count);
+ __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf);
+ __entry->li_desc = bip->bli_item.li_desc;
+ __entry->li_flags = bip->bli_item.li_flags;
+ ),
+ TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d "
+ "lock %d flags %s recur %d refcount %d bliflags %s "
+ "lidesc 0x%p liflags %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long long)__entry->buf_bno,
+ __entry->buf_len,
+ __entry->buf_hold,
+ __entry->buf_pincount,
+ __entry->buf_lockval,
+ __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS),
+ __entry->bli_recur,
+ __entry->bli_refcount,
+ __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS),
+ __entry->li_desc,
+ __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS))
)
+
+#define DEFINE_BUF_ITEM_EVENT(name) \
+DEFINE_EVENT(xfs_buf_item_class, name, \
+ TP_PROTO(struct xfs_buf_log_item *bip), \
+ TP_ARGS(bip))
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale);
DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format);
@@ -450,78 +469,90 @@ DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold);
DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release);
DEFINE_BUF_ITEM_EVENT(xfs_trans_binval);
+DECLARE_EVENT_CLASS(xfs_lock_class,
+ TP_PROTO(struct xfs_inode *ip, unsigned lock_flags,
+ unsigned long caller_ip),
+ TP_ARGS(ip, lock_flags, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, lock_flags)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->lock_flags = lock_flags;
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS),
+ (void *)__entry->caller_ip)
+)
+
#define DEFINE_LOCK_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_lock_class, name, \
TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \
unsigned long caller_ip), \
- TP_ARGS(ip, lock_flags, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(int, lock_flags) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->lock_flags = lock_flags; \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \
- (void *)__entry->caller_ip) \
-)
-
+ TP_ARGS(ip, lock_flags, caller_ip))
DEFINE_LOCK_EVENT(xfs_ilock);
DEFINE_LOCK_EVENT(xfs_ilock_nowait);
DEFINE_LOCK_EVENT(xfs_ilock_demote);
DEFINE_LOCK_EVENT(xfs_iunlock);
+DECLARE_EVENT_CLASS(xfs_iget_class,
+ TP_PROTO(struct xfs_inode *ip),
+ TP_ARGS(ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino)
+)
+
#define DEFINE_IGET_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_iget_class, name, \
TP_PROTO(struct xfs_inode *ip), \
- TP_ARGS(ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino) \
-)
+ TP_ARGS(ip))
DEFINE_IGET_EVENT(xfs_iget_skip);
DEFINE_IGET_EVENT(xfs_iget_reclaim);
DEFINE_IGET_EVENT(xfs_iget_found);
DEFINE_IGET_EVENT(xfs_iget_alloc);
+DECLARE_EVENT_CLASS(xfs_inode_class,
+ TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip),
+ TP_ARGS(ip, caller_ip),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, count)
+ __field(unsigned long, caller_ip)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->count = atomic_read(&VFS_I(ip)->i_count);
+ __entry->caller_ip = caller_ip;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx count %d caller %pf",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->count,
+ (char *)__entry->caller_ip)
+)
+
#define DEFINE_INODE_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_inode_class, name, \
TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \
- TP_ARGS(ip, caller_ip), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(int, count) \
- __field(unsigned long, caller_ip) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->count = atomic_read(&VFS_I(ip)->i_count); \
- __entry->caller_ip = caller_ip; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->count, \
- (char *)__entry->caller_ip) \
-)
+ TP_ARGS(ip, caller_ip))
DEFINE_INODE_EVENT(xfs_ihold);
DEFINE_INODE_EVENT(xfs_irele);
/* the old xfs_itrace_entry tracer - to be replaced by something in the VFS */
@@ -529,55 +560,59 @@ DEFINE_INODE_EVENT(xfs_inode);
#define xfs_itrace_entry(ip) \
trace_xfs_inode(ip, _THIS_IP_)
-#define DEFINE_DQUOT_EVENT(tname) \
-TRACE_EVENT(tname, \
- TP_PROTO(struct xfs_dquot *dqp), \
- TP_ARGS(dqp), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(__be32, id) \
- __field(unsigned, flags) \
- __field(unsigned, nrefs) \
- __field(unsigned long long, res_bcount) \
- __field(unsigned long long, bcount) \
- __field(unsigned long long, icount) \
- __field(unsigned long long, blk_hardlimit) \
- __field(unsigned long long, blk_softlimit) \
- __field(unsigned long long, ino_hardlimit) \
- __field(unsigned long long, ino_softlimit) \
- ), \
- TP_fast_assign( \
- __entry->dev = dqp->q_mount->m_super->s_dev; \
- __entry->id = dqp->q_core.d_id; \
- __entry->flags = dqp->dq_flags; \
- __entry->nrefs = dqp->q_nrefs; \
- __entry->res_bcount = dqp->q_res_bcount; \
- __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \
- __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \
- __entry->blk_hardlimit = \
- be64_to_cpu(dqp->q_core.d_blk_hardlimit); \
- __entry->blk_softlimit = \
- be64_to_cpu(dqp->q_core.d_blk_softlimit); \
- __entry->ino_hardlimit = \
- be64_to_cpu(dqp->q_core.d_ino_hardlimit); \
- __entry->ino_softlimit = \
- be64_to_cpu(dqp->q_core.d_ino_softlimit); \
+DECLARE_EVENT_CLASS(xfs_dquot_class,
+ TP_PROTO(struct xfs_dquot *dqp),
+ TP_ARGS(dqp),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(__be32, id)
+ __field(unsigned, flags)
+ __field(unsigned, nrefs)
+ __field(unsigned long long, res_bcount)
+ __field(unsigned long long, bcount)
+ __field(unsigned long long, icount)
+ __field(unsigned long long, blk_hardlimit)
+ __field(unsigned long long, blk_softlimit)
+ __field(unsigned long long, ino_hardlimit)
+ __field(unsigned long long, ino_softlimit)
), \
- TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \
- "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \
- "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- be32_to_cpu(__entry->id), \
- __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \
- __entry->nrefs, \
- __entry->res_bcount, \
- __entry->bcount, \
- __entry->blk_hardlimit, \
- __entry->blk_softlimit, \
- __entry->icount, \
- __entry->ino_hardlimit, \
- __entry->ino_softlimit) \
+ TP_fast_assign(
+ __entry->dev = dqp->q_mount->m_super->s_dev;
+ __entry->id = dqp->q_core.d_id;
+ __entry->flags = dqp->dq_flags;
+ __entry->nrefs = dqp->q_nrefs;
+ __entry->res_bcount = dqp->q_res_bcount;
+ __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount);
+ __entry->icount = be64_to_cpu(dqp->q_core.d_icount);
+ __entry->blk_hardlimit =
+ be64_to_cpu(dqp->q_core.d_blk_hardlimit);
+ __entry->blk_softlimit =
+ be64_to_cpu(dqp->q_core.d_blk_softlimit);
+ __entry->ino_hardlimit =
+ be64_to_cpu(dqp->q_core.d_ino_hardlimit);
+ __entry->ino_softlimit =
+ be64_to_cpu(dqp->q_core.d_ino_softlimit);
+ ),
+ TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx "
+ "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] "
+ "icnt 0x%llx [hard 0x%llx | soft 0x%llx]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ be32_to_cpu(__entry->id),
+ __print_flags(__entry->flags, "|", XFS_DQ_FLAGS),
+ __entry->nrefs,
+ __entry->res_bcount,
+ __entry->bcount,
+ __entry->blk_hardlimit,
+ __entry->blk_softlimit,
+ __entry->icount,
+ __entry->ino_hardlimit,
+ __entry->ino_softlimit)
)
+
+#define DEFINE_DQUOT_EVENT(name) \
+DEFINE_EVENT(xfs_dquot_class, name, \
+ TP_PROTO(struct xfs_dquot *dqp), \
+ TP_ARGS(dqp))
DEFINE_DQUOT_EVENT(xfs_dqadjust);
DEFINE_DQUOT_EVENT(xfs_dqshake_dirty);
DEFINE_DQUOT_EVENT(xfs_dqshake_unlink);
@@ -610,72 +645,75 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_done);
DEFINE_IGET_EVENT(xfs_dquot_dqalloc);
DEFINE_IGET_EVENT(xfs_dquot_dqdetach);
+DECLARE_EVENT_CLASS(xfs_loggrant_class,
+ TP_PROTO(struct log *log, struct xlog_ticket *tic),
+ TP_ARGS(log, tic),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(unsigned, trans_type)
+ __field(char, ocnt)
+ __field(char, cnt)
+ __field(int, curr_res)
+ __field(int, unit_res)
+ __field(unsigned int, flags)
+ __field(void *, reserve_headq)
+ __field(void *, write_headq)
+ __field(int, grant_reserve_cycle)
+ __field(int, grant_reserve_bytes)
+ __field(int, grant_write_cycle)
+ __field(int, grant_write_bytes)
+ __field(int, curr_cycle)
+ __field(int, curr_block)
+ __field(xfs_lsn_t, tail_lsn)
+ ),
+ TP_fast_assign(
+ __entry->dev = log->l_mp->m_super->s_dev;
+ __entry->trans_type = tic->t_trans_type;
+ __entry->ocnt = tic->t_ocnt;
+ __entry->cnt = tic->t_cnt;
+ __entry->curr_res = tic->t_curr_res;
+ __entry->unit_res = tic->t_unit_res;
+ __entry->flags = tic->t_flags;
+ __entry->reserve_headq = log->l_reserve_headq;
+ __entry->write_headq = log->l_write_headq;
+ __entry->grant_reserve_cycle = log->l_grant_reserve_cycle;
+ __entry->grant_reserve_bytes = log->l_grant_reserve_bytes;
+ __entry->grant_write_cycle = log->l_grant_write_cycle;
+ __entry->grant_write_bytes = log->l_grant_write_bytes;
+ __entry->curr_cycle = log->l_curr_cycle;
+ __entry->curr_block = log->l_curr_block;
+ __entry->tail_lsn = log->l_tail_lsn;
+ ),
+ TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u "
+ "t_unit_res %u t_flags %s reserve_headq 0x%p "
+ "write_headq 0x%p grant_reserve_cycle %d "
+ "grant_reserve_bytes %d grant_write_cycle %d "
+ "grant_write_bytes %d curr_cycle %d curr_block %d "
+ "tail_cycle %d tail_block %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES),
+ __entry->ocnt,
+ __entry->cnt,
+ __entry->curr_res,
+ __entry->unit_res,
+ __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS),
+ __entry->reserve_headq,
+ __entry->write_headq,
+ __entry->grant_reserve_cycle,
+ __entry->grant_reserve_bytes,
+ __entry->grant_write_cycle,
+ __entry->grant_write_bytes,
+ __entry->curr_cycle,
+ __entry->curr_block,
+ CYCLE_LSN(__entry->tail_lsn),
+ BLOCK_LSN(__entry->tail_lsn)
+ )
+)
-#define DEFINE_LOGGRANT_EVENT(tname) \
-TRACE_EVENT(tname, \
+#define DEFINE_LOGGRANT_EVENT(name) \
+DEFINE_EVENT(xfs_loggrant_class, name, \
TP_PROTO(struct log *log, struct xlog_ticket *tic), \
- TP_ARGS(log, tic), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(unsigned, trans_type) \
- __field(char, ocnt) \
- __field(char, cnt) \
- __field(int, curr_res) \
- __field(int, unit_res) \
- __field(unsigned int, flags) \
- __field(void *, reserve_headq) \
- __field(void *, write_headq) \
- __field(int, grant_reserve_cycle) \
- __field(int, grant_reserve_bytes) \
- __field(int, grant_write_cycle) \
- __field(int, grant_write_bytes) \
- __field(int, curr_cycle) \
- __field(int, curr_block) \
- __field(xfs_lsn_t, tail_lsn) \
- ), \
- TP_fast_assign( \
- __entry->dev = log->l_mp->m_super->s_dev; \
- __entry->trans_type = tic->t_trans_type; \
- __entry->ocnt = tic->t_ocnt; \
- __entry->cnt = tic->t_cnt; \
- __entry->curr_res = tic->t_curr_res; \
- __entry->unit_res = tic->t_unit_res; \
- __entry->flags = tic->t_flags; \
- __entry->reserve_headq = log->l_reserve_headq; \
- __entry->write_headq = log->l_write_headq; \
- __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \
- __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \
- __entry->grant_write_cycle = log->l_grant_write_cycle; \
- __entry->grant_write_bytes = log->l_grant_write_bytes; \
- __entry->curr_cycle = log->l_curr_cycle; \
- __entry->curr_block = log->l_curr_block; \
- __entry->tail_lsn = log->l_tail_lsn; \
- ), \
- TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \
- "t_unit_res %u t_flags %s reserve_headq 0x%p " \
- "write_headq 0x%p grant_reserve_cycle %d " \
- "grant_reserve_bytes %d grant_write_cycle %d " \
- "grant_write_bytes %d curr_cycle %d curr_block %d " \
- "tail_cycle %d tail_block %d", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \
- __entry->ocnt, \
- __entry->cnt, \
- __entry->curr_res, \
- __entry->unit_res, \
- __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \
- __entry->reserve_headq, \
- __entry->write_headq, \
- __entry->grant_reserve_cycle, \
- __entry->grant_reserve_bytes, \
- __entry->grant_write_cycle, \
- __entry->grant_write_bytes, \
- __entry->curr_cycle, \
- __entry->curr_block, \
- CYCLE_LSN(__entry->tail_lsn), \
- BLOCK_LSN(__entry->tail_lsn) \
- ) \
-)
+ TP_ARGS(log, tic))
DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
DEFINE_LOGGRANT_EVENT(xfs_log_reserve);
@@ -815,7 +853,7 @@ TRACE_EVENT(name, \
), \
TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx " \
"offset 0x%llx count %zd flags %s " \
- "startoff 0x%llx startblock 0x%llx blockcount 0x%llx", \
+ "startoff 0x%llx startblock %s blockcount 0x%llx", \
MAJOR(__entry->dev), MINOR(__entry->dev), \
__entry->ino, \
__entry->size, \
@@ -824,7 +862,7 @@ TRACE_EVENT(name, \
__entry->count, \
__print_flags(__entry->flags, "|", BMAPI_FLAGS), \
__entry->startoff, \
- __entry->startblock, \
+ xfs_fmtfsblock(__entry->startblock), \
__entry->blockcount) \
)
DEFINE_IOMAP_EVENT(xfs_iomap_enter);
@@ -897,28 +935,32 @@ TRACE_EVENT(xfs_itruncate_start,
__entry->toss_finish)
);
+DECLARE_EVENT_CLASS(xfs_itrunc_class,
+ TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size),
+ TP_ARGS(ip, new_size),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(xfs_fsize_t, size)
+ __field(xfs_fsize_t, new_size)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(ip)->i_sb->s_dev;
+ __entry->ino = ip->i_ino;
+ __entry->size = ip->i_d.di_size;
+ __entry->new_size = new_size;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->size,
+ __entry->new_size)
+)
+
#define DEFINE_ITRUNC_EVENT(name) \
-TRACE_EVENT(name, \
+DEFINE_EVENT(xfs_itrunc_class, name, \
TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
- TP_ARGS(ip, new_size), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(xfs_fsize_t, size) \
- __field(xfs_fsize_t, new_size) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(ip)->i_sb->s_dev; \
- __entry->ino = ip->i_ino; \
- __entry->size = ip->i_d.di_size; \
- __entry->new_size = new_size; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->size, \
- __entry->new_size) \
-)
+ TP_ARGS(ip, new_size))
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start);
DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end);
@@ -1037,28 +1079,28 @@ TRACE_EVENT(xfs_alloc_unbusy,
TRACE_EVENT(xfs_alloc_busysearch,
TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno,
- xfs_extlen_t len, int found),
- TP_ARGS(mp, agno, agbno, len, found),
+ xfs_extlen_t len, xfs_lsn_t lsn),
+ TP_ARGS(mp, agno, agbno, len, lsn),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(xfs_agnumber_t, agno)
__field(xfs_agblock_t, agbno)
__field(xfs_extlen_t, len)
- __field(int, found)
+ __field(xfs_lsn_t, lsn)
),
TP_fast_assign(
__entry->dev = mp->m_super->s_dev;
__entry->agno = agno;
__entry->agbno = agbno;
__entry->len = len;
- __entry->found = found;
+ __entry->lsn = lsn;
),
- TP_printk("dev %d:%d agno %u agbno %u len %u %s",
+ TP_printk("dev %d:%d agno %u agbno %u len %u force lsn 0x%llx",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->agno,
__entry->agbno,
__entry->len,
- __print_symbolic(__entry->found, XFS_BUSY_STATES))
+ __entry->lsn)
);
TRACE_EVENT(xfs_agf,
@@ -1152,77 +1194,80 @@ TRACE_EVENT(xfs_free_extent,
);
-#define DEFINE_ALLOC_EVENT(name) \
-TRACE_EVENT(name, \
- TP_PROTO(struct xfs_alloc_arg *args), \
- TP_ARGS(args), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_agnumber_t, agno) \
- __field(xfs_agblock_t, agbno) \
- __field(xfs_extlen_t, minlen) \
- __field(xfs_extlen_t, maxlen) \
- __field(xfs_extlen_t, mod) \
- __field(xfs_extlen_t, prod) \
- __field(xfs_extlen_t, minleft) \
- __field(xfs_extlen_t, total) \
- __field(xfs_extlen_t, alignment) \
- __field(xfs_extlen_t, minalignslop) \
- __field(xfs_extlen_t, len) \
- __field(short, type) \
- __field(short, otype) \
- __field(char, wasdel) \
- __field(char, wasfromfl) \
- __field(char, isfl) \
- __field(char, userdata) \
- __field(xfs_fsblock_t, firstblock) \
- ), \
- TP_fast_assign( \
- __entry->dev = args->mp->m_super->s_dev; \
- __entry->agno = args->agno; \
- __entry->agbno = args->agbno; \
- __entry->minlen = args->minlen; \
- __entry->maxlen = args->maxlen; \
- __entry->mod = args->mod; \
- __entry->prod = args->prod; \
- __entry->minleft = args->minleft; \
- __entry->total = args->total; \
- __entry->alignment = args->alignment; \
- __entry->minalignslop = args->minalignslop; \
- __entry->len = args->len; \
- __entry->type = args->type; \
- __entry->otype = args->otype; \
- __entry->wasdel = args->wasdel; \
- __entry->wasfromfl = args->wasfromfl; \
- __entry->isfl = args->isfl; \
- __entry->userdata = args->userdata; \
- __entry->firstblock = args->firstblock; \
- ), \
- TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " \
- "prod %u minleft %u total %u alignment %u minalignslop %u " \
- "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \
- "userdata %d firstblock 0x%llx", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->agno, \
- __entry->agbno, \
- __entry->minlen, \
- __entry->maxlen, \
- __entry->mod, \
- __entry->prod, \
- __entry->minleft, \
- __entry->total, \
- __entry->alignment, \
- __entry->minalignslop, \
- __entry->len, \
- __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \
- __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \
- __entry->wasdel, \
- __entry->wasfromfl, \
- __entry->isfl, \
- __entry->userdata, \
- __entry->firstblock) \
+DECLARE_EVENT_CLASS(xfs_alloc_class,
+ TP_PROTO(struct xfs_alloc_arg *args),
+ TP_ARGS(args),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_agnumber_t, agno)
+ __field(xfs_agblock_t, agbno)
+ __field(xfs_extlen_t, minlen)
+ __field(xfs_extlen_t, maxlen)
+ __field(xfs_extlen_t, mod)
+ __field(xfs_extlen_t, prod)
+ __field(xfs_extlen_t, minleft)
+ __field(xfs_extlen_t, total)
+ __field(xfs_extlen_t, alignment)
+ __field(xfs_extlen_t, minalignslop)
+ __field(xfs_extlen_t, len)
+ __field(short, type)
+ __field(short, otype)
+ __field(char, wasdel)
+ __field(char, wasfromfl)
+ __field(char, isfl)
+ __field(char, userdata)
+ __field(xfs_fsblock_t, firstblock)
+ ),
+ TP_fast_assign(
+ __entry->dev = args->mp->m_super->s_dev;
+ __entry->agno = args->agno;
+ __entry->agbno = args->agbno;
+ __entry->minlen = args->minlen;
+ __entry->maxlen = args->maxlen;
+ __entry->mod = args->mod;
+ __entry->prod = args->prod;
+ __entry->minleft = args->minleft;
+ __entry->total = args->total;
+ __entry->alignment = args->alignment;
+ __entry->minalignslop = args->minalignslop;
+ __entry->len = args->len;
+ __entry->type = args->type;
+ __entry->otype = args->otype;
+ __entry->wasdel = args->wasdel;
+ __entry->wasfromfl = args->wasfromfl;
+ __entry->isfl = args->isfl;
+ __entry->userdata = args->userdata;
+ __entry->firstblock = args->firstblock;
+ ),
+ TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u "
+ "prod %u minleft %u total %u alignment %u minalignslop %u "
+ "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d "
+ "userdata %d firstblock 0x%llx",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->agno,
+ __entry->agbno,
+ __entry->minlen,
+ __entry->maxlen,
+ __entry->mod,
+ __entry->prod,
+ __entry->minleft,
+ __entry->total,
+ __entry->alignment,
+ __entry->minalignslop,
+ __entry->len,
+ __print_symbolic(__entry->type, XFS_ALLOC_TYPES),
+ __print_symbolic(__entry->otype, XFS_ALLOC_TYPES),
+ __entry->wasdel,
+ __entry->wasfromfl,
+ __entry->isfl,
+ __entry->userdata,
+ __entry->firstblock)
)
+#define DEFINE_ALLOC_EVENT(name) \
+DEFINE_EVENT(xfs_alloc_class, name, \
+ TP_PROTO(struct xfs_alloc_arg *args), \
+ TP_ARGS(args))
DEFINE_ALLOC_EVENT(xfs_alloc_exact_done);
DEFINE_ALLOC_EVENT(xfs_alloc_exact_error);
DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft);
@@ -1245,92 +1290,100 @@ DEFINE_ALLOC_EVENT(xfs_alloc_vextent_noagbp);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed);
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed);
-#define DEFINE_DIR2_TRACE(tname) \
-TRACE_EVENT(tname, \
+DECLARE_EVENT_CLASS(xfs_dir2_class,
+ TP_PROTO(struct xfs_da_args *args),
+ TP_ARGS(args),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __dynamic_array(char, name, args->namelen)
+ __field(int, namelen)
+ __field(xfs_dahash_t, hashval)
+ __field(xfs_ino_t, inumber)
+ __field(int, op_flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
+ __entry->ino = args->dp->i_ino;
+ if (args->namelen)
+ memcpy(__get_str(name), args->name, args->namelen);
+ __entry->namelen = args->namelen;
+ __entry->hashval = args->hashval;
+ __entry->inumber = args->inumber;
+ __entry->op_flags = args->op_flags;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x "
+ "inumber 0x%llx op_flags %s",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __entry->namelen,
+ __entry->namelen ? __get_str(name) : NULL,
+ __entry->namelen,
+ __entry->hashval,
+ __entry->inumber,
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS))
+)
+
+#define DEFINE_DIR2_EVENT(name) \
+DEFINE_EVENT(xfs_dir2_class, name, \
TP_PROTO(struct xfs_da_args *args), \
- TP_ARGS(args), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __dynamic_array(char, name, args->namelen) \
- __field(int, namelen) \
- __field(xfs_dahash_t, hashval) \
- __field(xfs_ino_t, inumber) \
- __field(int, op_flags) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
- __entry->ino = args->dp->i_ino; \
- if (args->namelen) \
- memcpy(__get_str(name), args->name, args->namelen); \
- __entry->namelen = args->namelen; \
- __entry->hashval = args->hashval; \
- __entry->inumber = args->inumber; \
- __entry->op_flags = args->op_flags; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \
- "inumber 0x%llx op_flags %s", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __entry->namelen, \
- __entry->namelen ? __get_str(name) : NULL, \
- __entry->namelen, \
- __entry->hashval, \
- __entry->inumber, \
- __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \
+ TP_ARGS(args))
+DEFINE_DIR2_EVENT(xfs_dir2_sf_addname);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_create);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_replace);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_removename);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8);
+DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block);
+DEFINE_DIR2_EVENT(xfs_dir2_block_addname);
+DEFINE_DIR2_EVENT(xfs_dir2_block_lookup);
+DEFINE_DIR2_EVENT(xfs_dir2_block_replace);
+DEFINE_DIR2_EVENT(xfs_dir2_block_removename);
+DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf);
+DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block);
+DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node);
+DEFINE_DIR2_EVENT(xfs_dir2_node_addname);
+DEFINE_DIR2_EVENT(xfs_dir2_node_lookup);
+DEFINE_DIR2_EVENT(xfs_dir2_node_replace);
+DEFINE_DIR2_EVENT(xfs_dir2_node_removename);
+DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf);
+
+DECLARE_EVENT_CLASS(xfs_dir2_space_class,
+ TP_PROTO(struct xfs_da_args *args, int idx),
+ TP_ARGS(args, idx),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(xfs_ino_t, ino)
+ __field(int, op_flags)
+ __field(int, idx)
+ ),
+ TP_fast_assign(
+ __entry->dev = VFS_I(args->dp)->i_sb->s_dev;
+ __entry->ino = args->dp->i_ino;
+ __entry->op_flags = args->op_flags;
+ __entry->idx = idx;
+ ),
+ TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->ino,
+ __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS),
+ __entry->idx)
)
-DEFINE_DIR2_TRACE(xfs_dir2_sf_addname);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_create);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_replace);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_removename);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8);
-DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block);
-DEFINE_DIR2_TRACE(xfs_dir2_block_addname);
-DEFINE_DIR2_TRACE(xfs_dir2_block_lookup);
-DEFINE_DIR2_TRACE(xfs_dir2_block_replace);
-DEFINE_DIR2_TRACE(xfs_dir2_block_removename);
-DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf);
-DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block);
-DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node);
-DEFINE_DIR2_TRACE(xfs_dir2_node_addname);
-DEFINE_DIR2_TRACE(xfs_dir2_node_lookup);
-DEFINE_DIR2_TRACE(xfs_dir2_node_replace);
-DEFINE_DIR2_TRACE(xfs_dir2_node_removename);
-DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf);
-#define DEFINE_DIR2_SPACE_TRACE(tname) \
-TRACE_EVENT(tname, \
+#define DEFINE_DIR2_SPACE_EVENT(name) \
+DEFINE_EVENT(xfs_dir2_space_class, name, \
TP_PROTO(struct xfs_da_args *args, int idx), \
- TP_ARGS(args, idx), \
- TP_STRUCT__entry( \
- __field(dev_t, dev) \
- __field(xfs_ino_t, ino) \
- __field(int, op_flags) \
- __field(int, idx) \
- ), \
- TP_fast_assign( \
- __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \
- __entry->ino = args->dp->i_ino; \
- __entry->op_flags = args->op_flags; \
- __entry->idx = idx; \
- ), \
- TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \
- MAJOR(__entry->dev), MINOR(__entry->dev), \
- __entry->ino, \
- __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \
- __entry->idx) \
-)
-DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add);
-DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove);
-DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode);
-DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode);
+ TP_ARGS(args, idx))
+DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add);
+DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove);
+DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode);
+DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode);
TRACE_EVENT(xfs_dir2_leafn_moveents,
TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
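All of the xfs_trace.h hunks above are the same mechanical conversion: a TRACE_EVENT body that used to be stamped out once per event through a wrapper macro becomes a single shared DECLARE_EVENT_CLASS, and each event is then declared as a lightweight DEFINE_EVENT instance. A minimal sketch of the pattern, using illustrative names that are not part of this patch:

DECLARE_EVENT_CLASS(xfs_example_class,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip),
	TP_STRUCT__entry(
		__field(dev_t, dev)
		__field(xfs_ino_t, ino)
	),
	TP_fast_assign(
		__entry->dev = VFS_I(ip)->i_sb->s_dev;
		__entry->ino = ip->i_ino;
	),
	TP_printk("dev %d:%d ino 0x%llx",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino)
)

/* Each instance carries only its name and prototype; the body is shared. */
DEFINE_EVENT(xfs_example_class, xfs_example_event_one,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip));
DEFINE_EVENT(xfs_example_class, xfs_example_event_two,
	TP_PROTO(struct xfs_inode *ip),
	TP_ARGS(ip));

The per-name wrapper macros kept by the patch (DEFINE_BUF_EVENT, DEFINE_DIR2_EVENT, and so on) simply hide the repeated TP_PROTO/TP_ARGS lines of the DEFINE_EVENT form.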
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 71af76fe8a23..873e07e29074 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -891,7 +891,7 @@ xfs_qm_dqrele_all_inodes(
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG);
+ xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG, 0);
}
/*------------------------------------------------------------------------*/
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index a1c65fc6d9c4..275b1f4f9430 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -2563,43 +2563,41 @@ xfs_alloc_search_busy(xfs_trans_t *tp,
xfs_mount_t *mp;
xfs_perag_busy_t *bsy;
xfs_agblock_t uend, bend;
- xfs_lsn_t lsn;
+ xfs_lsn_t lsn = 0;
int cnt;
mp = tp->t_mountp;
spin_lock(&mp->m_perag[agno].pagb_lock);
- cnt = mp->m_perag[agno].pagb_count;
uend = bno + len - 1;
- /* search pagb_list for this slot, skipping open slots */
- for (bsy = mp->m_perag[agno].pagb_list; cnt; bsy++) {
+ /*
+ * search pagb_list for this slot, skipping open slots. We have to
+ * search the entire array as there may be multiple overlaps and
+ * we have to get the most recent LSN for the log force to push out
+ * all the transactions that span the range.
+ */
+ for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) {
+ bsy = &mp->m_perag[agno].pagb_list[cnt];
+ if (!bsy->busy_tp)
+ continue;
- /*
- * (start1,length1) within (start2, length2)
- */
- if (bsy->busy_tp != NULL) {
- bend = bsy->busy_start + bsy->busy_length - 1;
- if ((bno > bend) || (uend < bsy->busy_start)) {
- cnt--;
- } else {
- break;
- }
- }
- }
+ bend = bsy->busy_start + bsy->busy_length - 1;
+ if (bno > bend || uend < bsy->busy_start)
+ continue;
- trace_xfs_alloc_busysearch(mp, agno, bno, len, !!cnt);
+ /* (start1,length1) within (start2, length2) */
+ if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
+ lsn = bsy->busy_tp->t_commit_lsn;
+ }
+ spin_unlock(&mp->m_perag[agno].pagb_lock);
+ trace_xfs_alloc_busysearch(tp->t_mountp, agno, bno, len, lsn);
/*
* If a block was found, force the log through the LSN of the
* transaction that freed the block
*/
- if (cnt) {
- lsn = bsy->busy_tp->t_commit_lsn;
- spin_unlock(&mp->m_perag[agno].pagb_lock);
+ if (lsn)
xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
- } else {
- spin_unlock(&mp->m_perag[agno].pagb_lock);
- }
}
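Condensed, the rewritten busy search above is: walk every slot in the per-AG busy array, skip open slots, and among the entries that overlap [bno, bno + len) keep the highest transaction commit LSN; the lock is then dropped and the log is forced once if anything matched. A sketch of that scan as a standalone helper (the function name is illustrative; the fields are the ones used in the hunk):

static xfs_lsn_t
xfs_busy_range_max_lsn(xfs_perag_busy_t *list, int count,
		       xfs_agblock_t bno, xfs_agblock_t uend)
{
	xfs_lsn_t	lsn = 0;
	int		i;

	for (i = 0; i < count; i++) {
		xfs_perag_busy_t *bsy = &list[i];

		if (!bsy->busy_tp)
			continue;	/* open slot */
		if (bno > bsy->busy_start + bsy->busy_length - 1 ||
		    uend < bsy->busy_start)
			continue;	/* no overlap with the requested range */
		if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
			lsn = bsy->busy_tp->t_commit_lsn;
	}
	return lsn;	/* 0 means nothing overlapped, so no log force is needed */
}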
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index d1483a4f71b8..84ca1cf16a1e 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -114,10 +114,82 @@ xfs_swapext(
return error;
}
+/*
+ * We need to check that the format of the data fork in the temporary inode is
+ * valid for the target inode before doing the swap. This is not a problem with
+ * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
+ * data fork depending on the space the attribute fork is taking so we can get
+ * invalid formats on the target inode.
+ *
+ * E.g. target has space for 7 extents in extent format, temp inode only has
+ * space for 6. If we defragment down to 7 extents, then the tmp format is a
+ * btree, but when swapped it needs to be in extent format. Hence we can't just
+ * blindly swap data forks on attr2 filesystems.
+ *
+ * Note that we check the swap in both directions so that we don't end up with
+ * a corrupt temporary inode, either.
+ *
+ * Note that fixing the way xfs_fsr sets up the attribute fork in the source
+ * inode will prevent this situation from occurring, so all we do here is
+ * reject and log the attempt. Basically we are putting the responsibility on
+ * userspace to get this right.
+ */
+static int
+xfs_swap_extents_check_format(
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip) /* tmp inode */
+{
+
+ /* Should never get a local format */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
+ tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
+ return EINVAL;
+
+ /*
+	 * if the target inode has fewer extents than the temporary inode then
+ * why did userspace call us?
+ */
+ if (ip->i_d.di_nextents < tip->i_d.di_nextents)
+ return EINVAL;
+
+ /*
+ * if the target inode is in extent form and the temp inode is in btree
+ * form then we will end up with the target inode in the wrong format
+	 * as we already know there are fewer extents in the temp inode.
+ */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
+ return EINVAL;
+
+ /* Check temp in extent form to max in target */
+ if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > ip->i_df.if_ext_max)
+ return EINVAL;
+
+ /* Check target in extent form to max in temp */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
+ XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max)
+ return EINVAL;
+
+ /* Check root block of temp in btree form to max in target */
+ if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_BOFF(ip) &&
+ tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
+ return EINVAL;
+
+ /* Check root block of target in btree form to max in temp */
+ if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
+ XFS_IFORK_BOFF(tip) &&
+ ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
+ return EINVAL;
+
+ return 0;
+}
+
int
xfs_swap_extents(
- xfs_inode_t *ip,
- xfs_inode_t *tip,
+ xfs_inode_t *ip, /* target inode */
+ xfs_inode_t *tip, /* tmp inode */
xfs_swapext_t *sxp)
{
xfs_mount_t *mp;
@@ -161,13 +233,6 @@ xfs_swap_extents(
goto out_unlock;
}
- /* Should never get a local format */
- if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
- tip->i_d.di_format == XFS_DINODE_FMT_LOCAL) {
- error = XFS_ERROR(EINVAL);
- goto out_unlock;
- }
-
if (VN_CACHED(VFS_I(tip)) != 0) {
error = xfs_flushinval_pages(tip, 0, -1,
FI_REMAPF_LOCKED);
@@ -189,13 +254,12 @@ xfs_swap_extents(
goto out_unlock;
}
- /*
- * If the target has extended attributes, the tmp file
- * must also in order to ensure the correct data fork
- * format.
- */
- if ( XFS_IFORK_Q(ip) != XFS_IFORK_Q(tip) ) {
- error = XFS_ERROR(EINVAL);
+ /* check inode formats now that data is flushed */
+ error = xfs_swap_extents_check_format(ip, tip);
+ if (error) {
+ xfs_fs_cmn_err(CE_NOTE, mp,
+ "%s: inode 0x%llx format is incompatible for exchanging.",
+ __FILE__, ip->i_ino);
goto out_unlock;
}
@@ -276,6 +340,16 @@ xfs_swap_extents(
*tifp = *tempifp; /* struct copy */
/*
+ * Fix the in-memory data fork values that are dependent on the fork
+ * offset in the inode. We can't assume they remain the same as attr2
+ * has dynamic fork offsets.
+ */
+ ifp->if_ext_max = XFS_IFORK_SIZE(ip, XFS_DATA_FORK) /
+ (uint)sizeof(xfs_bmbt_rec_t);
+ tifp->if_ext_max = XFS_IFORK_SIZE(tip, XFS_DATA_FORK) /
+ (uint)sizeof(xfs_bmbt_rec_t);
+
+ /*
* Fix the on-disk inode values
*/
tmp = (__uint64_t)ip->i_d.di_nblocks;
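The if_ext_max recalculation above is plain arithmetic: the number of extent records a data fork can hold in extent format is its byte size divided by the size of one in-core extent record, and with attr2 that byte size depends on where the attribute fork starts. A worked example with purely hypothetical numbers (not taken from this patch), matching the 7-versus-6 scenario described in the xfs_swap_extents_check_format() comment:

	/* assume one xfs_bmbt_rec_t is 16 bytes */
	int target_fork_bytes = 112;	/* hypothetical XFS_IFORK_SIZE(ip, XFS_DATA_FORK) */
	int tmp_fork_bytes = 96;	/* hypothetical XFS_IFORK_SIZE(tip, XFS_DATA_FORK) */

	int target_ext_max = target_fork_bytes / 16;	/* 7 extents fit in extent format */
	int tmp_ext_max = tmp_fork_bytes / 16;		/* only 6 extents fit */

	/*
	 * Defragmenting the file down to 7 extents leaves the tmp inode in
	 * btree format; blindly swapping the forks would hand the target a
	 * fork it cannot represent, which is exactly what the new format
	 * check rejects and what the if_ext_max fix-up keeps consistent
	 * after a successful swap.
	 */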
diff --git a/fs/xfs/xfs_iget.c b/fs/xfs/xfs_iget.c
index fa402a6bbbcf..155e798f30a1 100644
--- a/fs/xfs/xfs_iget.c
+++ b/fs/xfs/xfs_iget.c
@@ -73,7 +73,6 @@ xfs_inode_alloc(
ASSERT(atomic_read(&ip->i_pincount) == 0);
ASSERT(!spin_is_locked(&ip->i_flags_lock));
ASSERT(completion_done(&ip->i_flush));
- ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index ce278b3ae7fc..ef77fd88c8e3 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -2841,8 +2841,8 @@ xfs_iflush(
mp = ip->i_mount;
/*
- * If the inode isn't dirty, then just release the inode
- * flush lock and do nothing.
+ * If the inode isn't dirty, then just release the inode flush lock and
+ * do nothing.
*/
if (xfs_inode_clean(ip)) {
xfs_ifunlock(ip);
@@ -2868,6 +2868,19 @@ xfs_iflush(
xfs_iunpin_wait(ip);
/*
+ * For stale inodes we cannot rely on the backing buffer remaining
+ * stale in cache for the remaining life of the stale inode and so
+ * xfs_itobp() below may give us a buffer that no longer contains
+ * inodes below. We have to check this after ensuring the inode is
+ * unpinned so that it is safe to reclaim the stale inode after the
+ * flush call.
+ */
+ if (xfs_iflags_test(ip, XFS_ISTALE)) {
+ xfs_ifunlock(ip);
+ return 0;
+ }
+
+ /*
* This may have been unpinned because the filesystem is shutting
* down forcibly. If that's the case we must not write this inode
* to disk, because the log record didn't make it to disk!
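The position of the new XFS_ISTALE check is deliberate: as its comment says, it must run after xfs_iunpin_wait() so the stale inode is safe to reclaim once the flush call returns, and before xfs_itobp() since the backing buffer may no longer contain inodes. Reduced to a skeleton (heavily condensed, not the full xfs_iflush() logic):

	xfs_iunpin_wait(ip);		/* 1: wait until the inode is no longer pinned */

	if (xfs_iflags_test(ip, XFS_ISTALE)) {
		xfs_ifunlock(ip);	/* 2: stale - drop the flush lock, write nothing */
		return 0;
	}

	/* 3: only now is mapping the inode to its buffer via xfs_itobp() safe */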
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index 9e15a1185362..6be05f756d59 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -1517,6 +1517,8 @@ xfs_rtfree_range(
*/
error = xfs_rtfind_forw(mp, tp, end, mp->m_sb.sb_rextents - 1,
&postblock);
+ if (error)
+ return error;
/*
* If there are blocks not being freed at the front of the
* old extent, add summary data for them to be allocated.
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 6558ffd8d140..6f268756bf36 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -70,7 +70,6 @@ xfs_setattr(
uint commit_flags=0;
uid_t uid=0, iuid=0;
gid_t gid=0, igid=0;
- int timeflags = 0;
struct xfs_dquot *udqp, *gdqp, *olddquot1, *olddquot2;
int need_iolock = 1;
@@ -135,16 +134,13 @@ xfs_setattr(
if (flags & XFS_ATTR_NOLOCK)
need_iolock = 0;
if (!(mask & ATTR_SIZE)) {
- if ((mask != (ATTR_CTIME|ATTR_ATIME|ATTR_MTIME)) ||
- (mp->m_flags & XFS_MOUNT_WSYNC)) {
- tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
- commit_flags = 0;
- if ((code = xfs_trans_reserve(tp, 0,
- XFS_ICHANGE_LOG_RES(mp), 0,
- 0, 0))) {
- lock_flags = 0;
- goto error_return;
- }
+ tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+ commit_flags = 0;
+ code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
+ 0, 0, 0);
+ if (code) {
+ lock_flags = 0;
+ goto error_return;
}
} else {
if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
@@ -295,15 +291,23 @@ xfs_setattr(
* or we are explicitly asked to change it. This handles
* the semantic difference between truncate() and ftruncate()
* as implemented in the VFS.
+ *
+ * The regular truncate() case without ATTR_CTIME and ATTR_MTIME
+ * is a special case where we need to update the times despite
+ * not having these flags set. For all other operations the
+	 * VFS sets these flags explicitly if it wants a timestamp
+ * update.
*/
- if (iattr->ia_size != ip->i_size || (mask & ATTR_CTIME))
- timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+ if (iattr->ia_size != ip->i_size &&
+ (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+ iattr->ia_ctime = iattr->ia_mtime =
+ current_fs_time(inode->i_sb);
+ mask |= ATTR_CTIME | ATTR_MTIME;
+ }
if (iattr->ia_size > ip->i_size) {
ip->i_d.di_size = iattr->ia_size;
ip->i_size = iattr->ia_size;
- if (!(flags & XFS_ATTR_DMI))
- xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
} else if (iattr->ia_size <= ip->i_size ||
(iattr->ia_size == 0 && ip->i_d.di_nextents)) {
@@ -374,9 +378,6 @@ xfs_setattr(
ip->i_d.di_gid = gid;
inode->i_gid = gid;
}
-
- xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
- timeflags |= XFS_ICHGTIME_CHG;
}
/*
@@ -393,51 +394,37 @@ xfs_setattr(
inode->i_mode &= S_IFMT;
inode->i_mode |= mode & ~S_IFMT;
-
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- timeflags |= XFS_ICHGTIME_CHG;
}
/*
* Change file access or modified times.
*/
- if (mask & (ATTR_ATIME|ATTR_MTIME)) {
- if (mask & ATTR_ATIME) {
- inode->i_atime = iattr->ia_atime;
- ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
- ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
- ip->i_update_core = 1;
- }
- if (mask & ATTR_MTIME) {
- inode->i_mtime = iattr->ia_mtime;
- ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
- ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
- timeflags &= ~XFS_ICHGTIME_MOD;
- timeflags |= XFS_ICHGTIME_CHG;
- }
- if (tp && (mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)))
- xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+ if (mask & ATTR_ATIME) {
+ inode->i_atime = iattr->ia_atime;
+ ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
+ ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
+ ip->i_update_core = 1;
}
-
- /*
- * Change file inode change time only if ATTR_CTIME set
- * AND we have been called by a DMI function.
- */
-
- if ((flags & XFS_ATTR_DMI) && (mask & ATTR_CTIME)) {
+ if (mask & ATTR_CTIME) {
inode->i_ctime = iattr->ia_ctime;
ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
ip->i_update_core = 1;
- timeflags &= ~XFS_ICHGTIME_CHG;
+ }
+ if (mask & ATTR_MTIME) {
+ inode->i_mtime = iattr->ia_mtime;
+ ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
+ ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
+ ip->i_update_core = 1;
}
/*
- * Send out timestamp changes that need to be set to the
- * current time. Not done when called by a DMI function.
+ * And finally, log the inode core if any attribute in it
+ * has been changed.
*/
- if (timeflags && !(flags & XFS_ATTR_DMI))
- xfs_ichgtime(ip, timeflags);
+ if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
+ ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
XFS_STATS_INC(xs_ig_attrchg);
@@ -452,12 +439,10 @@ xfs_setattr(
* mix so this probably isn't worth the trouble to optimize.
*/
code = 0;
- if (tp) {
- if (mp->m_flags & XFS_MOUNT_WSYNC)
- xfs_trans_set_sync(tp);
+ if (mp->m_flags & XFS_MOUNT_WSYNC)
+ xfs_trans_set_sync(tp);
- code = xfs_trans_commit(tp, commit_flags);
- }
+ code = xfs_trans_commit(tp, commit_flags);
xfs_iunlock(ip, lock_flags);