Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig | 1
-rw-r--r--  fs/Makefile | 1
-rw-r--r--  fs/afs/cell.c | 4
-rw-r--r--  fs/afs/cmservice.c | 10
-rw-r--r--  fs/afs/dir.c | 92
-rw-r--r--  fs/afs/file.c | 12
-rw-r--r--  fs/afs/vlclient.c | 11
-rw-r--r--  fs/afs/yfsclient.c | 2
-rw-r--r--  fs/block_dev.c | 64
-rw-r--r--  fs/btrfs/ctree.h | 4
-rw-r--r--  fs/btrfs/disk-io.c | 2
-rw-r--r--  fs/btrfs/extent-tree.c | 71
-rw-r--r--  fs/btrfs/extent_io.c | 35
-rw-r--r--  fs/btrfs/tree-log.c | 16
-rw-r--r--  fs/btrfs/volumes.c | 13
-rw-r--r--  fs/ceph/addr.c | 5
-rw-r--r--  fs/ceph/caps.c | 5
-rw-r--r--  fs/ceph/inode.c | 7
-rw-r--r--  fs/ceph/locks.c | 3
-rw-r--r--  fs/ceph/snap.c | 4
-rw-r--r--  fs/ceph/super.h | 2
-rw-r--r--  fs/ceph/xattr.c | 19
-rw-r--r--  fs/cifs/Kconfig | 2
-rw-r--r--  fs/cifs/cifsfs.c | 1
-rw-r--r--  fs/cifs/cifsfs.h | 2
-rw-r--r--  fs/cifs/cifsproto.h | 1
-rw-r--r--  fs/cifs/cifssmb.c | 197
-rw-r--r--  fs/cifs/connect.c | 30
-rw-r--r--  fs/cifs/dir.c | 5
-rw-r--r--  fs/cifs/export.c | 2
-rw-r--r--  fs/cifs/misc.c | 22
-rw-r--r--  fs/cifs/sess.c | 26
-rw-r--r--  fs/cifs/smb2ops.c | 39
-rw-r--r--  fs/cifs/smb2pdu.c | 7
-rw-r--r--  fs/cifs/smbencrypt.c | 18
-rw-r--r--  fs/coda/pioctl.c | 7
-rw-r--r--  fs/compat_ioctl.c | 3
-rw-r--r--  fs/configfs/configfs_internal.h | 15
-rw-r--r--  fs/configfs/dir.c | 137
-rw-r--r--  fs/configfs/file.c | 280
-rw-r--r--  fs/d_path.c | 6
-rw-r--r--  fs/dax.c | 2
-rw-r--r--  fs/devpts/inode.c | 2
-rw-r--r--  fs/erofs/Kconfig | 91
-rw-r--r--  fs/erofs/Makefile | 11
-rw-r--r--  fs/erofs/compress.h | 60
-rw-r--r--  fs/erofs/data.c | 360
-rw-r--r--  fs/erofs/decompressor.c | 338
-rw-r--r--  fs/erofs/dir.c | 142
-rw-r--r--  fs/erofs/erofs_fs.h | 316
-rw-r--r--  fs/erofs/inode.c | 337
-rw-r--r--  fs/erofs/internal.h | 431
-rw-r--r--  fs/erofs/namei.c | 252
-rw-r--r--  fs/erofs/super.c | 615
-rw-r--r--  fs/erofs/tagptr.h | 110
-rw-r--r--  fs/erofs/utils.c | 333
-rw-r--r--  fs/erofs/xattr.c | 704
-rw-r--r--  fs/erofs/xattr.h | 92
-rw-r--r--  fs/erofs/zdata.c | 1431
-rw-r--r--  fs/erofs/zdata.h | 193
-rw-r--r--  fs/erofs/zmap.c | 471
-rw-r--r--  fs/erofs/zpvec.h | 157
-rw-r--r--  fs/eventpoll.c | 4
-rw-r--r--  fs/exportfs/expfs.c | 2
-rw-r--r--  fs/ext4/inode.c | 3
-rw-r--r--  fs/fs-writeback.c | 174
-rw-r--r--  fs/fs_context.c | 4
-rw-r--r--  fs/gfs2/bmap.c | 164
-rw-r--r--  fs/io_uring.c | 593
-rw-r--r--  fs/isofs/export.c | 2
-rw-r--r--  fs/jfs/Kconfig | 2
-rw-r--r--  fs/kernfs/dir.c | 9
-rw-r--r--  fs/locks.c | 11
-rw-r--r--  fs/namei.c | 64
-rw-r--r--  fs/namespace.c | 21
-rw-r--r--  fs/nfs/delegation.c | 25
-rw-r--r--  fs/nfs/delegation.h | 2
-rw-r--r--  fs/nfs/dir.c | 2
-rw-r--r--  fs/nfs/direct.c | 27
-rw-r--r--  fs/nfs/flexfilelayout/flexfilelayout.c | 28
-rw-r--r--  fs/nfs/fscache.c | 7
-rw-r--r--  fs/nfs/fscache.h | 2
-rw-r--r--  fs/nfs/inode.c | 35
-rw-r--r--  fs/nfs/internal.h | 10
-rw-r--r--  fs/nfs/nfs4_fs.h | 3
-rw-r--r--  fs/nfs/nfs4client.c | 5
-rw-r--r--  fs/nfs/nfs4file.c | 12
-rw-r--r--  fs/nfs/nfs4proc.c | 109
-rw-r--r--  fs/nfs/nfs4state.c | 49
-rw-r--r--  fs/nfs/nfstrace.h | 2
-rw-r--r--  fs/nfs/pagelist.c | 19
-rw-r--r--  fs/nfs/pnfs.c | 7
-rw-r--r--  fs/nfs/pnfs_nfs.c | 15
-rw-r--r--  fs/nfs/proc.c | 7
-rw-r--r--  fs/nfs/read.c | 35
-rw-r--r--  fs/nfs/super.c | 1
-rw-r--r--  fs/nfs/write.c | 38
-rw-r--r--  fs/nfsd/nfscache.c | 2
-rw-r--r--  fs/nfsd/nfsctl.c | 22
-rw-r--r--  fs/orangefs/file.c | 2
-rw-r--r--  fs/orangefs/orangefs-kernel.h | 2
-rw-r--r--  fs/proc/root.c | 3
-rw-r--r--  fs/read_write.c | 49
-rw-r--r--  fs/seq_file.c | 2
-rw-r--r--  fs/super.c | 104
-rw-r--r--  fs/timerfd.c | 6
-rw-r--r--  fs/ubifs/budget.c | 2
-rw-r--r--  fs/ubifs/orphan.c | 2
-rw-r--r--  fs/ubifs/super.c | 4
-rw-r--r--  fs/ufs/Kconfig | 2
-rw-r--r--  fs/userfaultfd.c | 25
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c | 29
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.c | 19
-rw-r--r--  fs/xfs/libxfs/xfs_dir2_node.c | 3
-rw-r--r--  fs/xfs/xfs_ioctl.c | 2
-rw-r--r--  fs/xfs/xfs_ioctl32.c | 56
-rw-r--r--  fs/xfs/xfs_iops.c | 1
-rw-r--r--  fs/xfs/xfs_log.c | 5
-rw-r--r--  fs/xfs/xfs_pnfs.c | 2
-rw-r--r--  fs/xfs/xfs_reflink.c | 63
120 files changed, 8307 insertions, 1224 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index bfb1c6095c7a..669d46550e6d 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -261,6 +261,7 @@ source "fs/romfs/Kconfig"
source "fs/pstore/Kconfig"
source "fs/sysv/Kconfig"
source "fs/ufs/Kconfig"
+source "fs/erofs/Kconfig"
endif # MISC_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index d60089fd689b..b2e4973a0bea 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -130,3 +130,4 @@ obj-$(CONFIG_F2FS_FS) += f2fs/
obj-$(CONFIG_CEPH_FS) += ceph/
obj-$(CONFIG_PSTORE) += pstore/
obj-$(CONFIG_EFIVAR_FS) += efivarfs/
+obj-$(CONFIG_EROFS_FS) += erofs/
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index a2a87117d262..fd5133e26a38 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -74,6 +74,7 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
cell = rcu_dereference_raw(net->ws_cell);
if (cell) {
afs_get_cell(cell);
+ ret = 0;
break;
}
ret = -EDESTADDRREQ;
@@ -108,6 +109,9 @@ struct afs_cell *afs_lookup_cell_rcu(struct afs_net *net,
done_seqretry(&net->cells_lock, seq);
+ if (ret != 0 && cell)
+ afs_put_cell(net, cell);
+
return ret == 0 ? cell : ERR_PTR(ret);
}
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
index 4f1b6f466ff5..b86195e4dc6c 100644
--- a/fs/afs/cmservice.c
+++ b/fs/afs/cmservice.c
@@ -505,18 +505,14 @@ static void SRXAFSCB_ProbeUuid(struct work_struct *work)
struct afs_call *call = container_of(work, struct afs_call, work);
struct afs_uuid *r = call->request;
- struct {
- __be32 match;
- } reply;
-
_enter("");
if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0)
- reply.match = htonl(0);
+ afs_send_empty_reply(call);
else
- reply.match = htonl(1);
+ rxrpc_kernel_abort_call(call->net->socket, call->rxcall,
+ 1, 1, "K-1");
- afs_send_simple_reply(call, &reply, sizeof(reply));
afs_put_call(call);
_leave("");
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index e640d67274be..139b4e3cc946 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -440,7 +440,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
* iterate through the data blob that lists the contents of an AFS directory
*/
static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
- struct key *key)
+ struct key *key, afs_dataversion_t *_dir_version)
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_xdr_dir_page *dbuf;
@@ -460,6 +460,7 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
req = afs_read_dir(dvnode, key);
if (IS_ERR(req))
return PTR_ERR(req);
+ *_dir_version = req->data_version;
/* round the file position up to the next entry boundary */
ctx->pos += sizeof(union afs_xdr_dirent) - 1;
@@ -514,7 +515,10 @@ out:
*/
static int afs_readdir(struct file *file, struct dir_context *ctx)
{
- return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file));
+ afs_dataversion_t dir_version;
+
+ return afs_dir_iterate(file_inode(file), ctx, afs_file_key(file),
+ &dir_version);
}
/*
@@ -555,7 +559,8 @@ static int afs_lookup_one_filldir(struct dir_context *ctx, const char *name,
* - just returns the FID the dentry name maps to if found
*/
static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
- struct afs_fid *fid, struct key *key)
+ struct afs_fid *fid, struct key *key,
+ afs_dataversion_t *_dir_version)
{
struct afs_super_info *as = dir->i_sb->s_fs_info;
struct afs_lookup_one_cookie cookie = {
@@ -568,7 +573,7 @@ static int afs_do_lookup_one(struct inode *dir, struct dentry *dentry,
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie.ctx, key);
+ ret = afs_dir_iterate(dir, &cookie.ctx, key, _dir_version);
if (ret < 0) {
_leave(" = %d [iter]", ret);
return ret;
@@ -642,6 +647,7 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
struct afs_server *server;
struct afs_vnode *dvnode = AFS_FS_I(dir), *vnode;
struct inode *inode = NULL, *ti;
+ afs_dataversion_t data_version = READ_ONCE(dvnode->status.data_version);
int ret, i;
_enter("{%lu},%p{%pd},", dir->i_ino, dentry, dentry);
@@ -669,12 +675,14 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
cookie->fids[i].vid = as->volume->vid;
/* search the directory */
- ret = afs_dir_iterate(dir, &cookie->ctx, key);
+ ret = afs_dir_iterate(dir, &cookie->ctx, key, &data_version);
if (ret < 0) {
inode = ERR_PTR(ret);
goto out;
}
+ dentry->d_fsdata = (void *)(unsigned long)data_version;
+
inode = ERR_PTR(-ENOENT);
if (!cookie->found)
goto out;
@@ -951,7 +959,8 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
inode ? AFS_FS_I(inode) : NULL);
} else {
trace_afs_lookup(dvnode, &dentry->d_name,
- inode ? AFS_FS_I(inode) : NULL);
+ IS_ERR_OR_NULL(inode) ? NULL
+ : AFS_FS_I(inode));
}
return d;
}
@@ -968,7 +977,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
struct dentry *parent;
struct inode *inode;
struct key *key;
- long dir_version, de_version;
+ afs_dataversion_t dir_version;
+ long de_version;
int ret;
if (flags & LOOKUP_RCU)
@@ -1014,20 +1024,20 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
* on a 32-bit system, we only have 32 bits in the dentry to store the
* version.
*/
- dir_version = (long)dir->status.data_version;
+ dir_version = dir->status.data_version;
de_version = (long)dentry->d_fsdata;
- if (de_version == dir_version)
- goto out_valid;
+ if (de_version == (long)dir_version)
+ goto out_valid_noupdate;
- dir_version = (long)dir->invalid_before;
- if (de_version - dir_version >= 0)
+ dir_version = dir->invalid_before;
+ if (de_version - (long)dir_version >= 0)
goto out_valid;
_debug("dir modified");
afs_stat_v(dir, n_reval);
/* search the directory for this vnode */
- ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key);
+ ret = afs_do_lookup_one(&dir->vfs_inode, dentry, &fid, key, &dir_version);
switch (ret) {
case 0:
/* the filename maps to something */
@@ -1080,7 +1090,8 @@ static int afs_d_revalidate(struct dentry *dentry, unsigned int flags)
}
out_valid:
- dentry->d_fsdata = (void *)dir_version;
+ dentry->d_fsdata = (void *)(unsigned long)dir_version;
+out_valid_noupdate:
dput(parent);
key_put(key);
_leave(" = 1 [valid]");
@@ -1186,6 +1197,20 @@ static void afs_prep_for_new_inode(struct afs_fs_cursor *fc,
}
/*
+ * Note that a dentry got changed. We need to set d_fsdata to the data version
+ * number derived from the result of the operation. It doesn't matter if
+ * d_fsdata goes backwards as we'll just revalidate.
+ */
+static void afs_update_dentry_version(struct afs_fs_cursor *fc,
+ struct dentry *dentry,
+ struct afs_status_cb *scb)
+{
+ if (fc->ac.error == 0)
+ dentry->d_fsdata =
+ (void *)(unsigned long)scb->status.data_version;
+}
+
+/*
* create a directory on an AFS filesystem
*/
static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
@@ -1227,6 +1252,7 @@ static int afs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1319,6 +1345,7 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry)
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, scb);
+ afs_update_dentry_version(&fc, dentry, scb);
ret = afs_end_vnode_operation(&fc);
if (ret == 0) {
afs_dir_remove_subdir(dentry);
@@ -1458,6 +1485,7 @@ static int afs_unlink(struct inode *dir, struct dentry *dentry)
&data_version, &scb[0]);
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
&data_version_2, &scb[1]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
ret = afs_end_vnode_operation(&fc);
if (ret == 0 && !(scb[1].have_status || scb[1].have_error))
ret = afs_dir_remove_link(dvnode, dentry, key);
@@ -1526,6 +1554,7 @@ static int afs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1607,6 +1636,7 @@ static int afs_link(struct dentry *from, struct inode *dir,
afs_vnode_commit_status(&fc, vnode, fc.cb_break_2,
NULL, &scb[1]);
ihold(&vnode->vfs_inode);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
d_instantiate(dentry, &vnode->vfs_inode);
mutex_unlock(&vnode->io_lock);
@@ -1686,6 +1716,7 @@ static int afs_symlink(struct inode *dir, struct dentry *dentry,
afs_check_for_remote_deletion(&fc, dvnode);
afs_vnode_commit_status(&fc, dvnode, fc.cb_break,
&data_version, &scb[0]);
+ afs_update_dentry_version(&fc, dentry, &scb[0]);
afs_vnode_new_inode(&fc, dentry, &iget_data, &scb[1]);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
@@ -1791,6 +1822,17 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
}
+ /* This bit is potentially nasty as there's a potential race with
+ * afs_d_revalidate{,_rcu}(). We have to change d_fsdata on the dentry
+ * to reflect it's new parent's new data_version after the op, but
+ * d_revalidate may see old_dentry between the op having taken place
+ * and the version being updated.
+ *
+ * So drop the old_dentry for now to make other threads go through
+ * lookup instead - which we hold a lock against.
+ */
+ d_drop(old_dentry);
+
ret = -ERESTARTSYS;
if (afs_begin_vnode_operation(&fc, orig_dvnode, key, true)) {
afs_dataversion_t orig_data_version;
@@ -1802,9 +1844,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
if (orig_dvnode != new_dvnode) {
if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
- goto error_rehash;
+ goto error_rehash_old;
}
- new_data_version = new_dvnode->status.data_version;
+ new_data_version = new_dvnode->status.data_version + 1;
} else {
new_data_version = orig_data_version;
new_scb = &scb[0];
@@ -1827,7 +1869,7 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
- goto error_rehash;
+ goto error_rehash_old;
}
if (ret == 0) {
@@ -1853,10 +1895,26 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry,
drop_nlink(new_inode);
spin_unlock(&new_inode->i_lock);
}
+
+ /* Now we can update d_fsdata on the dentries to reflect their
+ * new parent's data_version.
+ *
+ * Note that if we ever implement RENAME_EXCHANGE, we'll have
+ * to update both dentries with opposing dir versions.
+ */
+ if (new_dvnode != orig_dvnode) {
+ afs_update_dentry_version(&fc, old_dentry, &scb[1]);
+ afs_update_dentry_version(&fc, new_dentry, &scb[1]);
+ } else {
+ afs_update_dentry_version(&fc, old_dentry, &scb[0]);
+ afs_update_dentry_version(&fc, new_dentry, &scb[0]);
+ }
d_move(old_dentry, new_dentry);
goto error_tmp;
}
+error_rehash_old:
+ d_rehash(new_dentry);
error_rehash:
if (rehash)
d_rehash(rehash);
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 56b69576274d..dd3c55c9101c 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -191,11 +191,13 @@ void afs_put_read(struct afs_read *req)
int i;
if (refcount_dec_and_test(&req->usage)) {
- for (i = 0; i < req->nr_pages; i++)
- if (req->pages[i])
- put_page(req->pages[i]);
- if (req->pages != req->array)
- kfree(req->pages);
+ if (req->pages) {
+ for (i = 0; i < req->nr_pages; i++)
+ if (req->pages[i])
+ put_page(req->pages[i]);
+ if (req->pages != req->array)
+ kfree(req->pages);
+ }
kfree(req);
}
}
diff --git a/fs/afs/vlclient.c b/fs/afs/vlclient.c
index d7e0fd3c00df..cfb0ac4bd039 100644
--- a/fs/afs/vlclient.c
+++ b/fs/afs/vlclient.c
@@ -56,23 +56,24 @@ static int afs_deliver_vl_get_entry_by_name_u(struct afs_call *call)
struct afs_uuid__xdr *xdr;
struct afs_uuid *uuid;
int j;
+ int n = entry->nr_servers;
tmp = ntohl(uvldb->serverFlags[i]);
if (tmp & AFS_VLSF_DONTUSE ||
(new_only && !(tmp & AFS_VLSF_NEWREPSITE)))
continue;
if (tmp & AFS_VLSF_RWVOL) {
- entry->fs_mask[i] |= AFS_VOL_VTM_RW;
+ entry->fs_mask[n] |= AFS_VOL_VTM_RW;
if (vlflags & AFS_VLF_BACKEXISTS)
- entry->fs_mask[i] |= AFS_VOL_VTM_BAK;
+ entry->fs_mask[n] |= AFS_VOL_VTM_BAK;
}
if (tmp & AFS_VLSF_ROVOL)
- entry->fs_mask[i] |= AFS_VOL_VTM_RO;
- if (!entry->fs_mask[i])
+ entry->fs_mask[n] |= AFS_VOL_VTM_RO;
+ if (!entry->fs_mask[n])
continue;
xdr = &uvldb->serverNumber[i];
- uuid = (struct afs_uuid *)&entry->fs_server[i];
+ uuid = (struct afs_uuid *)&entry->fs_server[n];
uuid->time_low = xdr->time_low;
uuid->time_mid = htons(ntohl(xdr->time_mid));
uuid->time_hi_and_version = htons(ntohl(xdr->time_hi_and_version));
diff --git a/fs/afs/yfsclient.c b/fs/afs/yfsclient.c
index 2575503170fc..ca2452806ebf 100644
--- a/fs/afs/yfsclient.c
+++ b/fs/afs/yfsclient.c
@@ -2171,7 +2171,7 @@ int yfs_fs_store_opaque_acl2(struct afs_fs_cursor *fc, const struct afs_acl *acl
key_serial(fc->key), vnode->fid.vid, vnode->fid.vnode);
size = round_up(acl->size, 4);
- call = afs_alloc_flat_call(net, &yfs_RXYFSStoreStatus,
+ call = afs_alloc_flat_call(net, &yfs_RXYFSStoreOpaqueACL2,
sizeof(__be32) * 2 +
sizeof(struct yfs_xdr_YFSFid) +
sizeof(__be32) + size,
diff --git a/fs/block_dev.c b/fs/block_dev.c
index a6f7c892cb4a..677cb364d33f 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct bio *bio;
bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
bool is_read = (iov_iter_rw(iter) == READ), is_sync;
- bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
- gfp_t gfp;
- ssize_t ret;
+ int ret = 0;
if ((pos | iov_iter_alignment(iter)) &
(bdev_logical_block_size(bdev) - 1))
return -EINVAL;
- if (nowait)
- gfp = GFP_NOWAIT;
- else
- gfp = GFP_KERNEL;
-
- bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
- if (!bio)
- return -EAGAIN;
+ bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
dio = container_of(bio, struct blkdev_dio, bio);
dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -384,10 +375,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
if (!is_poll)
blk_start_plug(&plug);
- ret = 0;
for (;;) {
- int err;
-
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9;
bio->bi_write_hint = iocb->ki_hint;
@@ -395,10 +383,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio->bi_end_io = blkdev_bio_end_io;
bio->bi_ioprio = iocb->ki_ioprio;
- err = bio_iov_iter_get_pages(bio, iter);
- if (unlikely(err)) {
- if (!ret)
- ret = err;
+ ret = bio_iov_iter_get_pages(bio, iter);
+ if (unlikely(ret)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
break;
@@ -413,14 +399,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
task_io_account_write(bio->bi_iter.bi_size);
}
- /*
- * Tell underlying layer to not block for resource shortage.
- * And if we would have blocked, return error inline instead
- * of through the bio->bi_end_io() callback.
- */
- if (nowait)
- bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
-
dio->size += bio->bi_iter.bi_size;
pos += bio->bi_iter.bi_size;
@@ -434,12 +412,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
qc = submit_bio(bio);
- if (qc == BLK_QC_T_EAGAIN) {
- if (!ret)
- ret = -EAGAIN;
- goto error;
- }
- ret = dio->size;
if (polled)
WRITE_ONCE(iocb->ki_cookie, qc);
@@ -460,20 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
atomic_inc(&dio->ref);
}
- qc = submit_bio(bio);
- if (qc == BLK_QC_T_EAGAIN) {
- if (!ret)
- ret = -EAGAIN;
- goto error;
- }
- ret = dio->size;
-
- bio = bio_alloc(gfp, nr_pages);
- if (!bio) {
- if (!ret)
- ret = -EAGAIN;
- goto error;
- }
+ submit_bio(bio);
+ bio = bio_alloc(GFP_KERNEL, nr_pages);
}
if (!is_poll)
@@ -493,16 +453,13 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
__set_current_state(TASK_RUNNING);
-out:
if (!ret)
ret = blk_status_to_errno(dio->bio.bi_status);
+ if (likely(!ret))
+ ret = dio->size;
bio_put(&dio->bio);
return ret;
-error:
- if (!is_poll)
- blk_finish_plug(&plug);
- goto out;
}
static ssize_t
@@ -1754,7 +1711,10 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
/* finish claiming */
mutex_lock(&bdev->bd_mutex);
- bd_finish_claiming(bdev, whole, holder);
+ if (!res)
+ bd_finish_claiming(bdev, whole, holder);
+ else
+ bd_abort_claiming(bdev, whole, holder);
/*
* Block event polling for write claims if requested. Any
* write holder makes the write_holder state stick until
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 299e11e6c554..94660063a162 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -401,7 +401,6 @@ struct btrfs_dev_replace {
struct raid_kobject {
u64 flags;
struct kobject kobj;
- struct list_head list;
};
/*
@@ -915,8 +914,6 @@ struct btrfs_fs_info {
u32 thread_pool_size;
struct kobject *space_info_kobj;
- struct list_head pending_raid_kobjs;
- spinlock_t pending_raid_kobjs_lock; /* uncontended */
u64 total_pinned;
@@ -2698,7 +2695,6 @@ int btrfs_can_relocate(struct btrfs_fs_info *fs_info, u64 bytenr);
int btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 bytes_used, u64 type, u64 chunk_offset,
u64 size);
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info);
struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
struct btrfs_fs_info *fs_info,
const u64 chunk_offset);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 5f7ee70b3d1a..97beb351a10c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2683,8 +2683,6 @@ int open_ctree(struct super_block *sb,
INIT_LIST_HEAD(&fs_info->delayed_iputs);
INIT_LIST_HEAD(&fs_info->delalloc_roots);
INIT_LIST_HEAD(&fs_info->caching_block_groups);
- INIT_LIST_HEAD(&fs_info->pending_raid_kobjs);
- spin_lock_init(&fs_info->pending_raid_kobjs_lock);
spin_lock_init(&fs_info->delalloc_root_lock);
spin_lock_init(&fs_info->trans_lock);
spin_lock_init(&fs_info->fs_roots_radix_lock);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d3b58e388535..8b7eb22d508a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4,6 +4,7 @@
*/
#include <linux/sched.h>
+#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
@@ -7888,33 +7889,6 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
return 0;
}
-/* link_block_group will queue up kobjects to add when we're reclaim-safe */
-void btrfs_add_raid_kobjects(struct btrfs_fs_info *fs_info)
-{
- struct btrfs_space_info *space_info;
- struct raid_kobject *rkobj;
- LIST_HEAD(list);
- int ret = 0;
-
- spin_lock(&fs_info->pending_raid_kobjs_lock);
- list_splice_init(&fs_info->pending_raid_kobjs, &list);
- spin_unlock(&fs_info->pending_raid_kobjs_lock);
-
- list_for_each_entry(rkobj, &list, list) {
- space_info = btrfs_find_space_info(fs_info, rkobj->flags);
-
- ret = kobject_add(&rkobj->kobj, &space_info->kobj,
- "%s", btrfs_bg_type_to_raid_name(rkobj->flags));
- if (ret) {
- kobject_put(&rkobj->kobj);
- break;
- }
- }
- if (ret)
- btrfs_warn(fs_info,
- "failed to add kobject for block cache, ignoring");
-}
-
static void link_block_group(struct btrfs_block_group_cache *cache)
{
struct btrfs_space_info *space_info = cache->space_info;
@@ -7929,18 +7903,36 @@ static void link_block_group(struct btrfs_block_group_cache *cache)
up_write(&space_info->groups_sem);
if (first) {
- struct raid_kobject *rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
+ struct raid_kobject *rkobj;
+ unsigned int nofs_flag;
+ int ret;
+
+ /*
+ * Setup a NOFS context because kobject_add(), deep in its call
+ * chain, does GFP_KERNEL allocations, and we are often called
+ * in a context where if reclaim is triggered we can deadlock
+ * (we are either holding a transaction handle or some lock
+ * required for a transaction commit).
+ */
+ nofs_flag = memalloc_nofs_save();
+ rkobj = kzalloc(sizeof(*rkobj), GFP_KERNEL);
if (!rkobj) {
+ memalloc_nofs_restore(nofs_flag);
btrfs_warn(cache->fs_info,
"couldn't alloc memory for raid level kobject");
return;
}
rkobj->flags = cache->flags;
kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
-
- spin_lock(&fs_info->pending_raid_kobjs_lock);
- list_add_tail(&rkobj->list, &fs_info->pending_raid_kobjs);
- spin_unlock(&fs_info->pending_raid_kobjs_lock);
+ ret = kobject_add(&rkobj->kobj, &space_info->kobj, "%s",
+ btrfs_bg_type_to_raid_name(rkobj->flags));
+ memalloc_nofs_restore(nofs_flag);
+ if (ret) {
+ kobject_put(&rkobj->kobj);
+ btrfs_warn(fs_info,
+ "failed to add kobject for block cache, ignoring");
+ return;
+ }
space_info->block_group_kobjs[index] = &rkobj->kobj;
}
}
@@ -8206,7 +8198,6 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
inc_block_group_ro(cache, 1);
}
- btrfs_add_raid_kobjects(info);
btrfs_init_global_block_rsv(info);
ret = check_chunk_block_group_mappings(info);
error:
@@ -8975,6 +8966,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
struct btrfs_device *device;
struct list_head *devices;
u64 group_trimmed;
+ u64 range_end = U64_MAX;
u64 start;
u64 end;
u64 trimmed = 0;
@@ -8984,16 +8976,23 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
int dev_ret = 0;
int ret = 0;
+ /*
+ * Check range overflow if range->len is set.
+ * The default range->len is U64_MAX.
+ */
+ if (range->len != U64_MAX &&
+ check_add_overflow(range->start, range->len, &range_end))
+ return -EINVAL;
+
cache = btrfs_lookup_first_block_group(fs_info, range->start);
for (; cache; cache = next_block_group(cache)) {
- if (cache->key.objectid >= (range->start + range->len)) {
+ if (cache->key.objectid >= range_end) {
btrfs_put_block_group(cache);
break;
}
start = max(range->start, cache->key.objectid);
- end = min(range->start + range->len,
- cache->key.objectid + cache->key.offset);
+ end = min(range_end, cache->key.objectid + cache->key.offset);
if (end - start >= range->minlen) {
if (!block_group_cache_done(cache)) {
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 1ff438fd5bc2..eeb75281894e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3628,6 +3628,13 @@ void wait_on_extent_buffer_writeback(struct extent_buffer *eb)
TASK_UNINTERRUPTIBLE);
}
+static void end_extent_buffer_writeback(struct extent_buffer *eb)
+{
+ clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
+ smp_mb__after_atomic();
+ wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
+}
+
/*
* Lock eb pages and flush the bio if we can't the locks
*
@@ -3699,8 +3706,11 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
if (!trylock_page(p)) {
if (!flush) {
- ret = flush_write_bio(epd);
- if (ret < 0) {
+ int err;
+
+ err = flush_write_bio(epd);
+ if (err < 0) {
+ ret = err;
failed_page_nr = i;
goto err_unlock;
}
@@ -3715,16 +3725,23 @@ err_unlock:
/* Unlock already locked pages */
for (i = 0; i < failed_page_nr; i++)
unlock_page(eb->pages[i]);
+ /*
+ * Clear EXTENT_BUFFER_WRITEBACK and wake up anyone waiting on it.
+ * Also set back EXTENT_BUFFER_DIRTY so future attempts to this eb can
+ * be made and undo everything done before.
+ */
+ btrfs_tree_lock(eb);
+ spin_lock(&eb->refs_lock);
+ set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
+ end_extent_buffer_writeback(eb);
+ spin_unlock(&eb->refs_lock);
+ percpu_counter_add_batch(&fs_info->dirty_metadata_bytes, eb->len,
+ fs_info->dirty_metadata_batch);
+ btrfs_clear_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN);
+ btrfs_tree_unlock(eb);
return ret;
}
-static void end_extent_buffer_writeback(struct extent_buffer *eb)
-{
- clear_bit(EXTENT_BUFFER_WRITEBACK, &eb->bflags);
- smp_mb__after_atomic();
- wake_up_bit(&eb->bflags, EXTENT_BUFFER_WRITEBACK);
-}
-
static void set_btree_ioerr(struct page *page)
{
struct extent_buffer *eb = (struct extent_buffer *)page->private;
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 6c8297bcfeb7..1bfd7e34f31e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4985,7 +4985,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
BTRFS_I(inode),
LOG_OTHER_INODE_ALL,
0, LLONG_MAX, ctx);
- iput(inode);
+ btrfs_add_delayed_iput(inode);
}
}
continue;
@@ -5000,7 +5000,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
LOG_OTHER_INODE, 0, LLONG_MAX, ctx);
if (ret) {
- iput(inode);
+ btrfs_add_delayed_iput(inode);
continue;
}
@@ -5009,7 +5009,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
key.offset = 0;
ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
if (ret < 0) {
- iput(inode);
+ btrfs_add_delayed_iput(inode);
continue;
}
@@ -5056,7 +5056,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
}
path->slots[0]++;
}
- iput(inode);
+ btrfs_add_delayed_iput(inode);
}
return ret;
@@ -5689,7 +5689,7 @@ process_leaf:
}
if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) {
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
break;
}
@@ -5701,7 +5701,7 @@ process_leaf:
if (!ret &&
btrfs_must_commit_transaction(trans, BTRFS_I(di_inode)))
ret = 1;
- iput(di_inode);
+ btrfs_add_delayed_iput(di_inode);
if (ret)
goto next_dir_inode;
if (ctx->log_new_dentries) {
@@ -5848,7 +5848,7 @@ static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
if (!ret && ctx && ctx->log_new_dentries)
ret = log_new_dir_dentries(trans, root,
BTRFS_I(dir_inode), ctx);
- iput(dir_inode);
+ btrfs_add_delayed_iput(dir_inode);
if (ret)
goto out;
}
@@ -5891,7 +5891,7 @@ static int log_new_ancestors(struct btrfs_trans_handle *trans,
ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
LOG_INODE_EXISTS,
0, LLONG_MAX, ctx);
- iput(inode);
+ btrfs_add_delayed_iput(inode);
if (ret)
return ret;
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d74b74ca07af..a447d3ec48d5 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3087,16 +3087,6 @@ static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
if (ret)
return ret;
- /*
- * We add the kobjects here (and after forcing data chunk creation)
- * since relocation is the only place we'll create chunks of a new
- * type at runtime. The only place where we'll remove the last
- * chunk of a type is the call immediately below this one. Even
- * so, we're protected against races with the cleaner thread since
- * we're covered by the delete_unused_bgs_mutex.
- */
- btrfs_add_raid_kobjects(fs_info);
-
trans = btrfs_start_trans_remove_block_group(root->fs_info,
chunk_offset);
if (IS_ERR(trans)) {
@@ -3223,9 +3213,6 @@ static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
btrfs_end_transaction(trans);
if (ret < 0)
return ret;
-
- btrfs_add_raid_kobjects(fs_info);
-
return 1;
}
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e078cc55b989..b3c8b886bf64 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -913,8 +913,9 @@ get_more_pages:
if (page_offset(page) >= ceph_wbc.i_size) {
dout("%p page eof %llu\n",
page, ceph_wbc.i_size);
- if (ceph_wbc.size_stable ||
- page_offset(page) >= i_size_read(inode))
+ if ((ceph_wbc.size_stable ||
+ page_offset(page) >= i_size_read(inode)) &&
+ clear_page_dirty_for_io(page))
mapping->a_ops->invalidatepage(page,
0, PAGE_SIZE);
unlock_page(page);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index d98dcd976c80..ce0f5658720a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1301,6 +1301,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
+ struct ceph_buffer *old_blob = NULL;
struct cap_msg_args arg;
int held, revoking;
int wake = 0;
@@ -1365,7 +1366,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
ci->i_requested_max_size = arg.max_size;
if (flushing & CEPH_CAP_XATTR_EXCL) {
- __ceph_build_xattrs_blob(ci);
+ old_blob = __ceph_build_xattrs_blob(ci);
arg.xattr_version = ci->i_xattrs.version;
arg.xattr_buf = ci->i_xattrs.blob;
} else {
@@ -1409,6 +1410,8 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
+
ret = send_cap_msg(&arg);
if (ret < 0) {
dout("error sending cap msg, must requeue %p\n", inode);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 791f84a13bb8..18500edefc56 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -736,6 +736,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
int issued, new_issued, info_caps;
struct timespec64 mtime, atime, ctime;
struct ceph_buffer *xattr_blob = NULL;
+ struct ceph_buffer *old_blob = NULL;
struct ceph_string *pool_ns = NULL;
struct ceph_cap *new_cap = NULL;
int err = 0;
@@ -881,7 +882,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) &&
le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
if (ci->i_xattrs.blob)
- ceph_buffer_put(ci->i_xattrs.blob);
+ old_blob = ci->i_xattrs.blob;
ci->i_xattrs.blob = xattr_blob;
if (xattr_blob)
memcpy(ci->i_xattrs.blob->vec.iov_base,
@@ -1022,8 +1023,8 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
out:
if (new_cap)
ceph_put_cap(mdsc, new_cap);
- if (xattr_blob)
- ceph_buffer_put(xattr_blob);
+ ceph_buffer_put(old_blob);
+ ceph_buffer_put(xattr_blob);
ceph_put_string(pool_ns);
return err;
}
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index ac9b53b89365..5083e238ad15 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -111,8 +111,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
req->r_wait_for_completion = ceph_lock_wait_for_completion;
err = ceph_mdsc_do_request(mdsc, inode, req);
-
- if (operation == CEPH_MDS_OP_GETFILELOCK) {
+ if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
fl->fl_type = F_RDLCK;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 4c6494eb02b5..ccfcc66aaf44 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -465,6 +465,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
struct inode *inode = &ci->vfs_inode;
struct ceph_cap_snap *capsnap;
struct ceph_snap_context *old_snapc, *new_snapc;
+ struct ceph_buffer *old_blob = NULL;
int used, dirty;
capsnap = kzalloc(sizeof(*capsnap), GFP_NOFS);
@@ -541,7 +542,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
capsnap->gid = inode->i_gid;
if (dirty & CEPH_CAP_XATTR_EXCL) {
- __ceph_build_xattrs_blob(ci);
+ old_blob = __ceph_build_xattrs_blob(ci);
capsnap->xattr_blob =
ceph_buffer_get(ci->i_xattrs.blob);
capsnap->xattr_version = ci->i_xattrs.version;
@@ -584,6 +585,7 @@ update_snapc:
}
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
kfree(capsnap);
ceph_put_snap_context(old_snapc);
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index d2352fd95dbc..6b9f1ee7de85 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -926,7 +926,7 @@ extern int ceph_getattr(const struct path *path, struct kstat *stat,
int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
ssize_t __ceph_getxattr(struct inode *, const char *, void *, size_t);
extern ssize_t ceph_listxattr(struct dentry *, char *, size_t);
-extern void __ceph_build_xattrs_blob(struct ceph_inode_info *ci);
+extern struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci);
extern void __ceph_destroy_xattrs(struct ceph_inode_info *ci);
extern const struct xattr_handler *ceph_xattr_handlers[];
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 37b458a9af3a..939eab7aa219 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -754,12 +754,15 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
/*
* If there are dirty xattrs, reencode xattrs into the prealloc_blob
- * and swap into place.
+ * and swap into place. It returns the old i_xattrs.blob (or NULL) so
+ * that it can be freed by the caller as the i_ceph_lock is likely to be
+ * held.
*/
-void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
+struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
+ struct ceph_buffer *old_blob = NULL;
void *dest;
dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
@@ -790,12 +793,14 @@ void __ceph_build_xattrs_blob(struct ceph_inode_info *ci)
dest - ci->i_xattrs.prealloc_blob->vec.iov_base;
if (ci->i_xattrs.blob)
- ceph_buffer_put(ci->i_xattrs.blob);
+ old_blob = ci->i_xattrs.blob;
ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
ci->i_xattrs.prealloc_blob = NULL;
ci->i_xattrs.dirty = false;
ci->i_xattrs.version++;
}
+
+ return old_blob;
}
static inline int __get_request_mask(struct inode *in) {
@@ -1036,6 +1041,7 @@ int __ceph_setxattr(struct inode *inode, const char *name,
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf = NULL;
+ struct ceph_buffer *old_blob = NULL;
int issued;
int err;
int dirty = 0;
@@ -1109,13 +1115,15 @@ retry:
struct ceph_buffer *blob;
spin_unlock(&ci->i_ceph_lock);
- dout(" preaallocating new blob size=%d\n", required_blob_size);
+ ceph_buffer_put(old_blob); /* Shouldn't be required */
+ dout(" pre-allocating new blob size=%d\n", required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto do_sync_unlocked;
spin_lock(&ci->i_ceph_lock);
+ /* prealloc_blob can't be released while holding i_ceph_lock */
if (ci->i_xattrs.prealloc_blob)
- ceph_buffer_put(ci->i_xattrs.prealloc_blob);
+ old_blob = ci->i_xattrs.prealloc_blob;
ci->i_xattrs.prealloc_blob = blob;
goto retry;
}
@@ -1131,6 +1139,7 @@ retry:
}
spin_unlock(&ci->i_ceph_lock);
+ ceph_buffer_put(old_blob);
if (lock_snap_rwsem)
up_read(&mdsc->snap_rwsem);
if (dirty)
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index b16219e5dac9..350bc3061656 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -16,7 +16,7 @@ config CIFS
select CRYPTO_GCM
select CRYPTO_ECB
select CRYPTO_AES
- select CRYPTO_DES
+ select CRYPTO_LIB_DES
select KEYS
help
This is the client VFS module for the SMB3 family of NAS protocols,
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 3289b566463f..4e2f74894e9b 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1601,7 +1601,6 @@ MODULE_DESCRIPTION
("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
"also older servers complying with the SNIA CIFS Specification)");
MODULE_VERSION(CIFS_VERSION);
-MODULE_SOFTDEP("pre: des");
MODULE_SOFTDEP("pre: ecb");
MODULE_SOFTDEP("pre: hmac");
MODULE_SOFTDEP("pre: md4");
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4b21a90015a9..99caf77df4a2 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -152,5 +152,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
-#define CIFS_VERSION "2.21"
+#define CIFS_VERSION "2.22"
#endif /* _CIFSFS_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index e23234207fc2..592a6cea2b79 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -579,6 +579,7 @@ extern void rqst_page_get_length(struct smb_rqst *rqst, unsigned int page,
unsigned int *len, unsigned int *offset);
void extract_unc_hostname(const char *unc, const char **h, size_t *len);
+int copy_path_name(char *dst, const char *src);
#ifdef CONFIG_CIFS_DFS_UPCALL
static inline int get_dfs_path(const unsigned int xid, struct cifs_ses *ses,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index e2f95965065d..3907653e63c7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -942,10 +942,8 @@ PsxDelete:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB add path length overrun check */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
@@ -1015,10 +1013,8 @@ DelFileRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->fileName, name);
}
pSMB->SearchAttributes =
cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM);
@@ -1062,10 +1058,8 @@ RmDirRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->DirName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->DirName, name);
}
pSMB->BufferFormat = 0x04;
@@ -1107,10 +1101,8 @@ MkDirRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->DirName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->DirName, name);
}
pSMB->BufferFormat = 0x04;
@@ -1157,10 +1149,8 @@ PsxCreat:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, name);
}
params = 6 + name_len;
@@ -1324,11 +1314,9 @@ OldOpenRetry:
fileName, PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve check for buffer overruns BB */
+ } else {
count = 0; /* no pad */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, fileName, name_len);
+ name_len = copy_path_name(pSMB->fileName, fileName);
}
if (*pOplock & REQ_OPLOCK)
pSMB->OpenFlags = cpu_to_le16(REQ_OPLOCK);
@@ -1442,11 +1430,8 @@ openRetry:
/* BB improve check for buffer overruns BB */
/* no pad */
count = 0;
- name_len = strnlen(path, PATH_MAX);
- /* trailing null */
- name_len++;
+ name_len = copy_path_name(req->fileName, path);
req->NameLength = cpu_to_le16(name_len);
- strncpy(req->fileName, path, name_len);
}
if (*oplock & REQ_OPLOCK)
@@ -2812,15 +2797,10 @@ renameRetry:
remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(from_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, from_name, name_len);
- name_len2 = strnlen(to_name, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, from_name);
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
- name_len2++; /* trailing null */
name_len2++; /* signature byte */
}
@@ -2962,15 +2942,10 @@ copyRetry:
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fromName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, fromName, name_len);
- name_len2 = strnlen(toName, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, fromName);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], toName, name_len2);
- name_len2++; /* trailing null */
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, toName);
name_len2++; /* signature byte */
}
@@ -3021,10 +2996,8 @@ createSymLinkRetry:
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fromName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fromName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fromName);
}
params = 6 + name_len;
pSMB->MaxSetupCount = 0;
@@ -3044,10 +3017,8 @@ createSymLinkRetry:
PATH_MAX, nls_codepage, remap);
name_len_target++; /* trailing null */
name_len_target *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len_target = strnlen(toName, PATH_MAX);
- name_len_target++; /* trailing null */
- strncpy(data_offset, toName, name_len_target);
+ } else {
+ name_len_target = copy_path_name(data_offset, toName);
}
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3109,10 +3080,8 @@ createHardLinkRetry:
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(toName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, toName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, toName);
}
params = 6 + name_len;
pSMB->MaxSetupCount = 0;
@@ -3131,10 +3100,8 @@ createHardLinkRetry:
PATH_MAX, nls_codepage, remap);
name_len_target++; /* trailing null */
name_len_target *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len_target = strnlen(fromName, PATH_MAX);
- name_len_target++; /* trailing null */
- strncpy(data_offset, fromName, name_len_target);
+ } else {
+ name_len_target = copy_path_name(data_offset, fromName);
}
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -3213,15 +3180,10 @@ winCreateHardLinkRetry:
remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(from_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->OldFileName, from_name, name_len);
- name_len2 = strnlen(to_name, PATH_MAX);
- name_len2++; /* trailing null */
+ } else {
+ name_len = copy_path_name(pSMB->OldFileName, from_name);
pSMB->OldFileName[name_len] = 0x04; /* 2nd buffer format */
- strncpy(&pSMB->OldFileName[name_len + 1], to_name, name_len2);
- name_len2++; /* trailing null */
+ name_len2 = copy_path_name(pSMB->OldFileName+name_len+1, to_name);
name_len2++; /* signature byte */
}
@@ -3271,10 +3233,8 @@ querySymLinkRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3691,10 +3651,8 @@ queryAclRetry:
name_len *= 2;
pSMB->FileName[name_len] = 0;
pSMB->FileName[name_len+1] = 0;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -3776,10 +3734,8 @@ setAclRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
pSMB->MaxParameterCount = cpu_to_le16(2);
@@ -4184,9 +4140,7 @@ QInfRetry:
name_len++; /* trailing null */
name_len *= 2;
} else {
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
pSMB->BufferFormat = 0x04;
name_len++; /* account for buffer type byte */
@@ -4321,10 +4275,8 @@ QPathInfoRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4490,10 +4442,8 @@ UnixQPathInfoRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(searchName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
@@ -4593,17 +4543,16 @@ findFirstRetry:
pSMB->FileName[name_len+1] = 0;
name_len += 2;
}
- } else { /* BB add check for overrun of SMB buf BB */
- name_len = strnlen(searchName, PATH_MAX);
-/* BB fix here and in unicode clause above ie
- if (name_len > buffersize-header)
- free buffer exit; BB */
- strncpy(pSMB->FileName, searchName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, searchName);
if (msearch) {
- pSMB->FileName[name_len] = CIFS_DIR_SEP(cifs_sb);
- pSMB->FileName[name_len+1] = '*';
- pSMB->FileName[name_len+2] = 0;
- name_len += 3;
+ if (WARN_ON_ONCE(name_len > PATH_MAX-2))
+ name_len = PATH_MAX-2;
+ /* overwrite nul byte */
+ pSMB->FileName[name_len-1] = CIFS_DIR_SEP(cifs_sb);
+ pSMB->FileName[name_len] = '*';
+ pSMB->FileName[name_len+1] = 0;
+ name_len += 2;
}
}
@@ -4898,10 +4847,8 @@ GetInodeNumberRetry:
remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, search_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, search_name);
}
params = 2 /* level */ + 4 /* rsrvd */ + name_len /* incl null */ ;
@@ -5008,9 +4955,7 @@ getDFSRetry:
name_len++; /* trailing null */
name_len *= 2;
} else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(search_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->RequestFileName, search_name, name_len);
+ name_len = copy_path_name(pSMB->RequestFileName, search_name);
}
if (ses->server->sign)
@@ -5663,10 +5608,8 @@ SetEOFRetry:
PATH_MAX, cifs_sb->local_nls, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(file_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, file_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, file_name);
}
params = 6 + name_len;
data_count = sizeof(struct file_end_of_file_info);
@@ -5959,10 +5902,8 @@ SetTimesRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
@@ -6040,10 +5981,8 @@ SetAttrLgcyRetry:
PATH_MAX, nls_codepage);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->fileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->fileName, fileName);
}
pSMB->attr = cpu_to_le16(dos_attrs);
pSMB->BufferFormat = 0x04;
@@ -6203,10 +6142,8 @@ setPermsRetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(file_name, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, file_name, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, file_name);
}
params = 6 + name_len;
@@ -6298,10 +6235,8 @@ QAllEAsRetry:
PATH_MAX, nls_codepage, remap);
list_len++; /* trailing null */
list_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- list_len = strnlen(searchName, PATH_MAX);
- list_len++; /* trailing null */
- strncpy(pSMB->FileName, searchName, list_len);
+ } else {
+ list_len = copy_path_name(pSMB->FileName, searchName);
}
params = 2 /* level */ + 4 /* reserved */ + list_len /* includes NUL */;
@@ -6480,10 +6415,8 @@ SetEARetry:
PATH_MAX, nls_codepage, remap);
name_len++; /* trailing null */
name_len *= 2;
- } else { /* BB improve the check for buffer overruns BB */
- name_len = strnlen(fileName, PATH_MAX);
- name_len++; /* trailing null */
- strncpy(pSMB->FileName, fileName, name_len);
+ } else {
+ name_len = copy_path_name(pSMB->FileName, fileName);
}
params = 6 + name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a4830ced0f98..5299effa6f7d 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1113,6 +1113,7 @@ cifs_demultiplex_thread(void *p)
mempool_resize(cifs_req_poolp, length + cifs_min_rcv);
set_freezable();
+ allow_kernel_signal(SIGKILL);
while (server->tcpStatus != CifsExiting) {
if (try_to_freeze())
continue;
@@ -2980,6 +2981,7 @@ static int
cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
{
int rc = 0;
+ int is_domain = 0;
const char *delim, *payload;
char *desc;
ssize_t len;
@@ -3027,6 +3029,7 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
rc = PTR_ERR(key);
goto out_err;
}
+ is_domain = 1;
}
down_read(&key->sem);
@@ -3084,6 +3087,26 @@ cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
goto out_key_put;
}
+ /*
+ * If we have a domain key then we must set the domainName in the
+ * for the request.
+ */
+ if (is_domain && ses->domainName) {
+ vol->domainname = kstrndup(ses->domainName,
+ strlen(ses->domainName),
+ GFP_KERNEL);
+ if (!vol->domainname) {
+ cifs_dbg(FYI, "Unable to allocate %zd bytes for "
+ "domain\n", len);
+ rc = -ENOMEM;
+ kfree(vol->username);
+ vol->username = NULL;
+ kzfree(vol->password);
+ vol->password = NULL;
+ goto out_key_put;
+ }
+ }
+
out_key_put:
up_read(&key->sem);
key_put(key);
@@ -4208,16 +4231,19 @@ build_unc_path_to_root(const struct smb_vol *vol,
strlen(vol->prepath) + 1 : 0;
unsigned int unc_len = strnlen(vol->UNC, MAX_TREE_SIZE + 1);
+ if (unc_len > MAX_TREE_SIZE)
+ return ERR_PTR(-EINVAL);
+
full_path = kmalloc(unc_len + pplen + 1, GFP_KERNEL);
if (full_path == NULL)
return ERR_PTR(-ENOMEM);
- strncpy(full_path, vol->UNC, unc_len);
+ memcpy(full_path, vol->UNC, unc_len);
pos = full_path + unc_len;
if (pplen) {
*pos = CIFS_DIR_SEP(cifs_sb);
- strncpy(pos + 1, vol->prepath, pplen);
+ memcpy(pos + 1, vol->prepath, pplen);
pos += pplen;
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index f26a48dd2e39..be424e81e3ad 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -69,11 +69,10 @@ cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb,
return full_path;
if (dfsplen)
- strncpy(full_path, tcon->treeName, dfsplen);
+ memcpy(full_path, tcon->treeName, dfsplen);
full_path[dfsplen] = CIFS_DIR_SEP(cifs_sb);
- strncpy(full_path + dfsplen + 1, vol->prepath, pplen);
+ memcpy(full_path + dfsplen + 1, vol->prepath, pplen);
convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb));
- full_path[dfsplen + pplen] = 0; /* add trailing null */
return full_path;
}
diff --git a/fs/cifs/export.c b/fs/cifs/export.c
index ce8b7f677c58..eb0bb8ca8e63 100644
--- a/fs/cifs/export.c
+++ b/fs/cifs/export.c
@@ -24,7 +24,7 @@
*/
/*
- * See Documentation/filesystems/nfs/Exporting
+ * See Documentation/filesystems/nfs/exporting.rst
* and examples in fs/exportfs
*
* Since cifs is a network file system, an "fsid" must be included for
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index f383877a6511..5ad83bdb9bea 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -1011,3 +1011,25 @@ void extract_unc_hostname(const char *unc, const char **h, size_t *len)
*h = unc;
*len = end - unc;
}
+
+/**
+ * copy_path_name - copy src path to dst, possibly truncating
+ *
+ * returns number of bytes written (including trailing nul)
+ */
+int copy_path_name(char *dst, const char *src)
+{
+ int name_len;
+
+ /*
+ * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
+ * will truncate and strlen(dst) will be PATH_MAX-1
+ */
+ name_len = strscpy(dst, src, PATH_MAX);
+ if (WARN_ON_ONCE(name_len < 0))
+ name_len = PATH_MAX-1;
+
+ /* we count the trailing nul */
+ name_len++;
+ return name_len;
+}
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index dcd49ad60c83..4c764ff7edd2 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -159,13 +159,16 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
const struct nls_table *nls_cp)
{
char *bcc_ptr = *pbcc_area;
+ int len;
/* copy user */
/* BB what about null user mounts - check that we do this BB */
/* copy user */
if (ses->user_name != NULL) {
- strncpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
- bcc_ptr += strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+ len = strscpy(bcc_ptr, ses->user_name, CIFS_MAX_USERNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_USERNAME_LEN - 1;
+ bcc_ptr += len;
}
/* else null user mount */
*bcc_ptr = 0;
@@ -173,8 +176,10 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
/* copy domain */
if (ses->domainName != NULL) {
- strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
- bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ len = strscpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ if (WARN_ON_ONCE(len < 0))
+ len = CIFS_MAX_DOMAINNAME_LEN - 1;
+ bcc_ptr += len;
} /* else we will send a null domain name
so the server will default to its own domain */
*bcc_ptr = 0;
@@ -242,9 +247,10 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
kfree(ses->serverOS);
- ses->serverOS = kzalloc(len + 1, GFP_KERNEL);
+ ses->serverOS = kmalloc(len + 1, GFP_KERNEL);
if (ses->serverOS) {
- strncpy(ses->serverOS, bcc_ptr, len);
+ memcpy(ses->serverOS, bcc_ptr, len);
+ ses->serverOS[len] = 0;
if (strncmp(ses->serverOS, "OS/2", 4) == 0)
cifs_dbg(FYI, "OS/2 server\n");
}
@@ -258,9 +264,11 @@ static void decode_ascii_ssetup(char **pbcc_area, __u16 bleft,
kfree(ses->serverNOS);
- ses->serverNOS = kzalloc(len + 1, GFP_KERNEL);
- if (ses->serverNOS)
- strncpy(ses->serverNOS, bcc_ptr, len);
+ ses->serverNOS = kmalloc(len + 1, GFP_KERNEL);
+ if (ses->serverNOS) {
+ memcpy(ses->serverNOS, bcc_ptr, len);
+ ses->serverNOS[len] = 0;
+ }
bcc_ptr += len + 1;
bleft -= len + 1;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index a5bc1b671c12..64a5864127be 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -3489,7 +3489,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
unsigned int buflen)
{
- sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
+ void *addr;
+ /*
+ * VMAP_STACK (at least) puts the stack into the vmalloc address space
+ */
+ if (is_vmalloc_addr(buf))
+ addr = vmalloc_to_page(buf);
+ else
+ addr = virt_to_page(buf);
+ sg_set_page(sg, addr, buflen, offset_in_page(buf));
}
/* Assumes the first rqst has a transform header as the first iov.
@@ -4070,7 +4078,6 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
{
int ret, length;
char *buf = server->smallbuf;
- char *tmpbuf;
struct smb2_sync_hdr *shdr;
unsigned int pdu_length = server->pdu_size;
unsigned int buf_size;
@@ -4100,18 +4107,15 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
return length;
next_is_large = server->large_buf;
- one_more:
+one_more:
shdr = (struct smb2_sync_hdr *)buf;
if (shdr->NextCommand) {
- if (next_is_large) {
- tmpbuf = server->bigbuf;
+ if (next_is_large)
next_buffer = (char *)cifs_buf_get();
- } else {
- tmpbuf = server->smallbuf;
+ else
next_buffer = (char *)cifs_small_buf_get();
- }
memcpy(next_buffer,
- tmpbuf + le32_to_cpu(shdr->NextCommand),
+ buf + le32_to_cpu(shdr->NextCommand),
pdu_length - le32_to_cpu(shdr->NextCommand));
}
@@ -4140,12 +4144,21 @@ receive_encrypted_standard(struct TCP_Server_Info *server,
pdu_length -= le32_to_cpu(shdr->NextCommand);
server->large_buf = next_is_large;
if (next_is_large)
- server->bigbuf = next_buffer;
+ server->bigbuf = buf = next_buffer;
else
- server->smallbuf = next_buffer;
-
- buf += le32_to_cpu(shdr->NextCommand);
+ server->smallbuf = buf = next_buffer;
goto one_more;
+ } else if (ret != 0) {
+ /*
+ * ret != 0 here means that we didn't get to handle_mid() thus
+ * server->smallbuf and server->bigbuf are still valid. We need
+ * to free next_buffer because it is not going to be used
+ * anywhere.
+ */
+ if (next_is_large)
+ free_rsp_buf(CIFS_LARGE_BUFFER, next_buffer);
+ else
+ free_rsp_buf(CIFS_SMALL_BUFFER, next_buffer);
}
return ret;
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index c8cd7b6cdda2..31e4a1b0b170 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -252,7 +252,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
if (tcon == NULL)
return 0;
- if (smb2_command == SMB2_TREE_CONNECT)
+ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0;
if (tcon->tidStatus == CifsExiting) {
@@ -1196,7 +1196,12 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
else
req->SecurityMode = 0;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ req->Capabilities = cpu_to_le32(SMB2_GLOBAL_CAP_DFS);
+#else
req->Capabilities = 0;
+#endif /* DFS_UPCALL */
+
req->Channel = 0; /* MBZ */
sess_data->iov[0].iov_base = (char *)req;
diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
index 2b6d87bfdf8e..39a938443e3e 100644
--- a/fs/cifs/smbencrypt.c
+++ b/fs/cifs/smbencrypt.c
@@ -11,13 +11,14 @@
*/
-#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/fips.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/random.h>
+#include <crypto/des.h>
#include "cifs_fs_sb.h"
#include "cifs_unicode.h"
#include "cifspdu.h"
@@ -58,19 +59,18 @@ static int
smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
{
unsigned char key2[8];
- struct crypto_cipher *tfm_des;
+ struct des_ctx ctx;
str_to_key(key, key2);
- tfm_des = crypto_alloc_cipher("des", 0, 0);
- if (IS_ERR(tfm_des)) {
- cifs_dbg(VFS, "could not allocate des crypto API\n");
- return PTR_ERR(tfm_des);
+ if (fips_enabled) {
+ cifs_dbg(VFS, "FIPS compliance enabled: DES not permitted\n");
+ return -ENOENT;
}
- crypto_cipher_setkey(tfm_des, key2, 8);
- crypto_cipher_encrypt_one(tfm_des, out, in);
- crypto_free_cipher(tfm_des);
+ des_expand_key(&ctx, key2, DES_KEY_SIZE);
+ des_encrypt(&ctx, out, in);
+ memzero_explicit(&ctx, sizeof(ctx));
return 0;
}
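
Besides switching to the DES library interface and refusing to run when fips_enabled is set, the hunk above wipes the expanded key schedule with memzero_explicit() so the clearing cannot be optimized away. A userspace approximation of that idea using a volatile pointer (a sketch of the pattern only, not the kernel helper):

#include <stddef.h>
#include <string.h>

/*
 * Clear sensitive material through a volatile pointer so the stores
 * cannot be discarded as dead writes the way a trailing memset() might.
 */
static void secure_wipe(void *buf, size_t len)
{
	volatile unsigned char *p = buf;

	while (len--)
		*p++ = 0;
}

int main(void)
{
	unsigned char key_schedule[128];

	memset(key_schedule, 0xAA, sizeof(key_schedule)); /* stand-in for key setup */
	/* ... use the key material here ... */
	secure_wipe(key_schedule, sizeof(key_schedule));
	return 0;
}

A plain memset() of a buffer that is never read again may be removed as a dead store; the volatile accesses cannot.
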
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 644d48c12ce8..3aec27e5eb82 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -63,11 +63,8 @@ static long coda_pioctl(struct file *filp, unsigned int cmd,
* Look up the pathname. Note that the pathname is in
* user memory, and namei takes care of this
*/
- if (data.follow)
- error = user_path(data.path, &path);
- else
- error = user_lpath(data.path, &path);
-
+ error = user_path_at(AT_FDCWD, data.path,
+ data.follow ? LOOKUP_FOLLOW : 0, &path);
if (error)
return error;
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 6e30949d9f77..a7ec2d3dff92 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -638,9 +638,6 @@ COMPATIBLE_IOCTL(PPPIOCDISCONN)
COMPATIBLE_IOCTL(PPPIOCATTCHAN)
COMPATIBLE_IOCTL(PPPIOCGCHAN)
COMPATIBLE_IOCTL(PPPIOCGL2TPSTATS)
-/* PPPOX */
-COMPATIBLE_IOCTL(PPPOEIOCSFWD)
-COMPATIBLE_IOCTL(PPPOEIOCDFWD)
/* Big A */
/* sparc only */
/* Big Q for sound/OSS */
diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h
index f752d83a9c44..520f1813e789 100644
--- a/fs/configfs/configfs_internal.h
+++ b/fs/configfs/configfs_internal.h
@@ -20,6 +20,15 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+struct configfs_fragment {
+ atomic_t frag_count;
+ struct rw_semaphore frag_sem;
+ bool frag_dead;
+};
+
+void put_fragment(struct configfs_fragment *);
+struct configfs_fragment *get_fragment(struct configfs_fragment *);
+
struct configfs_dirent {
atomic_t s_count;
int s_dependent_count;
@@ -34,6 +43,7 @@ struct configfs_dirent {
#ifdef CONFIG_LOCKDEP
int s_depth;
#endif
+ struct configfs_fragment *s_frag;
};
#define CONFIGFS_ROOT 0x0001
@@ -61,8 +71,8 @@ extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct in
extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
extern int configfs_create_bin_file(struct config_item *,
const struct configfs_bin_attribute *);
-extern int configfs_make_dirent(struct configfs_dirent *,
- struct dentry *, void *, umode_t, int);
+extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *,
+ void *, umode_t, int, struct configfs_fragment *);
extern int configfs_dirent_is_ready(struct configfs_dirent *);
extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
@@ -137,6 +147,7 @@ static inline void release_configfs_dirent(struct configfs_dirent * sd)
{
if (!(sd->s_type & CONFIGFS_ROOT)) {
kfree(sd->s_iattr);
+ put_fragment(sd->s_frag);
kmem_cache_free(configfs_dir_cachep, sd);
}
}
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 92112915de8e..79fc25aaa8cd 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -151,11 +151,38 @@ configfs_adjust_dir_dirent_depth_after_populate(struct configfs_dirent *sd)
#endif /* CONFIG_LOCKDEP */
+static struct configfs_fragment *new_fragment(void)
+{
+ struct configfs_fragment *p;
+
+ p = kmalloc(sizeof(struct configfs_fragment), GFP_KERNEL);
+ if (p) {
+ atomic_set(&p->frag_count, 1);
+ init_rwsem(&p->frag_sem);
+ p->frag_dead = false;
+ }
+ return p;
+}
+
+void put_fragment(struct configfs_fragment *frag)
+{
+ if (frag && atomic_dec_and_test(&frag->frag_count))
+ kfree(frag);
+}
+
+struct configfs_fragment *get_fragment(struct configfs_fragment *frag)
+{
+ if (likely(frag))
+ atomic_inc(&frag->frag_count);
+ return frag;
+}
+
/*
* Allocates a new configfs_dirent and links it to the parent configfs_dirent
*/
static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *parent_sd,
- void *element, int type)
+ void *element, int type,
+ struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
@@ -175,6 +202,7 @@ static struct configfs_dirent *configfs_new_dirent(struct configfs_dirent *paren
kmem_cache_free(configfs_dir_cachep, sd);
return ERR_PTR(-ENOENT);
}
+ sd->s_frag = get_fragment(frag);
list_add(&sd->s_sibling, &parent_sd->s_children);
spin_unlock(&configfs_dirent_lock);
@@ -209,11 +237,11 @@ static int configfs_dirent_exists(struct configfs_dirent *parent_sd,
int configfs_make_dirent(struct configfs_dirent * parent_sd,
struct dentry * dentry, void * element,
- umode_t mode, int type)
+ umode_t mode, int type, struct configfs_fragment *frag)
{
struct configfs_dirent * sd;
- sd = configfs_new_dirent(parent_sd, element, type);
+ sd = configfs_new_dirent(parent_sd, element, type, frag);
if (IS_ERR(sd))
return PTR_ERR(sd);
@@ -260,7 +288,8 @@ static void init_symlink(struct inode * inode)
* until it is validated by configfs_dir_set_ready()
*/
-static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int error;
umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
@@ -273,7 +302,8 @@ static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
return error;
error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
- CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+ CONFIGFS_DIR | CONFIGFS_USET_CREATING,
+ frag);
if (unlikely(error))
return error;
@@ -338,9 +368,10 @@ int configfs_create_link(struct configfs_symlink *sl,
{
int err = 0;
umode_t mode = S_IFLNK | S_IRWXUGO;
+ struct configfs_dirent *p = parent->d_fsdata;
- err = configfs_make_dirent(parent->d_fsdata, dentry, sl, mode,
- CONFIGFS_ITEM_LINK);
+ err = configfs_make_dirent(p, dentry, sl, mode,
+ CONFIGFS_ITEM_LINK, p->s_frag);
if (!err) {
err = configfs_create(dentry, mode, init_symlink);
if (err) {
@@ -599,7 +630,8 @@ static int populate_attrs(struct config_item *item)
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry);
+ struct dentry *dentry,
+ struct configfs_fragment *frag);
static void configfs_detach_group(struct config_item *item);
static void detach_groups(struct config_group *group)
@@ -647,7 +679,8 @@ static void detach_groups(struct config_group *group)
* try using vfs_mkdir. Just a thought.
*/
static int create_default_group(struct config_group *parent_group,
- struct config_group *group)
+ struct config_group *group,
+ struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
@@ -663,7 +696,7 @@ static int create_default_group(struct config_group *parent_group,
d_add(child, NULL);
ret = configfs_attach_group(&parent_group->cg_item,
- &group->cg_item, child);
+ &group->cg_item, child, frag);
if (!ret) {
sd = child->d_fsdata;
sd->s_type |= CONFIGFS_USET_DEFAULT;
@@ -677,13 +710,14 @@ static int create_default_group(struct config_group *parent_group,
return ret;
}
-static int populate_groups(struct config_group *group)
+static int populate_groups(struct config_group *group,
+ struct configfs_fragment *frag)
{
struct config_group *new_group;
int ret = 0;
list_for_each_entry(new_group, &group->default_groups, group_entry) {
- ret = create_default_group(group, new_group);
+ ret = create_default_group(group, new_group, frag);
if (ret) {
detach_groups(group);
break;
@@ -797,11 +831,12 @@ static void link_group(struct config_group *parent_group, struct config_group *g
*/
static int configfs_attach_item(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int ret;
- ret = configfs_create_dir(item, dentry);
+ ret = configfs_create_dir(item, dentry, frag);
if (!ret) {
ret = populate_attrs(item);
if (ret) {
@@ -831,12 +866,13 @@ static void configfs_detach_item(struct config_item *item)
static int configfs_attach_group(struct config_item *parent_item,
struct config_item *item,
- struct dentry *dentry)
+ struct dentry *dentry,
+ struct configfs_fragment *frag)
{
int ret;
struct configfs_dirent *sd;
- ret = configfs_attach_item(parent_item, item, dentry);
+ ret = configfs_attach_item(parent_item, item, dentry, frag);
if (!ret) {
sd = dentry->d_fsdata;
sd->s_type |= CONFIGFS_USET_DIR;
@@ -852,7 +888,7 @@ static int configfs_attach_group(struct config_item *parent_item,
*/
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
configfs_adjust_dir_dirent_depth_before_populate(sd);
- ret = populate_groups(to_config_group(item));
+ ret = populate_groups(to_config_group(item), frag);
if (ret) {
configfs_detach_item(item);
d_inode(dentry)->i_flags |= S_DEAD;
@@ -1247,6 +1283,7 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
struct configfs_dirent *sd;
const struct config_item_type *type;
struct module *subsys_owner = NULL, *new_item_owner = NULL;
+ struct configfs_fragment *frag;
char *name;
sd = dentry->d_parent->d_fsdata;
@@ -1265,6 +1302,12 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
goto out;
}
+ frag = new_fragment();
+ if (!frag) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Get a working ref for the duration of this function */
parent_item = configfs_get_config_item(dentry->d_parent);
type = parent_item->ci_type;
@@ -1367,9 +1410,9 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode
spin_unlock(&configfs_dirent_lock);
if (group)
- ret = configfs_attach_group(parent_item, item, dentry);
+ ret = configfs_attach_group(parent_item, item, dentry, frag);
else
- ret = configfs_attach_item(parent_item, item, dentry);
+ ret = configfs_attach_item(parent_item, item, dentry, frag);
spin_lock(&configfs_dirent_lock);
sd->s_type &= ~CONFIGFS_USET_IN_MKDIR;
@@ -1406,6 +1449,7 @@ out_put:
* reference.
*/
config_item_put(parent_item);
+ put_fragment(frag);
out:
return ret;
@@ -1417,6 +1461,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
struct config_item *item;
struct configfs_subsystem *subsys;
struct configfs_dirent *sd;
+ struct configfs_fragment *frag;
struct module *subsys_owner = NULL, *dead_item_owner = NULL;
int ret;
@@ -1474,6 +1519,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
}
} while (ret == -EAGAIN);
+ frag = sd->s_frag;
+ if (down_write_killable(&frag->frag_sem)) {
+ spin_lock(&configfs_dirent_lock);
+ configfs_detach_rollback(dentry);
+ spin_unlock(&configfs_dirent_lock);
+ return -EINTR;
+ }
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
+
/* Get a working ref for the duration of this function */
item = configfs_get_config_item(dentry);
@@ -1574,7 +1629,7 @@ static int configfs_dir_open(struct inode *inode, struct file *file)
*/
err = -ENOENT;
if (configfs_dirent_is_ready(parent_sd)) {
- file->private_data = configfs_new_dirent(parent_sd, NULL, 0);
+ file->private_data = configfs_new_dirent(parent_sd, NULL, 0, NULL);
if (IS_ERR(file->private_data))
err = PTR_ERR(file->private_data);
else
@@ -1732,8 +1787,13 @@ int configfs_register_group(struct config_group *parent_group,
{
struct configfs_subsystem *subsys = parent_group->cg_subsys;
struct dentry *parent;
+ struct configfs_fragment *frag;
int ret;
+ frag = new_fragment();
+ if (!frag)
+ return -ENOMEM;
+
mutex_lock(&subsys->su_mutex);
link_group(parent_group, group);
mutex_unlock(&subsys->su_mutex);
@@ -1741,7 +1801,7 @@ int configfs_register_group(struct config_group *parent_group,
parent = parent_group->cg_item.ci_dentry;
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
- ret = create_default_group(parent_group, group);
+ ret = create_default_group(parent_group, group, frag);
if (ret)
goto err_out;
@@ -1749,12 +1809,14 @@ int configfs_register_group(struct config_group *parent_group,
configfs_dir_set_ready(group->cg_item.ci_dentry->d_fsdata);
spin_unlock(&configfs_dirent_lock);
inode_unlock(d_inode(parent));
+ put_fragment(frag);
return 0;
err_out:
inode_unlock(d_inode(parent));
mutex_lock(&subsys->su_mutex);
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
+ put_fragment(frag);
return ret;
}
EXPORT_SYMBOL(configfs_register_group);
@@ -1770,16 +1832,12 @@ void configfs_unregister_group(struct config_group *group)
struct configfs_subsystem *subsys = group->cg_subsys;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *parent = group->cg_item.ci_parent->ci_dentry;
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_fragment *frag = sd->s_frag;
- mutex_lock(&subsys->su_mutex);
- if (!group->cg_item.ci_parent->ci_group) {
- /*
- * The parent has already been unlinked and detached
- * due to a rmdir.
- */
- goto unlink_group;
- }
- mutex_unlock(&subsys->su_mutex);
+ down_write(&frag->frag_sem);
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
inode_lock_nested(d_inode(parent), I_MUTEX_PARENT);
spin_lock(&configfs_dirent_lock);
@@ -1796,7 +1854,6 @@ void configfs_unregister_group(struct config_group *group)
dput(dentry);
mutex_lock(&subsys->su_mutex);
-unlink_group:
unlink_group(group);
mutex_unlock(&subsys->su_mutex);
}
@@ -1853,10 +1910,17 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
struct dentry *dentry;
struct dentry *root;
struct configfs_dirent *sd;
+ struct configfs_fragment *frag;
+
+ frag = new_fragment();
+ if (!frag)
+ return -ENOMEM;
root = configfs_pin_fs();
- if (IS_ERR(root))
+ if (IS_ERR(root)) {
+ put_fragment(frag);
return PTR_ERR(root);
+ }
if (!group->cg_item.ci_name)
group->cg_item.ci_name = group->cg_item.ci_namebuf;
@@ -1872,7 +1936,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
d_add(dentry, NULL);
err = configfs_attach_group(sd->s_element, &group->cg_item,
- dentry);
+ dentry, frag);
if (err) {
BUG_ON(d_inode(dentry));
d_drop(dentry);
@@ -1890,6 +1954,7 @@ int configfs_register_subsystem(struct configfs_subsystem *subsys)
unlink_group(group);
configfs_release_fs();
}
+ put_fragment(frag);
return err;
}
@@ -1899,12 +1964,18 @@ void configfs_unregister_subsystem(struct configfs_subsystem *subsys)
struct config_group *group = &subsys->su_group;
struct dentry *dentry = group->cg_item.ci_dentry;
struct dentry *root = dentry->d_sb->s_root;
+ struct configfs_dirent *sd = dentry->d_fsdata;
+ struct configfs_fragment *frag = sd->s_frag;
if (dentry->d_parent != root) {
pr_err("Tried to unregister non-subsystem!\n");
return;
}
+ down_write(&frag->frag_sem);
+ frag->frag_dead = true;
+ up_write(&frag->frag_sem);
+
inode_lock_nested(d_inode(root),
I_MUTEX_PARENT);
inode_lock_nested(d_inode(dentry), I_MUTEX_CHILD);
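
The configfs changes above hang a refcounted configfs_fragment off every dirent: teardown paths (rmdir, group/subsystem unregister) take frag_sem for write and set frag_dead, while callers into item code take it for read and bail out if the fragment has died, so attribute methods can no longer race with the item going away. A rough userspace analogue of that pattern with POSIX primitives (hypothetical fragment_* names, compile with -pthread; a sketch, not the configfs code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct fragment {
	atomic_int refcount;
	pthread_rwlock_t sem;
	bool dead;
};

static struct fragment *fragment_new(void)
{
	struct fragment *f = malloc(sizeof(*f));

	if (f) {
		atomic_init(&f->refcount, 1);
		pthread_rwlock_init(&f->sem, NULL);
		f->dead = false;
	}
	return f;
}

static void fragment_put(struct fragment *f)
{
	if (f && atomic_fetch_sub(&f->refcount, 1) == 1) {
		pthread_rwlock_destroy(&f->sem);
		free(f);
	}
}

/* teardown side: mark the fragment dead under the write lock */
static void fragment_kill(struct fragment *f)
{
	pthread_rwlock_wrlock(&f->sem);
	f->dead = true;
	pthread_rwlock_unlock(&f->sem);
}

/* caller side: only invoke the callback while the fragment is live */
static int fragment_call(struct fragment *f, int (*cb)(void *), void *arg)
{
	int ret = -1;	/* analogous to -ENOENT */

	pthread_rwlock_rdlock(&f->sem);
	if (!f->dead)
		ret = cb(arg);
	pthread_rwlock_unlock(&f->sem);
	return ret;
}

static int noop_cb(void *arg)
{
	(void)arg;
	return 0;
}

int main(void)
{
	struct fragment *f = fragment_new();

	if (!f)
		return 1;
	fragment_call(f, noop_cb, NULL);	/* runs: fragment is live */
	fragment_kill(f);
	fragment_call(f, noop_cb, NULL);	/* returns -1: fragment is dead */
	fragment_put(f);
	return 0;
}

Once fragment_kill() returns, any fragment_call() either finished before the kill or observes dead == true; the refcount only decides when the memory itself is released.
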
diff --git a/fs/configfs/file.c b/fs/configfs/file.c
index 61e4db4390a1..fb65b706cc0d 100644
--- a/fs/configfs/file.c
+++ b/fs/configfs/file.c
@@ -39,40 +39,44 @@ struct configfs_buffer {
bool write_in_progress;
char *bin_buffer;
int bin_buffer_size;
+ int cb_max_size;
+ struct config_item *item;
+ struct module *owner;
+ union {
+ struct configfs_attribute *attr;
+ struct configfs_bin_attribute *bin_attr;
+ };
};
+static inline struct configfs_fragment *to_frag(struct file *file)
+{
+ struct configfs_dirent *sd = file->f_path.dentry->d_fsdata;
-/**
- * fill_read_buffer - allocate and fill buffer from item.
- * @dentry: dentry pointer.
- * @buffer: data buffer for file.
- *
- * Allocate @buffer->page, if it hasn't been already, then call the
- * config_item's show() method to fill the buffer with this attribute's
- * data.
- * This is called only once, on the file's first read.
- */
-static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buffer)
+ return sd->s_frag;
+}
+
+static int fill_read_buffer(struct file *file, struct configfs_buffer *buffer)
{
- struct configfs_attribute * attr = to_attr(dentry);
- struct config_item * item = to_item(dentry->d_parent);
- int ret = 0;
- ssize_t count;
+ struct configfs_fragment *frag = to_frag(file);
+ ssize_t count = -ENOENT;
if (!buffer->page)
buffer->page = (char *) get_zeroed_page(GFP_KERNEL);
if (!buffer->page)
return -ENOMEM;
- count = attr->show(item, buffer->page);
-
- BUG_ON(count > (ssize_t)SIMPLE_ATTR_SIZE);
- if (count >= 0) {
- buffer->needs_read_fill = 0;
- buffer->count = count;
- } else
- ret = count;
- return ret;
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ count = buffer->attr->show(buffer->item, buffer->page);
+ up_read(&frag->frag_sem);
+
+ if (count < 0)
+ return count;
+ if (WARN_ON_ONCE(count > (ssize_t)SIMPLE_ATTR_SIZE))
+ return -EIO;
+ buffer->needs_read_fill = 0;
+ buffer->count = count;
+ return 0;
}
/**
@@ -97,12 +101,13 @@ static int fill_read_buffer(struct dentry * dentry, struct configfs_buffer * buf
static ssize_t
configfs_read_file(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
- struct configfs_buffer * buffer = file->private_data;
+ struct configfs_buffer *buffer = file->private_data;
ssize_t retval = 0;
mutex_lock(&buffer->mutex);
if (buffer->needs_read_fill) {
- if ((retval = fill_read_buffer(file->f_path.dentry,buffer)))
+ retval = fill_read_buffer(file, buffer);
+ if (retval)
goto out;
}
pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
@@ -138,10 +143,8 @@ static ssize_t
configfs_read_bin_file(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
+ struct configfs_fragment *frag = to_frag(file);
struct configfs_buffer *buffer = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- struct config_item *item = to_item(dentry->d_parent);
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
ssize_t retval = 0;
ssize_t len = min_t(size_t, count, PAGE_SIZE);
@@ -156,14 +159,19 @@ configfs_read_bin_file(struct file *file, char __user *buf,
if (buffer->needs_read_fill) {
/* perform first read with buf == NULL to get extent */
- len = bin_attr->read(item, NULL, 0);
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ len = buffer->bin_attr->read(buffer->item, NULL, 0);
+ else
+ len = -ENOENT;
+ up_read(&frag->frag_sem);
if (len <= 0) {
retval = len;
goto out;
}
/* do not exceed the maximum value */
- if (bin_attr->cb_max_size && len > bin_attr->cb_max_size) {
+ if (buffer->cb_max_size && len > buffer->cb_max_size) {
retval = -EFBIG;
goto out;
}
@@ -176,7 +184,13 @@ configfs_read_bin_file(struct file *file, char __user *buf,
buffer->bin_buffer_size = len;
/* perform second read to fill buffer */
- len = bin_attr->read(item, buffer->bin_buffer, len);
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ len = buffer->bin_attr->read(buffer->item,
+ buffer->bin_buffer, len);
+ else
+ len = -ENOENT;
+ up_read(&frag->frag_sem);
if (len < 0) {
retval = len;
vfree(buffer->bin_buffer);
@@ -226,25 +240,17 @@ fill_write_buffer(struct configfs_buffer * buffer, const char __user * buf, size
return error ? -EFAULT : count;
}
-
-/**
- * flush_write_buffer - push buffer to config_item.
- * @dentry: dentry to the attribute
- * @buffer: data buffer for file.
- * @count: number of bytes
- *
- * Get the correct pointers for the config_item and the attribute we're
- * dealing with, then call the store() method for the attribute,
- * passing the buffer that we acquired in fill_write_buffer().
- */
-
static int
-flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size_t count)
+flush_write_buffer(struct file *file, struct configfs_buffer *buffer, size_t count)
{
- struct configfs_attribute * attr = to_attr(dentry);
- struct config_item * item = to_item(dentry->d_parent);
-
- return attr->store(item, buffer->page, count);
+ struct configfs_fragment *frag = to_frag(file);
+ int res = -ENOENT;
+
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead)
+ res = buffer->attr->store(buffer->item, buffer->page, count);
+ up_read(&frag->frag_sem);
+ return res;
}
@@ -268,13 +274,13 @@ flush_write_buffer(struct dentry * dentry, struct configfs_buffer * buffer, size
static ssize_t
configfs_write_file(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
- struct configfs_buffer * buffer = file->private_data;
+ struct configfs_buffer *buffer = file->private_data;
ssize_t len;
mutex_lock(&buffer->mutex);
len = fill_write_buffer(buffer, buf, count);
if (len > 0)
- len = flush_write_buffer(file->f_path.dentry, buffer, len);
+ len = flush_write_buffer(file, buffer, len);
if (len > 0)
*ppos += len;
mutex_unlock(&buffer->mutex);
@@ -299,8 +305,6 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
struct configfs_buffer *buffer = file->private_data;
- struct dentry *dentry = file->f_path.dentry;
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
void *tbuf = NULL;
ssize_t len;
@@ -316,8 +320,8 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
/* buffer grows? */
if (*ppos + count > buffer->bin_buffer_size) {
- if (bin_attr->cb_max_size &&
- *ppos + count > bin_attr->cb_max_size) {
+ if (buffer->cb_max_size &&
+ *ppos + count > buffer->cb_max_size) {
len = -EFBIG;
goto out;
}
@@ -349,31 +353,51 @@ out:
return len;
}
-static int check_perm(struct inode * inode, struct file * file, int type)
+static int __configfs_open_file(struct inode *inode, struct file *file, int type)
{
- struct config_item *item = configfs_get_config_item(file->f_path.dentry->d_parent);
- struct configfs_attribute * attr = to_attr(file->f_path.dentry);
- struct configfs_bin_attribute *bin_attr = NULL;
- struct configfs_buffer * buffer;
- struct configfs_item_operations * ops = NULL;
- int error = 0;
+ struct dentry *dentry = file->f_path.dentry;
+ struct configfs_fragment *frag = to_frag(file);
+ struct configfs_attribute *attr;
+ struct configfs_buffer *buffer;
+ int error;
- if (!item || !attr)
- goto Einval;
+ error = -ENOMEM;
+ buffer = kzalloc(sizeof(struct configfs_buffer), GFP_KERNEL);
+ if (!buffer)
+ goto out;
- if (type & CONFIGFS_ITEM_BIN_ATTR)
- bin_attr = to_bin_attr(file->f_path.dentry);
+ error = -ENOENT;
+ down_read(&frag->frag_sem);
+ if (unlikely(frag->frag_dead))
+ goto out_free_buffer;
- /* Grab the module reference for this attribute if we have one */
- if (!try_module_get(attr->ca_owner)) {
- error = -ENODEV;
- goto Done;
+ error = -EINVAL;
+ buffer->item = to_item(dentry->d_parent);
+ if (!buffer->item)
+ goto out_free_buffer;
+
+ attr = to_attr(dentry);
+ if (!attr)
+ goto out_put_item;
+
+ if (type & CONFIGFS_ITEM_BIN_ATTR) {
+ buffer->bin_attr = to_bin_attr(dentry);
+ buffer->cb_max_size = buffer->bin_attr->cb_max_size;
+ } else {
+ buffer->attr = attr;
}
- if (item->ci_type)
- ops = item->ci_type->ct_item_ops;
- else
- goto Eaccess;
+ buffer->owner = attr->ca_owner;
+ /* Grab the module reference for this attribute if we have one */
+ error = -ENODEV;
+ if (!try_module_get(buffer->owner))
+ goto out_put_item;
+
+ error = -EACCES;
+ if (!buffer->item->ci_type)
+ goto out_put_module;
+
+ buffer->ops = buffer->item->ci_type->ct_item_ops;
/* File needs write support.
* The inode's perms must say it's ok,
@@ -381,13 +405,11 @@ static int check_perm(struct inode * inode, struct file * file, int type)
*/
if (file->f_mode & FMODE_WRITE) {
if (!(inode->i_mode & S_IWUGO))
- goto Eaccess;
-
+ goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->store)
- goto Eaccess;
-
- if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->write)
- goto Eaccess;
+ goto out_put_module;
+ if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->write)
+ goto out_put_module;
}
/* File needs read support.
@@ -396,92 +418,72 @@ static int check_perm(struct inode * inode, struct file * file, int type)
*/
if (file->f_mode & FMODE_READ) {
if (!(inode->i_mode & S_IRUGO))
- goto Eaccess;
-
+ goto out_put_module;
if ((type & CONFIGFS_ITEM_ATTR) && !attr->show)
- goto Eaccess;
-
- if ((type & CONFIGFS_ITEM_BIN_ATTR) && !bin_attr->read)
- goto Eaccess;
+ goto out_put_module;
+ if ((type & CONFIGFS_ITEM_BIN_ATTR) && !buffer->bin_attr->read)
+ goto out_put_module;
}
- /* No error? Great, allocate a buffer for the file, and store it
- * it in file->private_data for easy access.
- */
- buffer = kzalloc(sizeof(struct configfs_buffer),GFP_KERNEL);
- if (!buffer) {
- error = -ENOMEM;
- goto Enomem;
- }
mutex_init(&buffer->mutex);
buffer->needs_read_fill = 1;
buffer->read_in_progress = false;
buffer->write_in_progress = false;
- buffer->ops = ops;
file->private_data = buffer;
- goto Done;
+ up_read(&frag->frag_sem);
+ return 0;
- Einval:
- error = -EINVAL;
- goto Done;
- Eaccess:
- error = -EACCES;
- Enomem:
- module_put(attr->ca_owner);
- Done:
- if (error && item)
- config_item_put(item);
+out_put_module:
+ module_put(buffer->owner);
+out_put_item:
+ config_item_put(buffer->item);
+out_free_buffer:
+ up_read(&frag->frag_sem);
+ kfree(buffer);
+out:
return error;
}
static int configfs_release(struct inode *inode, struct file *filp)
{
- struct config_item * item = to_item(filp->f_path.dentry->d_parent);
- struct configfs_attribute * attr = to_attr(filp->f_path.dentry);
- struct module * owner = attr->ca_owner;
- struct configfs_buffer * buffer = filp->private_data;
-
- if (item)
- config_item_put(item);
- /* After this point, attr should not be accessed. */
- module_put(owner);
-
- if (buffer) {
- if (buffer->page)
- free_page((unsigned long)buffer->page);
- mutex_destroy(&buffer->mutex);
- kfree(buffer);
- }
+ struct configfs_buffer *buffer = filp->private_data;
+
+ module_put(buffer->owner);
+ if (buffer->page)
+ free_page((unsigned long)buffer->page);
+ mutex_destroy(&buffer->mutex);
+ kfree(buffer);
return 0;
}
static int configfs_open_file(struct inode *inode, struct file *filp)
{
- return check_perm(inode, filp, CONFIGFS_ITEM_ATTR);
+ return __configfs_open_file(inode, filp, CONFIGFS_ITEM_ATTR);
}
static int configfs_open_bin_file(struct inode *inode, struct file *filp)
{
- return check_perm(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
+ return __configfs_open_file(inode, filp, CONFIGFS_ITEM_BIN_ATTR);
}
-static int configfs_release_bin_file(struct inode *inode, struct file *filp)
+static int configfs_release_bin_file(struct inode *inode, struct file *file)
{
- struct configfs_buffer *buffer = filp->private_data;
- struct dentry *dentry = filp->f_path.dentry;
- struct config_item *item = to_item(dentry->d_parent);
- struct configfs_bin_attribute *bin_attr = to_bin_attr(dentry);
- ssize_t len = 0;
- int ret;
+ struct configfs_buffer *buffer = file->private_data;
buffer->read_in_progress = false;
if (buffer->write_in_progress) {
+ struct configfs_fragment *frag = to_frag(file);
buffer->write_in_progress = false;
- len = bin_attr->write(item, buffer->bin_buffer,
- buffer->bin_buffer_size);
-
+ down_read(&frag->frag_sem);
+ if (!frag->frag_dead) {
+ /* result of ->release() is ignored */
+ buffer->bin_attr->write(buffer->item,
+ buffer->bin_buffer,
+ buffer->bin_buffer_size);
+ }
+ up_read(&frag->frag_sem);
/* vfree on NULL is safe */
vfree(buffer->bin_buffer);
buffer->bin_buffer = NULL;
@@ -489,10 +491,8 @@ static int configfs_release_bin_file(struct inode *inode, struct file *filp)
buffer->needs_read_fill = 1;
}
- ret = configfs_release(inode, filp);
- if (len < 0)
- return len;
- return ret;
+ configfs_release(inode, file);
+ return 0;
}
@@ -527,7 +527,7 @@ int configfs_create_file(struct config_item * item, const struct configfs_attrib
inode_lock_nested(d_inode(dir), I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
- CONFIGFS_ITEM_ATTR);
+ CONFIGFS_ITEM_ATTR, parent_sd->s_frag);
inode_unlock(d_inode(dir));
return error;
@@ -549,7 +549,7 @@ int configfs_create_bin_file(struct config_item *item,
inode_lock_nested(dir->d_inode, I_MUTEX_NORMAL);
error = configfs_make_dirent(parent_sd, NULL, (void *) bin_attr, mode,
- CONFIGFS_ITEM_BIN_ATTR);
+ CONFIGFS_ITEM_BIN_ATTR, parent_sd->s_frag);
inode_unlock(dir->d_inode);
return error;
diff --git a/fs/d_path.c b/fs/d_path.c
index a7d0a96b35ce..0f1fc1743302 100644
--- a/fs/d_path.c
+++ b/fs/d_path.c
@@ -116,8 +116,10 @@ restart:
vfsmnt = &mnt->mnt;
continue;
}
- if (!error)
- error = is_mounted(vfsmnt) ? 1 : 2;
+ if (is_mounted(vfsmnt) && !is_anon_ns(mnt->mnt_ns))
+ error = 1; // absolute root
+ else
+ error = 2; // detached or not attached yet
break;
}
parent = dentry->d_parent;
diff --git a/fs/dax.c b/fs/dax.c
index b64964ef44f6..6bf81f931de3 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -600,7 +600,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping)
* guaranteed to either see new references or prevent new
* references from being established.
*/
- unmap_mapping_range(mapping, 0, 0, 1);
+ unmap_mapping_range(mapping, 0, 0, 0);
xas_lock_irq(&xas);
xas_for_each(&xas, entry, ULONG_MAX) {
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index beeadca23b05..42e5a766d33c 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -622,7 +622,7 @@ void devpts_pty_kill(struct dentry *dentry)
dentry->d_fsdata = NULL;
drop_nlink(dentry->d_inode);
fsnotify_unlink(d_inode(dentry->d_parent), dentry);
- d_delete(dentry);
+ d_drop(dentry);
dput(dentry); /* d_alloc_name() in devpts_pty_new() */
}
diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig
new file mode 100644
index 000000000000..9d634d3a1845
--- /dev/null
+++ b/fs/erofs/Kconfig
@@ -0,0 +1,91 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config EROFS_FS
+ tristate "EROFS filesystem support"
+ depends on BLOCK
+ help
+ EROFS (Enhanced Read-Only File System) is a lightweight
+ read-only file system with modern designs (e.g. page-sized
+ blocks, inline xattrs/data, etc.) for scenarios with
+ high-performance read-only requirements, e.g. Android OS
+ for mobile phones and LIVECDs.
+
+ It also provides fixed-sized output compression support,
+ which improves storage density and keeps relatively high
+ compression ratios, and is more useful for achieving high
+ performance on embedded devices with limited memory.
+
+ If unsure, say N.
+
+config EROFS_FS_DEBUG
+ bool "EROFS debugging feature"
+ depends on EROFS_FS
+ help
+ Print debugging messages and enable more BUG_ONs which check
+ filesystem consistency and find potential issues aggressively;
+ this can be used for Android eng builds, for example.
+
+ For daily use, say N.
+
+config EROFS_FS_XATTR
+ bool "EROFS extended attributes"
+ depends on EROFS_FS
+ default y
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details).
+
+ If unsure, say N.
+
+config EROFS_FS_POSIX_ACL
+ bool "EROFS Access Control Lists"
+ depends on EROFS_FS_XATTR
+ select FS_POSIX_ACL
+ default y
+ help
+ Posix Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N.
+
+config EROFS_FS_SECURITY
+ bool "EROFS Security Labels"
+ depends on EROFS_FS_XATTR
+ default y
+ help
+ Security labels provide an access control facility to support Linux
+ Security Modules (LSMs) such as AppArmor, SELinux, Smack and TOMOYO
+ Linux. This option enables an extended attribute handler for file
+ security labels in the erofs filesystem, so it requires the extended
+ attribute support above to be enabled in advance.
+
+ If you are not using a security module, say N.
+
+config EROFS_FS_ZIP
+ bool "EROFS Data Compression Support"
+ depends on EROFS_FS
+ select LZ4_DECOMPRESS
+ default y
+ help
+ Enable fixed-sized output compression for EROFS.
+
+ If you don't want to enable the compression feature, say N.
+
+config EROFS_FS_CLUSTER_PAGE_LIMIT
+ int "EROFS Cluster Pages Hard Limit"
+ depends on EROFS_FS_ZIP
+ range 1 256
+ default "1"
+ help
+ Indicates the maximum number of pages of a compressed
+ physical cluster.
+
+ For example, if files in an image were compressed in
+ 8k units, the hard limit should not be configured to
+ less than 2. Otherwise, the image will refuse to
+ mount on this kernel.
+
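
As the help text says, the limit has to cover the largest compressed physical cluster in the image, measured in pages. A tiny worked computation of that requirement (4KiB pages assumed; values are only illustrative):

#include <stdio.h>

#define PAGE_SIZE_DEMO 4096u

/* smallest valid EROFS_FS_CLUSTER_PAGE_LIMIT for a given cluster size */
static unsigned int min_cluster_page_limit(unsigned int cluster_bytes)
{
	return (cluster_bytes + PAGE_SIZE_DEMO - 1) / PAGE_SIZE_DEMO;
}

int main(void)
{
	printf("8k clusters need a limit of %u\n", min_cluster_page_limit(8192));
	printf("4k clusters need a limit of %u\n", min_cluster_page_limit(4096));
	return 0;
}
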
diff --git a/fs/erofs/Makefile b/fs/erofs/Makefile
new file mode 100644
index 000000000000..46f2aa4ba46c
--- /dev/null
+++ b/fs/erofs/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+EROFS_VERSION = "1.0"
+
+ccflags-y += -DEROFS_VERSION=\"$(EROFS_VERSION)\"
+
+obj-$(CONFIG_EROFS_FS) += erofs.o
+erofs-objs := super.o inode.o data.o namei.o dir.o utils.o
+erofs-$(CONFIG_EROFS_FS_XATTR) += xattr.o
+erofs-$(CONFIG_EROFS_FS_ZIP) += decompressor.o zmap.o zdata.o
+
diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h
new file mode 100644
index 000000000000..07d279fd5d67
--- /dev/null
+++ b/fs/erofs/compress.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_COMPRESS_H
+#define __EROFS_FS_COMPRESS_H
+
+#include "internal.h"
+
+enum {
+ Z_EROFS_COMPRESSION_SHIFTED = Z_EROFS_COMPRESSION_MAX,
+ Z_EROFS_COMPRESSION_RUNTIME_MAX
+};
+
+struct z_erofs_decompress_req {
+ struct super_block *sb;
+ struct page **in, **out;
+
+ unsigned short pageofs_out;
+ unsigned int inputsize, outputsize;
+
+ /* indicate the algorithm will be used for decompression */
+ unsigned int alg;
+ bool inplace_io, partial_decoding;
+};
+
+/*
+ * - 0x5A110C8D ('sallocated', Z_EROFS_MAPPING_STAGING) -
+ *  used to mark temporarily allocated pages and tell them
+ *  apart from other file/cached pages and NULL mapping pages.
+ */
+#define Z_EROFS_MAPPING_STAGING ((void *)0x5A110C8D)
+
+/* check if a page is marked as staging */
+static inline bool z_erofs_page_is_staging(struct page *page)
+{
+ return page->mapping == Z_EROFS_MAPPING_STAGING;
+}
+
+static inline bool z_erofs_put_stagingpage(struct list_head *pagepool,
+ struct page *page)
+{
+ if (!z_erofs_page_is_staging(page))
+ return false;
+
+ /* staging pages should not be used by others at the same time */
+ if (page_ref_count(page) > 1)
+ put_page(page);
+ else
+ list_add(&page->lru, pagepool);
+ return true;
+}
+
+int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool);
+
+#endif
+
diff --git a/fs/erofs/data.c b/fs/erofs/data.c
new file mode 100644
index 000000000000..8a9fcbd0e8ac
--- /dev/null
+++ b/fs/erofs/data.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "internal.h"
+#include <linux/prefetch.h>
+
+#include <trace/events/erofs.h>
+
+static void erofs_readendio(struct bio *bio)
+{
+ struct bio_vec *bvec;
+ blk_status_t err = bio->bi_status;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *page = bvec->bv_page;
+
+ /* page is already locked */
+ DBG_BUGON(PageUptodate(page));
+
+ if (err)
+ SetPageError(page);
+ else
+ SetPageUptodate(page);
+
+ unlock_page(page);
+ /* page could be reclaimed now */
+ }
+ bio_put(bio);
+}
+
+struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr)
+{
+ struct inode *const bd_inode = sb->s_bdev->bd_inode;
+ struct address_space *const mapping = bd_inode->i_mapping;
+
+ return read_cache_page_gfp(mapping, blkaddr,
+ mapping_gfp_constraint(mapping, ~__GFP_FS));
+}
+
+static int erofs_map_blocks_flatmode(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
+ int err = 0;
+ erofs_blk_t nblocks, lastblk;
+ u64 offset = map->m_la;
+ struct erofs_inode *vi = EROFS_I(inode);
+ bool tailendpacking = (vi->datalayout == EROFS_INODE_FLAT_INLINE);
+
+ trace_erofs_map_blocks_flatmode_enter(inode, map, flags);
+
+ nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
+ lastblk = nblocks - tailendpacking;
+
+ if (offset >= inode->i_size) {
+ /* leave out-of-bound access unmapped */
+ map->m_flags = 0;
+ map->m_plen = 0;
+ goto out;
+ }
+
+ /* there is no hole in flatmode */
+ map->m_flags = EROFS_MAP_MAPPED;
+
+ if (offset < blknr_to_addr(lastblk)) {
+ map->m_pa = blknr_to_addr(vi->raw_blkaddr) + map->m_la;
+ map->m_plen = blknr_to_addr(lastblk) - offset;
+ } else if (tailendpacking) {
+ /* 2 - inode inline B: inode, [xattrs], inline last blk... */
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+
+ map->m_pa = iloc(sbi, vi->nid) + vi->inode_isize +
+ vi->xattr_isize + erofs_blkoff(map->m_la);
+ map->m_plen = inode->i_size - offset;
+
+ /* inline data should be located in one meta block */
+ if (erofs_blkoff(map->m_pa) + map->m_plen > PAGE_SIZE) {
+ erofs_err(inode->i_sb,
+ "inline data cross block boundary @ nid %llu",
+ vi->nid);
+ DBG_BUGON(1);
+ err = -EFSCORRUPTED;
+ goto err_out;
+ }
+
+ map->m_flags |= EROFS_MAP_META;
+ } else {
+ erofs_err(inode->i_sb,
+ "internal error @ nid: %llu (size %llu), m_la 0x%llx",
+ vi->nid, inode->i_size, map->m_la);
+ DBG_BUGON(1);
+ err = -EIO;
+ goto err_out;
+ }
+
+out:
+ map->m_llen = map->m_plen;
+
+err_out:
+ trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
+ return err;
+}
+
+int erofs_map_blocks(struct inode *inode,
+ struct erofs_map_blocks *map, int flags)
+{
+ if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout)) {
+ int err = z_erofs_map_blocks_iter(inode, map, flags);
+
+ if (map->mpage) {
+ put_page(map->mpage);
+ map->mpage = NULL;
+ }
+ return err;
+ }
+ return erofs_map_blocks_flatmode(inode, map, flags);
+}
+
+static inline struct bio *erofs_read_raw_page(struct bio *bio,
+ struct address_space *mapping,
+ struct page *page,
+ erofs_off_t *last_block,
+ unsigned int nblocks,
+ bool ra)
+{
+ struct inode *const inode = mapping->host;
+ struct super_block *const sb = inode->i_sb;
+ erofs_off_t current_block = (erofs_off_t)page->index;
+ int err;
+
+ DBG_BUGON(!nblocks);
+
+ if (PageUptodate(page)) {
+ err = 0;
+ goto has_updated;
+ }
+
+ /* note that for the readpage case, bio also equals NULL */
+ if (bio &&
+ /* not continuous */
+ *last_block + 1 != current_block) {
+submit_bio_retry:
+ submit_bio(bio);
+ bio = NULL;
+ }
+
+ if (!bio) {
+ struct erofs_map_blocks map = {
+ .m_la = blknr_to_addr(current_block),
+ };
+ erofs_blk_t blknr;
+ unsigned int blkoff;
+
+ err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+ if (err)
+ goto err_out;
+
+ /* zero out the holed page */
+ if (!(map.m_flags & EROFS_MAP_MAPPED)) {
+ zero_user_segment(page, 0, PAGE_SIZE);
+ SetPageUptodate(page);
+
+ /* imply err = 0, see erofs_map_blocks */
+ goto has_updated;
+ }
+
+ /* for RAW access mode, m_plen must be equal to m_llen */
+ DBG_BUGON(map.m_plen != map.m_llen);
+
+ blknr = erofs_blknr(map.m_pa);
+ blkoff = erofs_blkoff(map.m_pa);
+
+ /* deal with inline page */
+ if (map.m_flags & EROFS_MAP_META) {
+ void *vsrc, *vto;
+ struct page *ipage;
+
+ DBG_BUGON(map.m_plen > PAGE_SIZE);
+
+ ipage = erofs_get_meta_page(inode->i_sb, blknr);
+
+ if (IS_ERR(ipage)) {
+ err = PTR_ERR(ipage);
+ goto err_out;
+ }
+
+ vsrc = kmap_atomic(ipage);
+ vto = kmap_atomic(page);
+ memcpy(vto, vsrc + blkoff, map.m_plen);
+ memset(vto + map.m_plen, 0, PAGE_SIZE - map.m_plen);
+ kunmap_atomic(vto);
+ kunmap_atomic(vsrc);
+ flush_dcache_page(page);
+
+ SetPageUptodate(page);
+ /* TODO: could we unlock the page earlier? */
+ unlock_page(ipage);
+ put_page(ipage);
+
+ /* imply err = 0, see erofs_map_blocks */
+ goto has_updated;
+ }
+
+ /* pa must be block-aligned for raw reading */
+ DBG_BUGON(erofs_blkoff(map.m_pa));
+
+ /* max # of continuous pages */
+ if (nblocks > DIV_ROUND_UP(map.m_plen, PAGE_SIZE))
+ nblocks = DIV_ROUND_UP(map.m_plen, PAGE_SIZE);
+ if (nblocks > BIO_MAX_PAGES)
+ nblocks = BIO_MAX_PAGES;
+
+ bio = bio_alloc(GFP_NOIO, nblocks);
+
+ bio->bi_end_io = erofs_readendio;
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_iter.bi_sector = (sector_t)blknr <<
+ LOG_SECTORS_PER_BLOCK;
+ bio->bi_opf = REQ_OP_READ;
+ }
+
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ /* out of the extent or bio is full */
+ if (err < PAGE_SIZE)
+ goto submit_bio_retry;
+
+ *last_block = current_block;
+
+ /* shift in advance in case it is followed by too many gaps */
+ if (bio->bi_iter.bi_size >= bio->bi_max_vecs * PAGE_SIZE) {
+ /* err should be reset to 0 after submitting */
+ err = 0;
+ goto submit_bio_out;
+ }
+
+ return bio;
+
+err_out:
+ /* for sync reading, set page error immediately */
+ if (!ra) {
+ SetPageError(page);
+ ClearPageUptodate(page);
+ }
+has_updated:
+ unlock_page(page);
+
+ /* if updated manually, continuous pages have a gap */
+ if (bio)
+submit_bio_out:
+ submit_bio(bio);
+ return err ? ERR_PTR(err) : NULL;
+}
+
+/*
+ * since we don't have write or truncate flows, no inode
+ * locking needs to be held at the moment.
+ */
+static int erofs_raw_access_readpage(struct file *file, struct page *page)
+{
+ erofs_off_t last_block;
+ struct bio *bio;
+
+ trace_erofs_readpage(page, true);
+
+ bio = erofs_read_raw_page(NULL, page->mapping,
+ page, &last_block, 1, false);
+
+ if (IS_ERR(bio))
+ return PTR_ERR(bio);
+
+ DBG_BUGON(bio); /* since we have only one bio -- must be NULL */
+ return 0;
+}
+
+static int erofs_raw_access_readpages(struct file *filp,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned int nr_pages)
+{
+ erofs_off_t last_block;
+ struct bio *bio = NULL;
+ gfp_t gfp = readahead_gfp_mask(mapping);
+ struct page *page = list_last_entry(pages, struct page, lru);
+
+ trace_erofs_readpages(mapping->host, page, nr_pages, true);
+
+ for (; nr_pages; --nr_pages) {
+ page = list_entry(pages->prev, struct page, lru);
+
+ prefetchw(&page->flags);
+ list_del(&page->lru);
+
+ if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) {
+ bio = erofs_read_raw_page(bio, mapping, page,
+ &last_block, nr_pages, true);
+
+ /* all the page errors are ignored when readahead */
+ if (IS_ERR(bio)) {
+ pr_err("%s, readahead error at page %lu of nid %llu\n",
+ __func__, page->index,
+ EROFS_I(mapping->host)->nid);
+
+ bio = NULL;
+ }
+ }
+
+ /* pages could still be locked */
+ put_page(page);
+ }
+ DBG_BUGON(!list_empty(pages));
+
+ /* the rare case (end in gaps) */
+ if (bio)
+ submit_bio(bio);
+ return 0;
+}
+
+static int erofs_get_block(struct inode *inode, sector_t iblock,
+ struct buffer_head *bh, int create)
+{
+ struct erofs_map_blocks map = {
+ .m_la = iblock << 9,
+ };
+ int err;
+
+ err = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
+ if (err)
+ return err;
+
+ if (map.m_flags & EROFS_MAP_MAPPED)
+ bh->b_blocknr = erofs_blknr(map.m_pa);
+
+ return err;
+}
+
+static sector_t erofs_bmap(struct address_space *mapping, sector_t block)
+{
+ struct inode *inode = mapping->host;
+
+ if (EROFS_I(inode)->datalayout == EROFS_INODE_FLAT_INLINE) {
+ erofs_blk_t blks = i_size_read(inode) >> LOG_BLOCK_SIZE;
+
+ if (block >> LOG_SECTORS_PER_BLOCK >= blks)
+ return 0;
+ }
+
+ return generic_block_bmap(mapping, block, erofs_get_block);
+}
+
+/* for uncompressed (aligned) files and raw access for other files */
+const struct address_space_operations erofs_raw_access_aops = {
+ .readpage = erofs_raw_access_readpage,
+ .readpages = erofs_raw_access_readpages,
+ .bmap = erofs_bmap,
+};
+
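
erofs_map_blocks_flatmode() above splits a flat file into block-aligned data starting at raw_blkaddr plus, for EROFS_INODE_FLAT_INLINE, a tail stored inside the inode's metadata block. A simplified, standalone sketch of that offset arithmetic (field names shortened, block size fixed at 4096; the real code additionally verifies that the inline tail does not cross a block boundary):

#include <stdint.h>
#include <stdio.h>

#define BLK_SIZE 4096u

struct flat_inode_demo {
	uint64_t size;          /* i_size */
	uint64_t raw_blkaddr;   /* first data block on disk */
	uint64_t meta_off;      /* byte offset of the inline area */
	int      tailpacking;   /* EROFS_INODE_FLAT_INLINE layout? */
};

/* translate a logical byte offset into a physical byte offset */
static int64_t flat_map(const struct flat_inode_demo *vi, uint64_t la)
{
	uint64_t nblocks = (vi->size + BLK_SIZE - 1) / BLK_SIZE;
	uint64_t lastblk = nblocks - (vi->tailpacking ? 1 : 0);

	if (la >= vi->size)
		return -1;                      /* out of bounds: unmapped */

	if (la < lastblk * BLK_SIZE)            /* plain block-mapped data */
		return vi->raw_blkaddr * BLK_SIZE + la;

	if (vi->tailpacking)                    /* inline tail in metadata */
		return vi->meta_off + la % BLK_SIZE;

	return -1;                              /* inconsistent layout */
}

int main(void)
{
	struct flat_inode_demo vi = {
		.size = 9000, .raw_blkaddr = 100,
		.meta_off = 5000, .tailpacking = 1,
	};

	printf("%lld\n", (long long)flat_map(&vi, 0));    /* 409600 */
	printf("%lld\n", (long long)flat_map(&vi, 8500)); /* 5308: inline tail */
	return 0;
}
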
diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c
new file mode 100644
index 000000000000..19f89f9fb10c
--- /dev/null
+++ b/fs/erofs/decompressor.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "compress.h"
+#include <linux/module.h>
+#include <linux/lz4.h>
+
+#ifndef LZ4_DISTANCE_MAX /* history window size */
+#define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
+#define LZ4_MAX_DISTANCE_PAGES (DIV_ROUND_UP(LZ4_DISTANCE_MAX, PAGE_SIZE) + 1)
+#ifndef LZ4_DECOMPRESS_INPLACE_MARGIN
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) (((srcsize) >> 8) + 32)
+#endif
+
+struct z_erofs_decompressor {
+ /*
+ * if destpages have sparse holes, fill them with bounce pages.
+ * It also checks whether destpages indicate contiguous physical memory.
+ */
+ int (*prepare_destpages)(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool);
+ int (*decompress)(struct z_erofs_decompress_req *rq, u8 *out);
+ char *name;
+};
+
+static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool)
+{
+ const unsigned int nr =
+ PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ struct page *availables[LZ4_MAX_DISTANCE_PAGES] = { NULL };
+ unsigned long bounced[DIV_ROUND_UP(LZ4_MAX_DISTANCE_PAGES,
+ BITS_PER_LONG)] = { 0 };
+ void *kaddr = NULL;
+ unsigned int i, j, top;
+
+ top = 0;
+ for (i = j = 0; i < nr; ++i, ++j) {
+ struct page *const page = rq->out[i];
+ struct page *victim;
+
+ if (j >= LZ4_MAX_DISTANCE_PAGES)
+ j = 0;
+
+ /* 'valid' bounced can only be tested after a complete round */
+ if (test_bit(j, bounced)) {
+ DBG_BUGON(i < LZ4_MAX_DISTANCE_PAGES);
+ DBG_BUGON(top >= LZ4_MAX_DISTANCE_PAGES);
+ availables[top++] = rq->out[i - LZ4_MAX_DISTANCE_PAGES];
+ }
+
+ if (page) {
+ __clear_bit(j, bounced);
+ if (kaddr) {
+ if (kaddr + PAGE_SIZE == page_address(page))
+ kaddr += PAGE_SIZE;
+ else
+ kaddr = NULL;
+ } else if (!i) {
+ kaddr = page_address(page);
+ }
+ continue;
+ }
+ kaddr = NULL;
+ __set_bit(j, bounced);
+
+ if (top) {
+ victim = availables[--top];
+ get_page(victim);
+ } else {
+ victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
+ if (!victim)
+ return -ENOMEM;
+ victim->mapping = Z_EROFS_MAPPING_STAGING;
+ }
+ rq->out[i] = victim;
+ }
+ return kaddr ? 1 : 0;
+}
+
+static void *generic_copy_inplace_data(struct z_erofs_decompress_req *rq,
+ u8 *src, unsigned int pageofs_in)
+{
+ /*
+ * if in-place decompression is ongoing, those decompressed
+ * pages should be copied in order to avoid being overlapped.
+ */
+ struct page **in = rq->in;
+ u8 *const tmp = erofs_get_pcpubuf(0);
+ u8 *tmpp = tmp;
+ unsigned int inlen = rq->inputsize - pageofs_in;
+ unsigned int count = min_t(uint, inlen, PAGE_SIZE - pageofs_in);
+
+ while (tmpp < tmp + inlen) {
+ if (!src)
+ src = kmap_atomic(*in);
+ memcpy(tmpp, src + pageofs_in, count);
+ kunmap_atomic(src);
+ src = NULL;
+ tmpp += count;
+ pageofs_in = 0;
+ count = PAGE_SIZE;
+ ++in;
+ }
+ return tmp;
+}
+
+static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, u8 *out)
+{
+ unsigned int inputmargin, inlen;
+ u8 *src;
+ bool copied, support_0padding;
+ int ret;
+
+ if (rq->inputsize > PAGE_SIZE)
+ return -EOPNOTSUPP;
+
+ src = kmap_atomic(*rq->in);
+ inputmargin = 0;
+ support_0padding = false;
+
+ /* decompression inplace is only safe when 0padding is enabled */
+ if (EROFS_SB(rq->sb)->feature_incompat &
+ EROFS_FEATURE_INCOMPAT_LZ4_0PADDING) {
+ support_0padding = true;
+
+ while (!src[inputmargin & ~PAGE_MASK])
+ if (!(++inputmargin & ~PAGE_MASK))
+ break;
+
+ if (inputmargin >= rq->inputsize) {
+ kunmap_atomic(src);
+ return -EIO;
+ }
+ }
+
+ copied = false;
+ inlen = rq->inputsize - inputmargin;
+ if (rq->inplace_io) {
+ const uint oend = (rq->pageofs_out +
+ rq->outputsize) & ~PAGE_MASK;
+ const uint nr = PAGE_ALIGN(rq->pageofs_out +
+ rq->outputsize) >> PAGE_SHIFT;
+
+ if (rq->partial_decoding || !support_0padding ||
+ rq->out[nr - 1] != rq->in[0] ||
+ rq->inputsize - oend <
+ LZ4_DECOMPRESS_INPLACE_MARGIN(inlen)) {
+ src = generic_copy_inplace_data(rq, src, inputmargin);
+ inputmargin = 0;
+ copied = true;
+ }
+ }
+
+ ret = LZ4_decompress_safe_partial(src + inputmargin, out,
+ inlen, rq->outputsize,
+ rq->outputsize);
+ if (ret < 0) {
+ erofs_err(rq->sb, "failed to decompress, in[%u, %u] out[%u]",
+ inlen, inputmargin, rq->outputsize);
+ WARN_ON(1);
+ print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET,
+ 16, 1, src + inputmargin, inlen, true);
+ print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET,
+ 16, 1, out, rq->outputsize, true);
+ ret = -EIO;
+ }
+
+ if (copied)
+ erofs_put_pcpubuf(src);
+ else
+ kunmap_atomic(src);
+ return ret;
+}
+
+static struct z_erofs_decompressor decompressors[] = {
+ [Z_EROFS_COMPRESSION_SHIFTED] = {
+ .name = "shifted"
+ },
+ [Z_EROFS_COMPRESSION_LZ4] = {
+ .prepare_destpages = z_erofs_lz4_prepare_destpages,
+ .decompress = z_erofs_lz4_decompress,
+ .name = "lz4"
+ },
+};
+
+static void copy_from_pcpubuf(struct page **out, const char *dst,
+ unsigned short pageofs_out,
+ unsigned int outputsize)
+{
+ const char *end = dst + outputsize;
+ const unsigned int righthalf = PAGE_SIZE - pageofs_out;
+ const char *cur = dst - pageofs_out;
+
+ while (cur < end) {
+ struct page *const page = *out++;
+
+ if (page) {
+ char *buf = kmap_atomic(page);
+
+ if (cur >= dst) {
+ memcpy(buf, cur, min_t(uint, PAGE_SIZE,
+ end - cur));
+ } else {
+ memcpy(buf + pageofs_out, cur + pageofs_out,
+ min_t(uint, righthalf, end - cur));
+ }
+ kunmap_atomic(buf);
+ }
+ cur += PAGE_SIZE;
+ }
+}
+
+static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool)
+{
+ const unsigned int nrpages_out =
+ PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ const struct z_erofs_decompressor *alg = decompressors + rq->alg;
+ unsigned int dst_maptype;
+ void *dst;
+ int ret, i;
+
+ if (nrpages_out == 1 && !rq->inplace_io) {
+ DBG_BUGON(!*rq->out);
+ dst = kmap_atomic(*rq->out);
+ dst_maptype = 0;
+ goto dstmap_out;
+ }
+
+ /*
+ * For small output sizes (especially much less than
+ * PAGE_SIZE), copying the decompressed data with memcpy is
+ * preferred over copying the compressed data.
+ */
+ if (rq->outputsize <= PAGE_SIZE * 7 / 8) {
+ dst = erofs_get_pcpubuf(0);
+ if (IS_ERR(dst))
+ return PTR_ERR(dst);
+
+ rq->inplace_io = false;
+ ret = alg->decompress(rq, dst);
+ if (!ret)
+ copy_from_pcpubuf(rq->out, dst, rq->pageofs_out,
+ rq->outputsize);
+
+ erofs_put_pcpubuf(dst);
+ return ret;
+ }
+
+ ret = alg->prepare_destpages(rq, pagepool);
+ if (ret < 0) {
+ return ret;
+ } else if (ret) {
+ dst = page_address(*rq->out);
+ dst_maptype = 1;
+ goto dstmap_out;
+ }
+
+ i = 0;
+ while (1) {
+ dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
+
+ /* retry two more times (3 times in total) */
+ if (dst || ++i >= 3)
+ break;
+ vm_unmap_aliases();
+ }
+
+ if (!dst)
+ return -ENOMEM;
+
+ dst_maptype = 2;
+
+dstmap_out:
+ ret = alg->decompress(rq, dst + rq->pageofs_out);
+
+ if (!dst_maptype)
+ kunmap_atomic(dst);
+ else if (dst_maptype == 2)
+ vm_unmap_ram(dst, nrpages_out);
+ return ret;
+}
+
+static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool)
+{
+ const unsigned int nrpages_out =
+ PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
+ const unsigned int righthalf = PAGE_SIZE - rq->pageofs_out;
+ unsigned char *src, *dst;
+
+ if (nrpages_out > 2) {
+ DBG_BUGON(1);
+ return -EIO;
+ }
+
+ if (rq->out[0] == *rq->in) {
+ DBG_BUGON(nrpages_out != 1);
+ return 0;
+ }
+
+ src = kmap_atomic(*rq->in);
+ if (!rq->out[0]) {
+ dst = NULL;
+ } else {
+ dst = kmap_atomic(rq->out[0]);
+ memcpy(dst + rq->pageofs_out, src, righthalf);
+ }
+
+ if (rq->out[1] == *rq->in) {
+ memmove(src, src + righthalf, rq->pageofs_out);
+ } else if (nrpages_out == 2) {
+ if (dst)
+ kunmap_atomic(dst);
+ DBG_BUGON(!rq->out[1]);
+ dst = kmap_atomic(rq->out[1]);
+ memcpy(dst, src + righthalf, rq->pageofs_out);
+ }
+ if (dst)
+ kunmap_atomic(dst);
+ kunmap_atomic(src);
+ return 0;
+}
+
+int z_erofs_decompress(struct z_erofs_decompress_req *rq,
+ struct list_head *pagepool)
+{
+ if (rq->alg == Z_EROFS_COMPRESSION_SHIFTED)
+ return z_erofs_shifted_transform(rq, pagepool);
+ return z_erofs_decompress_generic(rq, pagepool);
+}
+
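
z_erofs_lz4_decompress() only decompresses in place when the shared buffer leaves enough slack after the output; the required slack is LZ4_DECOMPRESS_INPLACE_MARGIN(srcsize) = srcsize/256 + 32 bytes (the code compares rq->inputsize - oend against it), otherwise the input is first copied into a per-CPU buffer. A small worked check of that condition (numbers are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* slack required before LZ4 may safely decompress in place */
static unsigned int inplace_margin(unsigned int srcsize)
{
	return (srcsize >> 8) + 32;
}

/*
 * "slack" models how far the compressed input extends past the end of
 * the decompressed output inside the shared buffer (rq->inputsize - oend
 * in the kernel code).
 */
static bool inplace_is_safe(unsigned int srcsize, unsigned int slack)
{
	return slack >= inplace_margin(srcsize);
}

int main(void)
{
	/* a 4096-byte compressed cluster needs 4096/256 + 32 = 48 bytes */
	printf("margin for 4096 bytes: %u\n", inplace_margin(4096));
	printf("64 bytes of slack ok?  %d\n", inplace_is_safe(4096, 64));
	printf("16 bytes of slack ok?  %d\n", inplace_is_safe(4096, 16));
	return 0;
}
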
diff --git a/fs/erofs/dir.c b/fs/erofs/dir.c
new file mode 100644
index 000000000000..d28c623dfef9
--- /dev/null
+++ b/fs/erofs/dir.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "internal.h"
+
+static void debug_one_dentry(unsigned char d_type, const char *de_name,
+ unsigned int de_namelen)
+{
+#ifdef CONFIG_EROFS_FS_DEBUG
+ /* the on-disk name may not have a trailing '\0' */
+ unsigned char dbg_namebuf[EROFS_NAME_LEN + 1];
+
+ memcpy(dbg_namebuf, de_name, de_namelen);
+ dbg_namebuf[de_namelen] = '\0';
+
+ erofs_dbg("found dirent %s de_len %u d_type %d", dbg_namebuf,
+ de_namelen, d_type);
+#endif
+}
+
+static int erofs_fill_dentries(struct inode *dir, struct dir_context *ctx,
+ void *dentry_blk, unsigned int *ofs,
+ unsigned int nameoff, unsigned int maxsize)
+{
+ struct erofs_dirent *de = dentry_blk + *ofs;
+ const struct erofs_dirent *end = dentry_blk + nameoff;
+
+ while (de < end) {
+ const char *de_name;
+ unsigned int de_namelen;
+ unsigned char d_type;
+
+ d_type = fs_ftype_to_dtype(de->file_type);
+
+ nameoff = le16_to_cpu(de->nameoff);
+ de_name = (char *)dentry_blk + nameoff;
+
+ /* the last dirent in the block? */
+ if (de + 1 >= end)
+ de_namelen = strnlen(de_name, maxsize - nameoff);
+ else
+ de_namelen = le16_to_cpu(de[1].nameoff) - nameoff;
+
+ /* a corrupted entry is found */
+ if (nameoff + de_namelen > maxsize ||
+ de_namelen > EROFS_NAME_LEN) {
+ erofs_err(dir->i_sb, "bogus dirent @ nid %llu",
+ EROFS_I(dir)->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+
+ debug_one_dentry(d_type, de_name, de_namelen);
+ if (!dir_emit(ctx, de_name, de_namelen,
+ le64_to_cpu(de->nid), d_type))
+ /* stopped by the actor for some reason */
+ return 1;
+ ++de;
+ *ofs += sizeof(struct erofs_dirent);
+ }
+ *ofs = maxsize;
+ return 0;
+}
+
+static int erofs_readdir(struct file *f, struct dir_context *ctx)
+{
+ struct inode *dir = file_inode(f);
+ struct address_space *mapping = dir->i_mapping;
+ const size_t dirsize = i_size_read(dir);
+ unsigned int i = ctx->pos / EROFS_BLKSIZ;
+ unsigned int ofs = ctx->pos % EROFS_BLKSIZ;
+ int err = 0;
+ bool initial = true;
+
+ while (ctx->pos < dirsize) {
+ struct page *dentry_page;
+ struct erofs_dirent *de;
+ unsigned int nameoff, maxsize;
+
+ dentry_page = read_mapping_page(mapping, i, NULL);
+ if (dentry_page == ERR_PTR(-ENOMEM)) {
+ err = -ENOMEM;
+ break;
+ } else if (IS_ERR(dentry_page)) {
+ erofs_err(dir->i_sb,
+ "fail to readdir of logical block %u of nid %llu",
+ i, EROFS_I(dir)->nid);
+ err = -EFSCORRUPTED;
+ break;
+ }
+
+ de = (struct erofs_dirent *)kmap(dentry_page);
+
+ nameoff = le16_to_cpu(de->nameoff);
+
+ if (nameoff < sizeof(struct erofs_dirent) ||
+ nameoff >= PAGE_SIZE) {
+ erofs_err(dir->i_sb,
+ "invalid de[0].nameoff %u @ nid %llu",
+ nameoff, EROFS_I(dir)->nid);
+ err = -EFSCORRUPTED;
+ goto skip_this;
+ }
+
+ maxsize = min_t(unsigned int,
+ dirsize - ctx->pos + ofs, PAGE_SIZE);
+
+ /* search dirents from an arbitrary position */
+ if (initial) {
+ initial = false;
+
+ ofs = roundup(ofs, sizeof(struct erofs_dirent));
+ if (ofs >= nameoff)
+ goto skip_this;
+ }
+
+ err = erofs_fill_dentries(dir, ctx, de, &ofs,
+ nameoff, maxsize);
+skip_this:
+ kunmap(dentry_page);
+
+ put_page(dentry_page);
+
+ ctx->pos = blknr_to_addr(i) + ofs;
+
+ if (err)
+ break;
+ ++i;
+ ofs = 0;
+ }
+ return err < 0 ? err : 0;
+}
+
+const struct file_operations erofs_dir_fops = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = erofs_readdir,
+};
+
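A quick worked example of the readdir position encoding used above, where ctx->pos is simply a byte offset into the directory (block index * EROFS_BLKSIZ + offset within the block); the concrete numbers assume 4KiB blocks and the 12-byte struct erofs_dirent defined later in this patch, and the helper name is made up for illustration.

/* illustration only: decoding a resume position the way erofs_readdir() does */
static void erofs_readdir_pos_example(void)
{
	loff_t pos = 8264;				/* cookie from a previous getdents call */
	unsigned int blk = pos / EROFS_BLKSIZ;		/* 8264 / 4096 == 2 */
	unsigned int ofs = pos % EROFS_BLKSIZ;		/* 8264 % 4096 == 72 */
	unsigned int nr = ofs / sizeof(struct erofs_dirent);	/* 72 / 12 == 6 dirents skipped */
}
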
diff --git a/fs/erofs/erofs_fs.h b/fs/erofs/erofs_fs.h
new file mode 100644
index 000000000000..b1ee5654750d
--- /dev/null
+++ b/fs/erofs/erofs_fs.h
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR Apache-2.0 */
+/*
+ * EROFS (Enhanced ROM File System) on-disk format definition
+ *
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_H
+#define __EROFS_FS_H
+
+#define EROFS_SUPER_OFFSET 1024
+
+/*
+ * Any bits that aren't in EROFS_ALL_FEATURE_INCOMPAT should
+ * be incompatible with this kernel version.
+ */
+#define EROFS_FEATURE_INCOMPAT_LZ4_0PADDING 0x00000001
+#define EROFS_ALL_FEATURE_INCOMPAT EROFS_FEATURE_INCOMPAT_LZ4_0PADDING
+
+/* 128-byte erofs on-disk super block */
+struct erofs_super_block {
+ __le32 magic; /* file system magic number */
+ __le32 checksum; /* crc32c(super_block) */
+ __le32 feature_compat;
+ __u8 blkszbits; /* support block_size == PAGE_SIZE only */
+ __u8 reserved;
+
+ __le16 root_nid; /* nid of root directory */
+ __le64 inos; /* total valid ino # (== f_files - f_favail) */
+
+ __le64 build_time; /* inode v1 time derivation */
+ __le32 build_time_nsec; /* inode v1 time derivation in nano scale */
+ __le32 blocks; /* used for statfs */
+ __le32 meta_blkaddr; /* start block address of metadata area */
+ __le32 xattr_blkaddr; /* start block address of shared xattr area */
+ __u8 uuid[16]; /* 128-bit uuid for volume */
+ __u8 volume_name[16]; /* volume name */
+ __le32 feature_incompat;
+
+ __u8 reserved2[44];
+};
+
+/*
+ * erofs inode datalayout (i_format in on-disk inode):
+ * 0 - inode plain without inline data A:
+ * inode, [xattrs], ... | ... | no-holed data
+ * 1 - inode VLE compression B (legacy):
+ * inode, [xattrs], extents ... | ...
+ * 2 - inode plain with inline data C:
+ * inode, [xattrs], last_inline_data, ... | ... | no-holed data
+ * 3 - inode compression D:
+ * inode, [xattrs], map_header, extents ... | ...
+ * 4~7 - reserved
+ */
+enum {
+ EROFS_INODE_FLAT_PLAIN = 0,
+ EROFS_INODE_FLAT_COMPRESSION_LEGACY = 1,
+ EROFS_INODE_FLAT_INLINE = 2,
+ EROFS_INODE_FLAT_COMPRESSION = 3,
+ EROFS_INODE_DATALAYOUT_MAX
+};
+
+static inline bool erofs_inode_is_data_compressed(unsigned int datamode)
+{
+ return datamode == EROFS_INODE_FLAT_COMPRESSION ||
+ datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY;
+}
+
+/* bit definitions of inode i_advise */
+#define EROFS_I_VERSION_BITS 1
+#define EROFS_I_DATALAYOUT_BITS 3
+
+#define EROFS_I_VERSION_BIT 0
+#define EROFS_I_DATALAYOUT_BIT 1
+
+/* 32-byte reduced form of an ondisk inode */
+struct erofs_inode_compact {
+ __le16 i_format; /* inode format hints */
+
+/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
+ __le16 i_xattr_icount;
+ __le16 i_mode;
+ __le16 i_nlink;
+ __le32 i_size;
+ __le32 i_reserved;
+ union {
+ /* file total compressed blocks for data mapping 1 */
+ __le32 compressed_blocks;
+ __le32 raw_blkaddr;
+
+ /* for device files, used to indicate old/new device # */
+ __le32 rdev;
+ } i_u;
+ __le32 i_ino; /* only used for 32-bit stat compatibility */
+ __le16 i_uid;
+ __le16 i_gid;
+ __le32 i_reserved2;
+};
+
+/* 32 bytes on-disk inode */
+#define EROFS_INODE_LAYOUT_COMPACT 0
+/* 64 bytes on-disk inode */
+#define EROFS_INODE_LAYOUT_EXTENDED 1
+
+/* 64-byte complete form of an ondisk inode */
+struct erofs_inode_extended {
+ __le16 i_format; /* inode format hints */
+
+/* 1 header + n-1 * 4 bytes inline xattr to keep continuity */
+ __le16 i_xattr_icount;
+ __le16 i_mode;
+ __le16 i_reserved;
+ __le64 i_size;
+ union {
+ /* file total compressed blocks for data mapping 1 */
+ __le32 compressed_blocks;
+ __le32 raw_blkaddr;
+
+ /* for device files, used to indicate old/new device # */
+ __le32 rdev;
+ } i_u;
+
+ /* only used for 32-bit stat compatibility */
+ __le32 i_ino;
+
+ __le32 i_uid;
+ __le32 i_gid;
+ __le64 i_ctime;
+ __le32 i_ctime_nsec;
+ __le32 i_nlink;
+ __u8 i_reserved2[16];
+};
+
+#define EROFS_MAX_SHARED_XATTRS (128)
+/* h_shared_count values between 129 and 255 are special */
+#define EROFS_SHARED_XATTR_EXTENT (255)
+
+/*
+ * inline xattrs (n == i_xattr_icount):
+ * erofs_xattr_ibody_header(1) + (n - 1) * 4 bytes
+ * 12 bytes / \
+ * / \
+ * /-----------------------\
+ * | erofs_xattr_entries+ |
+ * +-----------------------+
+ * inline xattrs must start with an erofs_xattr_ibody_header;
+ * since this is a read-only fs, there is no need for h_refcount
+ */
+struct erofs_xattr_ibody_header {
+ __le32 h_reserved;
+ __u8 h_shared_count;
+ __u8 h_reserved2[7];
+ __le32 h_shared_xattrs[0]; /* shared xattr id array */
+};
+
+/* Name indexes */
+#define EROFS_XATTR_INDEX_USER 1
+#define EROFS_XATTR_INDEX_POSIX_ACL_ACCESS 2
+#define EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT 3
+#define EROFS_XATTR_INDEX_TRUSTED 4
+#define EROFS_XATTR_INDEX_LUSTRE 5
+#define EROFS_XATTR_INDEX_SECURITY 6
+
+/* xattr entry (for both inline & shared xattrs) */
+struct erofs_xattr_entry {
+ __u8 e_name_len; /* length of name */
+ __u8 e_name_index; /* attribute name index */
+ __le16 e_value_size; /* size of attribute value */
+ /* followed by e_name and e_value */
+ char e_name[0]; /* attribute name */
+};
+
+static inline unsigned int erofs_xattr_ibody_size(__le16 i_xattr_icount)
+{
+ if (!i_xattr_icount)
+ return 0;
+
+ return sizeof(struct erofs_xattr_ibody_header) +
+ sizeof(__u32) * (le16_to_cpu(i_xattr_icount) - 1);
+}
+
+#define EROFS_XATTR_ALIGN(size) round_up(size, sizeof(struct erofs_xattr_entry))
+
+static inline unsigned int erofs_xattr_entry_size(struct erofs_xattr_entry *e)
+{
+ return EROFS_XATTR_ALIGN(sizeof(struct erofs_xattr_entry) +
+ e->e_name_len + le16_to_cpu(e->e_value_size));
+}
+
+/* available compression algorithm types (for h_algorithmtype) */
+enum {
+ Z_EROFS_COMPRESSION_LZ4 = 0,
+ Z_EROFS_COMPRESSION_MAX
+};
+
+/*
+ * bit 0 : COMPACTED_2B indexes (0 - off; 1 - on)
+ * e.g. for 4k logical cluster size, 4B if compacted 2B is off;
+ * (4B) + 2B + (4B) if compacted 2B is on.
+ */
+#define Z_EROFS_ADVISE_COMPACTED_2B_BIT 0
+
+#define Z_EROFS_ADVISE_COMPACTED_2B (1 << Z_EROFS_ADVISE_COMPACTED_2B_BIT)
+
+struct z_erofs_map_header {
+ __le32 h_reserved1;
+ __le16 h_advise;
+ /*
+ * bit 0-3 : algorithm type of head 1 (logical cluster type 01);
+ * bit 4-7 : algorithm type of head 2 (logical cluster type 11).
+ */
+ __u8 h_algorithmtype;
+ /*
+ * bit 0-2 : logical cluster bits - 12, e.g. 0 for 4096;
+ * bit 3-4 : (physical - logical) cluster bits of head 1:
+ * For example, if logical clustersize = 4096, 1 for 8192.
+ * bit 5-7 : (physical - logical) cluster bits of head 2.
+ */
+ __u8 h_clusterbits;
+};
+
+#define Z_EROFS_VLE_LEGACY_HEADER_PADDING 8
+
+/*
+ * Fixed-sized output compression ondisk Logical Extent cluster type:
+ * 0 - literal (uncompressed) cluster
+ * 1 - compressed cluster (for the head logical cluster)
+ * 2 - compressed cluster (for the other logical clusters)
+ *
+ * In detail,
+ * 0 - literal (uncompressed) cluster,
+ * di_advise = 0
+ * di_clusterofs = the literal data offset of the cluster
+ * di_blkaddr = the blkaddr of the literal cluster
+ *
+ * 1 - compressed cluster (for the head logical cluster)
+ * di_advise = 1
+ * di_clusterofs = the decompressed data offset of the cluster
+ * di_blkaddr = the blkaddr of the compressed cluster
+ *
+ * 2 - compressed cluster (for the other logical clusters)
+ * di_advise = 2
+ * di_clusterofs =
+ * the decompressed data offset in its own head cluster
+ * di_u.delta[0] = distance to its corresponding head cluster
+ * di_u.delta[1] = distance to its corresponding tail cluster
+ * (di_advise could be 0, 1 or 2)
+ */
+enum {
+ Z_EROFS_VLE_CLUSTER_TYPE_PLAIN = 0,
+ Z_EROFS_VLE_CLUSTER_TYPE_HEAD = 1,
+ Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD = 2,
+ Z_EROFS_VLE_CLUSTER_TYPE_RESERVED = 3,
+ Z_EROFS_VLE_CLUSTER_TYPE_MAX
+};
+
+#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS 2
+#define Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT 0
+
+struct z_erofs_vle_decompressed_index {
+ __le16 di_advise;
+ /* where to decompress in the head cluster */
+ __le16 di_clusterofs;
+
+ union {
+ /* for the head cluster */
+ __le32 blkaddr;
+ /*
+ * for the other (non-head) clusters,
+ * e.g. for a 4k page-sized cluster, the maximum distance is 4K * 64K = 256M
+ * [0] - pointing to the head cluster
+ * [1] - pointing to the tail cluster
+ */
+ __le16 delta[2];
+ } di_u;
+};
+
+#define Z_EROFS_VLE_LEGACY_INDEX_ALIGN(size) \
+ (round_up(size, sizeof(struct z_erofs_vle_decompressed_index)) + \
+ sizeof(struct z_erofs_map_header) + Z_EROFS_VLE_LEGACY_HEADER_PADDING)
+
+/* dirents are sorted in alphabetical order, thus we can do binary search */
+struct erofs_dirent {
+ __le64 nid; /* node number */
+ __le16 nameoff; /* start offset of file name */
+ __u8 file_type; /* file type */
+ __u8 reserved; /* reserved */
+} __packed;
+
+/*
+ * EROFS file types should match the generic FT_* types;
+ * there seems to be no need to add BUILD_BUG_ONs since a
+ * potential mismatch would break other filesystems as well.
+ */
+
+#define EROFS_NAME_LEN 255
+
+/* check the EROFS on-disk layout strictly at compile time */
+static inline void erofs_check_ondisk_layout_definitions(void)
+{
+ BUILD_BUG_ON(sizeof(struct erofs_super_block) != 128);
+ BUILD_BUG_ON(sizeof(struct erofs_inode_compact) != 32);
+ BUILD_BUG_ON(sizeof(struct erofs_inode_extended) != 64);
+ BUILD_BUG_ON(sizeof(struct erofs_xattr_ibody_header) != 12);
+ BUILD_BUG_ON(sizeof(struct erofs_xattr_entry) != 4);
+ BUILD_BUG_ON(sizeof(struct z_erofs_map_header) != 8);
+ BUILD_BUG_ON(sizeof(struct z_erofs_vle_decompressed_index) != 8);
+ BUILD_BUG_ON(sizeof(struct erofs_dirent) != 12);
+
+ BUILD_BUG_ON(BIT(Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) <
+ Z_EROFS_VLE_CLUSTER_TYPE_MAX - 1);
+}
+
+#endif
+
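To make the xattr sizing helpers above concrete, here is a small worked sketch (the function name is made up for illustration): five inline xattr slots, plus one entry carrying a 7-byte name and an 11-byte value.

/* illustration only: the inline xattr sizing helpers above with concrete numbers */
static void erofs_xattr_size_example(void)
{
	/* 5 inline xattr slots: 12-byte header + 4 * (5 - 1) == 28 bytes */
	unsigned int ibody = erofs_xattr_ibody_size(cpu_to_le16(5));

	/* a 7-byte name with an 11-byte value: round_up(4 + 7 + 11, 4) == 24 bytes */
	struct erofs_xattr_entry e = {
		.e_name_len = 7,
		.e_value_size = cpu_to_le16(11),
	};
	unsigned int entry = erofs_xattr_entry_size(&e);
}
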
diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c
new file mode 100644
index 000000000000..3350ab65d892
--- /dev/null
+++ b/fs/erofs/inode.c
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "xattr.h"
+
+#include <trace/events/erofs.h>
+
+/* no locking */
+static int erofs_read_inode(struct inode *inode, void *data)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct erofs_inode_compact *dic = data;
+ struct erofs_inode_extended *die;
+
+ const unsigned int ifmt = le16_to_cpu(dic->i_format);
+ struct erofs_sb_info *sbi = EROFS_SB(inode->i_sb);
+ erofs_blk_t nblks = 0;
+
+ vi->datalayout = erofs_inode_datalayout(ifmt);
+
+ if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) {
+ erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu",
+ vi->datalayout, vi->nid);
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ }
+
+ switch (erofs_inode_version(ifmt)) {
+ case EROFS_INODE_LAYOUT_EXTENDED:
+ die = data;
+
+ vi->inode_isize = sizeof(struct erofs_inode_extended);
+ vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount);
+
+ inode->i_mode = le16_to_cpu(die->i_mode);
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ case S_IFLNK:
+ vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ inode->i_rdev =
+ new_decode_dev(le32_to_cpu(die->i_u.rdev));
+ break;
+ case S_IFIFO:
+ case S_IFSOCK:
+ inode->i_rdev = 0;
+ break;
+ default:
+ goto bogusimode;
+ }
+ i_uid_write(inode, le32_to_cpu(die->i_uid));
+ i_gid_write(inode, le32_to_cpu(die->i_gid));
+ set_nlink(inode, le32_to_cpu(die->i_nlink));
+
+ /* ns timestamp */
+ inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+ le64_to_cpu(die->i_ctime);
+ inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+ le32_to_cpu(die->i_ctime_nsec);
+
+ inode->i_size = le64_to_cpu(die->i_size);
+
+ /* total blocks for compressed files */
+ if (erofs_inode_is_data_compressed(vi->datalayout))
+ nblks = le32_to_cpu(die->i_u.compressed_blocks);
+ break;
+ case EROFS_INODE_LAYOUT_COMPACT:
+ vi->inode_isize = sizeof(struct erofs_inode_compact);
+ vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount);
+
+ inode->i_mode = le16_to_cpu(dic->i_mode);
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ case S_IFDIR:
+ case S_IFLNK:
+ vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ inode->i_rdev =
+ new_decode_dev(le32_to_cpu(dic->i_u.rdev));
+ break;
+ case S_IFIFO:
+ case S_IFSOCK:
+ inode->i_rdev = 0;
+ break;
+ default:
+ goto bogusimode;
+ }
+ i_uid_write(inode, le16_to_cpu(dic->i_uid));
+ i_gid_write(inode, le16_to_cpu(dic->i_gid));
+ set_nlink(inode, le16_to_cpu(dic->i_nlink));
+
+ /* use build time to derive all file time */
+ inode->i_mtime.tv_sec = inode->i_ctime.tv_sec =
+ sbi->build_time;
+ inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec =
+ sbi->build_time_nsec;
+
+ inode->i_size = le32_to_cpu(dic->i_size);
+ if (erofs_inode_is_data_compressed(vi->datalayout))
+ nblks = le32_to_cpu(dic->i_u.compressed_blocks);
+ break;
+ default:
+ erofs_err(inode->i_sb,
+ "unsupported on-disk inode version %u of nid %llu",
+ erofs_inode_version(ifmt), vi->nid);
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ }
+
+ if (!nblks)
+ /* measure inode.i_blocks as generic filesystems do */
+ inode->i_blocks = roundup(inode->i_size, EROFS_BLKSIZ) >> 9;
+ else
+ inode->i_blocks = nblks << LOG_SECTORS_PER_BLOCK;
+ return 0;
+
+bogusimode:
+ erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu",
+ inode->i_mode, vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+}
+
+static int erofs_fill_symlink(struct inode *inode, void *data,
+ unsigned int m_pofs)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+ char *lnk;
+
+ /* if it cannot be handled with fast symlink scheme */
+ if (vi->datalayout != EROFS_INODE_FLAT_INLINE ||
+ inode->i_size >= PAGE_SIZE) {
+ inode->i_op = &erofs_symlink_iops;
+ return 0;
+ }
+
+ lnk = kmalloc(inode->i_size + 1, GFP_KERNEL);
+ if (!lnk)
+ return -ENOMEM;
+
+ m_pofs += vi->inode_isize + vi->xattr_isize;
+ /* inline symlink data shouldn't cross the page boundary either */
+ if (m_pofs + inode->i_size > PAGE_SIZE) {
+ kfree(lnk);
+ erofs_err(inode->i_sb,
+ "inline data cross block boundary @ nid %llu",
+ vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+
+ memcpy(lnk, data + m_pofs, inode->i_size);
+ lnk[inode->i_size] = '\0';
+
+ inode->i_link = lnk;
+ inode->i_op = &erofs_fast_symlink_iops;
+ return 0;
+}
+
+static int erofs_fill_inode(struct inode *inode, int isdir)
+{
+ struct super_block *sb = inode->i_sb;
+ struct erofs_inode *vi = EROFS_I(inode);
+ struct page *page;
+ void *data;
+ int err;
+ erofs_blk_t blkaddr;
+ unsigned int ofs;
+ erofs_off_t inode_loc;
+
+ trace_erofs_fill_inode(inode, isdir);
+ inode_loc = iloc(EROFS_SB(sb), vi->nid);
+ blkaddr = erofs_blknr(inode_loc);
+ ofs = erofs_blkoff(inode_loc);
+
+ erofs_dbg("%s, reading inode nid %llu at %u of blkaddr %u",
+ __func__, vi->nid, ofs, blkaddr);
+
+ page = erofs_get_meta_page(sb, blkaddr);
+
+ if (IS_ERR(page)) {
+ erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
+ vi->nid, PTR_ERR(page));
+ return PTR_ERR(page);
+ }
+
+ DBG_BUGON(!PageUptodate(page));
+ data = page_address(page);
+
+ err = erofs_read_inode(inode, data + ofs);
+ if (err)
+ goto out_unlock;
+
+ /* setup the new inode */
+ switch (inode->i_mode & S_IFMT) {
+ case S_IFREG:
+ inode->i_op = &erofs_generic_iops;
+ inode->i_fop = &generic_ro_fops;
+ break;
+ case S_IFDIR:
+ inode->i_op = &erofs_dir_iops;
+ inode->i_fop = &erofs_dir_fops;
+ break;
+ case S_IFLNK:
+ err = erofs_fill_symlink(inode, data, ofs);
+ if (err)
+ goto out_unlock;
+ inode_nohighmem(inode);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+ inode->i_op = &erofs_generic_iops;
+ init_special_inode(inode, inode->i_mode, inode->i_rdev);
+ goto out_unlock;
+ default:
+ err = -EFSCORRUPTED;
+ goto out_unlock;
+ }
+
+ if (erofs_inode_is_data_compressed(vi->datalayout)) {
+ err = z_erofs_fill_inode(inode);
+ goto out_unlock;
+ }
+ inode->i_mapping->a_ops = &erofs_raw_access_aops;
+
+out_unlock:
+ unlock_page(page);
+ put_page(page);
+ return err;
+}
+
+/*
+ * erofs nid is 64bits, but i_ino is 'unsigned long', therefore
+ * we should do more for 32-bit platform to find the right inode.
+ */
+static int erofs_ilookup_test_actor(struct inode *inode, void *opaque)
+{
+ const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+ return EROFS_I(inode)->nid == nid;
+}
+
+static int erofs_iget_set_actor(struct inode *inode, void *opaque)
+{
+ const erofs_nid_t nid = *(erofs_nid_t *)opaque;
+
+ inode->i_ino = erofs_inode_hash(nid);
+ return 0;
+}
+
+static inline struct inode *erofs_iget_locked(struct super_block *sb,
+ erofs_nid_t nid)
+{
+ const unsigned long hashval = erofs_inode_hash(nid);
+
+ return iget5_locked(sb, hashval, erofs_ilookup_test_actor,
+ erofs_iget_set_actor, &nid);
+}
+
+struct inode *erofs_iget(struct super_block *sb,
+ erofs_nid_t nid,
+ bool isdir)
+{
+ struct inode *inode = erofs_iget_locked(sb, nid);
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (inode->i_state & I_NEW) {
+ int err;
+ struct erofs_inode *vi = EROFS_I(inode);
+
+ vi->nid = nid;
+
+ err = erofs_fill_inode(inode, isdir);
+ if (!err)
+ unlock_new_inode(inode);
+ else {
+ iget_failed(inode);
+ inode = ERR_PTR(err);
+ }
+ }
+ return inode;
+}
+
+int erofs_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int query_flags)
+{
+ struct inode *const inode = d_inode(path->dentry);
+
+ if (erofs_inode_is_data_compressed(EROFS_I(inode)->datalayout))
+ stat->attributes |= STATX_ATTR_COMPRESSED;
+
+ stat->attributes |= STATX_ATTR_IMMUTABLE;
+ stat->attributes_mask |= (STATX_ATTR_COMPRESSED |
+ STATX_ATTR_IMMUTABLE);
+
+ generic_fillattr(inode, stat);
+ return 0;
+}
+
+const struct inode_operations erofs_generic_iops = {
+ .getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
+};
+
+const struct inode_operations erofs_symlink_iops = {
+ .get_link = page_get_link,
+ .getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
+};
+
+const struct inode_operations erofs_fast_symlink_iops = {
+ .get_link = simple_get_link,
+ .getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
+};
+
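The i_blocks accounting in erofs_read_inode() above reports 512-byte sectors. The sketch below is a made-up helper for illustration only, assuming 4KiB blocks (so LOG_SECTORS_PER_BLOCK == 3), and walks both branches with concrete numbers.

/* illustration only: the i_blocks math from erofs_read_inode() with 4KiB blocks */
static blkcnt_t erofs_i_blocks_example(void)
{
	loff_t i_size = 10000;		/* an uncompressed 10000-byte file */
	erofs_blk_t nblks = 2;		/* a compressed file occupying 2 on-disk blocks */

	blkcnt_t plain = roundup(i_size, EROFS_BLKSIZ) >> 9;	/* 12288 >> 9 == 24 sectors */
	blkcnt_t zipped = nblks << LOG_SECTORS_PER_BLOCK;	/* 2 << 3 == 16 sectors */

	return plain + zipped;		/* returned only to keep the sketch self-contained */
}
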
diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h
new file mode 100644
index 000000000000..544a453f3076
--- /dev/null
+++ b/fs/erofs/internal.h
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_INTERNAL_H
+#define __EROFS_INTERNAL_H
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/bio.h>
+#include <linux/buffer_head.h>
+#include <linux/magic.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include "erofs_fs.h"
+
+/* redefine pr_fmt "erofs: " */
+#undef pr_fmt
+#define pr_fmt(fmt) "erofs: " fmt
+
+__printf(3, 4) void _erofs_err(struct super_block *sb,
+ const char *function, const char *fmt, ...);
+#define erofs_err(sb, fmt, ...) \
+ _erofs_err(sb, __func__, fmt "\n", ##__VA_ARGS__)
+__printf(3, 4) void _erofs_info(struct super_block *sb,
+ const char *function, const char *fmt, ...);
+#define erofs_info(sb, fmt, ...) \
+ _erofs_info(sb, __func__, fmt "\n", ##__VA_ARGS__)
+#ifdef CONFIG_EROFS_FS_DEBUG
+#define erofs_dbg(x, ...) pr_debug(x "\n", ##__VA_ARGS__)
+#define DBG_BUGON BUG_ON
+#else
+#define erofs_dbg(x, ...) ((void)0)
+#define DBG_BUGON(x) ((void)(x))
+#endif /* !CONFIG_EROFS_FS_DEBUG */
+
+/* use EROFS_SUPER_MAGIC_V1 to represent the whole filesystem */
+#define EROFS_SUPER_MAGIC EROFS_SUPER_MAGIC_V1
+
+typedef u64 erofs_nid_t;
+typedef u64 erofs_off_t;
+/* data type for filesystem-wide blocks number */
+typedef u32 erofs_blk_t;
+
+struct erofs_sb_info {
+#ifdef CONFIG_EROFS_FS_ZIP
+ /* list for all registered superblocks, mainly for shrinker */
+ struct list_head list;
+ struct mutex umount_mutex;
+
+ /* the dedicated workstation for compression */
+ struct radix_tree_root workstn_tree;
+
+ /* threshold for synchronous decompression */
+ unsigned int max_sync_decompress_pages;
+
+ unsigned int shrinker_run_no;
+
+ /* current strategy of how to use managed cache */
+ unsigned char cache_strategy;
+
+ /* pseudo inode to manage cached pages */
+ struct inode *managed_cache;
+#endif /* CONFIG_EROFS_FS_ZIP */
+ u32 blocks;
+ u32 meta_blkaddr;
+#ifdef CONFIG_EROFS_FS_XATTR
+ u32 xattr_blkaddr;
+#endif
+
+ /* inode slot unit size in bit shift */
+ unsigned char islotbits;
+
+ u32 build_time_nsec;
+ u64 build_time;
+
+ /* what we really care about is nid, rather than ino */
+ erofs_nid_t root_nid;
+ /* used for statfs, f_files - f_favail */
+ u64 inos;
+
+ u8 uuid[16]; /* 128-bit uuid for volume */
+ u8 volume_name[16]; /* volume name */
+ u32 feature_incompat;
+
+ unsigned int mount_opt;
+};
+
+#define EROFS_SB(sb) ((struct erofs_sb_info *)(sb)->s_fs_info)
+#define EROFS_I_SB(inode) ((struct erofs_sb_info *)(inode)->i_sb->s_fs_info)
+
+/* Mount flags set via mount options or defaults */
+#define EROFS_MOUNT_XATTR_USER 0x00000010
+#define EROFS_MOUNT_POSIX_ACL 0x00000020
+
+#define clear_opt(sbi, option) ((sbi)->mount_opt &= ~EROFS_MOUNT_##option)
+#define set_opt(sbi, option) ((sbi)->mount_opt |= EROFS_MOUNT_##option)
+#define test_opt(sbi, option) ((sbi)->mount_opt & EROFS_MOUNT_##option)
+
+#ifdef CONFIG_EROFS_FS_ZIP
+enum {
+ EROFS_ZIP_CACHE_DISABLED,
+ EROFS_ZIP_CACHE_READAHEAD,
+ EROFS_ZIP_CACHE_READAROUND
+};
+
+#define EROFS_LOCKED_MAGIC (INT_MIN | 0xE0F510CCL)
+
+/* basic unit of the workstation of a super_block */
+struct erofs_workgroup {
+ /* the workgroup index in the workstation */
+ pgoff_t index;
+
+ /* overall workgroup reference count */
+ atomic_t refcount;
+};
+
+#if defined(CONFIG_SMP)
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+ int val)
+{
+ preempt_disable();
+ if (val != atomic_cmpxchg(&grp->refcount, val, EROFS_LOCKED_MAGIC)) {
+ preempt_enable();
+ return false;
+ }
+ return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+ int orig_val)
+{
+ /*
+ * other observers should notice all modifications
+ * in the freezing period.
+ */
+ smp_mb();
+ atomic_set(&grp->refcount, orig_val);
+ preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+ return atomic_cond_read_relaxed(&grp->refcount,
+ VAL != EROFS_LOCKED_MAGIC);
+}
+#else
+static inline bool erofs_workgroup_try_to_freeze(struct erofs_workgroup *grp,
+ int val)
+{
+ preempt_disable();
+ /* no need to spin on UP platforms, let's just disable preemption. */
+ if (val != atomic_read(&grp->refcount)) {
+ preempt_enable();
+ return false;
+ }
+ return true;
+}
+
+static inline void erofs_workgroup_unfreeze(struct erofs_workgroup *grp,
+ int orig_val)
+{
+ preempt_enable();
+}
+
+static inline int erofs_wait_on_workgroup_freezed(struct erofs_workgroup *grp)
+{
+ int v = atomic_read(&grp->refcount);
+
+ /* the workgroup is never frozen on uniprocessor systems */
+ DBG_BUGON(v == EROFS_LOCKED_MAGIC);
+ return v;
+}
+#endif /* !CONFIG_SMP */
+
+/* hard limit of pages per compressed cluster */
+#define Z_EROFS_CLUSTER_MAX_PAGES (CONFIG_EROFS_FS_CLUSTER_PAGE_LIMIT)
+#define EROFS_PCPUBUF_NR_PAGES Z_EROFS_CLUSTER_MAX_PAGES
+#else
+#define EROFS_PCPUBUF_NR_PAGES 0
+#endif /* !CONFIG_EROFS_FS_ZIP */
+
+/* we strictly follow PAGE_SIZE and do not use buffer heads yet */
+#define LOG_BLOCK_SIZE PAGE_SHIFT
+
+#undef LOG_SECTORS_PER_BLOCK
+#define LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9)
+
+#undef SECTORS_PER_BLOCK
+#define SECTORS_PER_BLOCK (1 << LOG_SECTORS_PER_BLOCK)
+
+#define EROFS_BLKSIZ (1 << LOG_BLOCK_SIZE)
+
+#if (EROFS_BLKSIZ % 4096 || !EROFS_BLKSIZ)
+#error erofs cannot be used in this platform
+#endif
+
+#define ROOT_NID(sb) ((sb)->root_nid)
+
+#define erofs_blknr(addr) ((addr) / EROFS_BLKSIZ)
+#define erofs_blkoff(addr) ((addr) % EROFS_BLKSIZ)
+#define blknr_to_addr(nr) ((erofs_off_t)(nr) * EROFS_BLKSIZ)
+
+static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
+{
+ return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
+}
+
+/* atomic flag definitions */
+#define EROFS_I_EA_INITED_BIT 0
+#define EROFS_I_Z_INITED_BIT 1
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_I_BL_XATTR_BIT (BITS_PER_LONG - 1)
+#define EROFS_I_BL_Z_BIT (BITS_PER_LONG - 2)
+
+struct erofs_inode {
+ erofs_nid_t nid;
+
+ /* atomic flags (including bitlocks) */
+ unsigned long flags;
+
+ unsigned char datalayout;
+ unsigned char inode_isize;
+ unsigned short xattr_isize;
+
+ unsigned int xattr_shared_count;
+ unsigned int *xattr_shared_xattrs;
+
+ union {
+ erofs_blk_t raw_blkaddr;
+#ifdef CONFIG_EROFS_FS_ZIP
+ struct {
+ unsigned short z_advise;
+ unsigned char z_algorithmtype[2];
+ unsigned char z_logical_clusterbits;
+ unsigned char z_physical_clusterbits[2];
+ };
+#endif /* CONFIG_EROFS_FS_ZIP */
+ };
+ /* the corresponding vfs inode */
+ struct inode vfs_inode;
+};
+
+#define EROFS_I(ptr) \
+ container_of(ptr, struct erofs_inode, vfs_inode)
+
+static inline unsigned long erofs_inode_datablocks(struct inode *inode)
+{
+ /* since i_size cannot be changed */
+ return DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+}
+
+static inline unsigned int erofs_bitrange(unsigned int value, unsigned int bit,
+ unsigned int bits)
+{
+ return (value >> bit) & ((1 << bits) - 1);
+}
+
+static inline unsigned int erofs_inode_version(unsigned int value)
+{
+ return erofs_bitrange(value, EROFS_I_VERSION_BIT,
+ EROFS_I_VERSION_BITS);
+}
+
+static inline unsigned int erofs_inode_datalayout(unsigned int value)
+{
+ return erofs_bitrange(value, EROFS_I_DATALAYOUT_BIT,
+ EROFS_I_DATALAYOUT_BITS);
+}
+
+extern const struct super_operations erofs_sops;
+
+extern const struct address_space_operations erofs_raw_access_aops;
+#ifdef CONFIG_EROFS_FS_ZIP
+extern const struct address_space_operations z_erofs_vle_normalaccess_aops;
+#endif
+
+/*
+ * Logical to physical block mapping, used by erofs_map_blocks()
+ *
+ * Different from other filesystems, it is used for 2 access modes:
+ *
+ * 1) RAW access mode:
+ *
+ * Users pass a valid (m_lblk, m_lofs -- usually 0) pair,
+ * and get the valid m_pblk, m_pofs and the longest m_len(in bytes).
+ *
+ * Note that m_lblk in the RAW access mode refers to the number of
+ * the compressed ondisk block rather than the uncompressed
+ * in-memory block for the compressed file.
+ *
+ * m_pofs equals m_lofs except for the inline data page.
+ *
+ * 2) Normal access mode:
+ *
+ * If the inode is not compressed, there is no difference from
+ * the RAW access mode. However, if the inode is compressed,
+ * users should pass a valid (m_lblk, m_lofs) pair, and get
+ * the needed m_pblk, m_pofs, m_len to get the compressed data
+ * and the updated m_lblk, m_lofs which indicates the start
+ * of the corresponding uncompressed data in the file.
+ */
+enum {
+ BH_Zipped = BH_PrivateStart,
+ BH_FullMapped,
+};
+
+/* Has a disk mapping */
+#define EROFS_MAP_MAPPED (1 << BH_Mapped)
+/* Located in metadata (could be copied from bd_inode) */
+#define EROFS_MAP_META (1 << BH_Meta)
+/* The extent has been compressed */
+#define EROFS_MAP_ZIPPED (1 << BH_Zipped)
+/* The length of extent is full */
+#define EROFS_MAP_FULL_MAPPED (1 << BH_FullMapped)
+
+struct erofs_map_blocks {
+ erofs_off_t m_pa, m_la;
+ u64 m_plen, m_llen;
+
+ unsigned int m_flags;
+
+ struct page *mpage;
+};
+
+/* Flags used by erofs_map_blocks() */
+#define EROFS_GET_BLOCKS_RAW 0x0001
+
+/* zmap.c */
+#ifdef CONFIG_EROFS_FS_ZIP
+int z_erofs_fill_inode(struct inode *inode);
+int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags);
+#else
+static inline int z_erofs_fill_inode(struct inode *inode) { return -EOPNOTSUPP; }
+static inline int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_EROFS_FS_ZIP */
+
+/* data.c */
+struct page *erofs_get_meta_page(struct super_block *sb, erofs_blk_t blkaddr);
+
+int erofs_map_blocks(struct inode *, struct erofs_map_blocks *, int);
+
+/* inode.c */
+static inline unsigned long erofs_inode_hash(erofs_nid_t nid)
+{
+#if BITS_PER_LONG == 32
+ return (nid >> 32) ^ (nid & 0xffffffff);
+#else
+ return nid;
+#endif
+}
+
+extern const struct inode_operations erofs_generic_iops;
+extern const struct inode_operations erofs_symlink_iops;
+extern const struct inode_operations erofs_fast_symlink_iops;
+
+struct inode *erofs_iget(struct super_block *sb, erofs_nid_t nid, bool dir);
+int erofs_getattr(const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int query_flags);
+
+/* namei.c */
+extern const struct inode_operations erofs_dir_iops;
+
+int erofs_namei(struct inode *dir, struct qstr *name,
+ erofs_nid_t *nid, unsigned int *d_type);
+
+/* dir.c */
+extern const struct file_operations erofs_dir_fops;
+
+/* utils.c / zdata.c */
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
+
+#if (EROFS_PCPUBUF_NR_PAGES > 0)
+void *erofs_get_pcpubuf(unsigned int pagenr);
+#define erofs_put_pcpubuf(buf) do { \
+ (void)&(buf); \
+ preempt_enable(); \
+} while (0)
+#else
+static inline void *erofs_get_pcpubuf(unsigned int pagenr)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+#define erofs_put_pcpubuf(buf) do {} while (0)
+#endif
+
+#ifdef CONFIG_EROFS_FS_ZIP
+int erofs_workgroup_put(struct erofs_workgroup *grp);
+struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+ pgoff_t index, bool *tag);
+int erofs_register_workgroup(struct super_block *sb,
+ struct erofs_workgroup *grp, bool tag);
+void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
+void erofs_shrinker_register(struct super_block *sb);
+void erofs_shrinker_unregister(struct super_block *sb);
+int __init erofs_init_shrinker(void);
+void erofs_exit_shrinker(void);
+int __init z_erofs_init_zip_subsystem(void);
+void z_erofs_exit_zip_subsystem(void);
+int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *egrp);
+int erofs_try_to_free_cached_page(struct address_space *mapping,
+ struct page *page);
+#else
+static inline void erofs_shrinker_register(struct super_block *sb) {}
+static inline void erofs_shrinker_unregister(struct super_block *sb) {}
+static inline int erofs_init_shrinker(void) { return 0; }
+static inline void erofs_exit_shrinker(void) {}
+static inline int z_erofs_init_zip_subsystem(void) { return 0; }
+static inline void z_erofs_exit_zip_subsystem(void) {}
+#endif /* !CONFIG_EROFS_FS_ZIP */
+
+#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
+
+#endif /* __EROFS_INTERNAL_H */
+
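As a worked example of the nid addressing above: iloc() places inode nid at byte offset blknr_to_addr(meta_blkaddr) + (nid << islotbits), where islotbits is ilog2(sizeof(struct erofs_inode_compact)) == 5 (set later in super.c in this patch). The helper below is made up for illustration and assumes 4KiB blocks with meta_blkaddr == 1.

/* illustration only: locating nid 36 with meta_blkaddr == 1 and islotbits == 5 */
static void erofs_iloc_example(struct erofs_sb_info *sbi)
{
	erofs_off_t off = iloc(sbi, 36);	/* 1 * 4096 + (36 << 5) == 5248 */
	erofs_blk_t blk = erofs_blknr(off);	/* 5248 / 4096 == 1 */
	unsigned int ofs = erofs_blkoff(off);	/* 5248 % 4096 == 1152 */
}
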
diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c
new file mode 100644
index 000000000000..3abbecbf73de
--- /dev/null
+++ b/fs/erofs/namei.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "xattr.h"
+
+#include <trace/events/erofs.h>
+
+struct erofs_qstr {
+ const unsigned char *name;
+ const unsigned char *end;
+};
+
+/* this relies on qn->end being accurate and qn having a trailing '\0' */
+static inline int erofs_dirnamecmp(const struct erofs_qstr *qn,
+ const struct erofs_qstr *qd,
+ unsigned int *matched)
+{
+ unsigned int i = *matched;
+
+ /*
+ * on-disk error: only BUG_ON in debugging mode;
+ * otherwise, return 1 to just skip the invalid name
+ * and go on (for the sake of lookup performance).
+ */
+ DBG_BUGON(qd->name > qd->end);
+
+ /* qd may not have a trailing '\0' */
+ /* however, it is absolutely safe to access bytes below qd->end */
+ while (qd->name + i < qd->end && qd->name[i] != '\0') {
+ if (qn->name[i] != qd->name[i]) {
+ *matched = i;
+ return qn->name[i] > qd->name[i] ? 1 : -1;
+ }
+ ++i;
+ }
+ *matched = i;
+ /* See comments in __d_alloc on the terminating NUL character */
+ return qn->name[i] == '\0' ? 0 : 1;
+}
+
+#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
+
+static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
+ u8 *data,
+ unsigned int dirblksize,
+ const int ndirents)
+{
+ int head, back;
+ unsigned int startprfx, endprfx;
+ struct erofs_dirent *const de = (struct erofs_dirent *)data;
+
+ /* since the 1st dirent has been evaluated previously */
+ head = 1;
+ back = ndirents - 1;
+ startprfx = endprfx = 0;
+
+ while (head <= back) {
+ const int mid = head + (back - head) / 2;
+ const int nameoff = nameoff_from_disk(de[mid].nameoff,
+ dirblksize);
+ unsigned int matched = min(startprfx, endprfx);
+ struct erofs_qstr dname = {
+ .name = data + nameoff,
+ .end = mid >= ndirents - 1 ?
+ data + dirblksize :
+ data + nameoff_from_disk(de[mid + 1].nameoff,
+ dirblksize)
+ };
+
+ /* string comparison without already matched prefix */
+ int ret = erofs_dirnamecmp(name, &dname, &matched);
+
+ if (!ret) {
+ return de + mid;
+ } else if (ret > 0) {
+ head = mid + 1;
+ startprfx = matched;
+ } else {
+ back = mid - 1;
+ endprfx = matched;
+ }
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct page *find_target_block_classic(struct inode *dir,
+ struct erofs_qstr *name,
+ int *_ndirents)
+{
+ unsigned int startprfx, endprfx;
+ int head, back;
+ struct address_space *const mapping = dir->i_mapping;
+ struct page *candidate = ERR_PTR(-ENOENT);
+
+ startprfx = endprfx = 0;
+ head = 0;
+ back = erofs_inode_datablocks(dir) - 1;
+
+ while (head <= back) {
+ const int mid = head + (back - head) / 2;
+ struct page *page = read_mapping_page(mapping, mid, NULL);
+
+ if (!IS_ERR(page)) {
+ struct erofs_dirent *de = kmap_atomic(page);
+ const int nameoff = nameoff_from_disk(de->nameoff,
+ EROFS_BLKSIZ);
+ const int ndirents = nameoff / sizeof(*de);
+ int diff;
+ unsigned int matched;
+ struct erofs_qstr dname;
+
+ if (!ndirents) {
+ kunmap_atomic(de);
+ put_page(page);
+ erofs_err(dir->i_sb,
+ "corrupted dir block %d @ nid %llu",
+ mid, EROFS_I(dir)->nid);
+ DBG_BUGON(1);
+ page = ERR_PTR(-EFSCORRUPTED);
+ goto out;
+ }
+
+ matched = min(startprfx, endprfx);
+
+ dname.name = (u8 *)de + nameoff;
+ if (ndirents == 1)
+ dname.end = (u8 *)de + EROFS_BLKSIZ;
+ else
+ dname.end = (u8 *)de +
+ nameoff_from_disk(de[1].nameoff,
+ EROFS_BLKSIZ);
+
+ /* string comparison without already matched prefix */
+ diff = erofs_dirnamecmp(name, &dname, &matched);
+ kunmap_atomic(de);
+
+ if (!diff) {
+ *_ndirents = 0;
+ goto out;
+ } else if (diff > 0) {
+ head = mid + 1;
+ startprfx = matched;
+
+ if (!IS_ERR(candidate))
+ put_page(candidate);
+ candidate = page;
+ *_ndirents = ndirents;
+ } else {
+ put_page(page);
+
+ back = mid - 1;
+ endprfx = matched;
+ }
+ continue;
+ }
+out: /* free if the candidate is valid */
+ if (!IS_ERR(candidate))
+ put_page(candidate);
+ return page;
+ }
+ return candidate;
+}
+
+int erofs_namei(struct inode *dir,
+ struct qstr *name,
+ erofs_nid_t *nid, unsigned int *d_type)
+{
+ int ndirents;
+ struct page *page;
+ void *data;
+ struct erofs_dirent *de;
+ struct erofs_qstr qn;
+
+ if (!dir->i_size)
+ return -ENOENT;
+
+ qn.name = name->name;
+ qn.end = name->name + name->len;
+
+ ndirents = 0;
+ page = find_target_block_classic(dir, &qn, &ndirents);
+
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ data = kmap_atomic(page);
+ /* the target page has been mapped */
+ if (ndirents)
+ de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
+ else
+ de = (struct erofs_dirent *)data;
+
+ if (!IS_ERR(de)) {
+ *nid = le64_to_cpu(de->nid);
+ *d_type = de->file_type;
+ }
+
+ kunmap_atomic(data);
+ put_page(page);
+
+ return PTR_ERR_OR_ZERO(de);
+}
+
+/* NOTE: i_mutex is already held by vfs */
+static struct dentry *erofs_lookup(struct inode *dir,
+ struct dentry *dentry,
+ unsigned int flags)
+{
+ int err;
+ erofs_nid_t nid;
+ unsigned int d_type;
+ struct inode *inode;
+
+ DBG_BUGON(!d_really_is_negative(dentry));
+ /* the dentry must be unhashed in lookup, no need to worry about it */
+ DBG_BUGON(!d_unhashed(dentry));
+
+ trace_erofs_lookup(dir, dentry, flags);
+
+ /* file name exceeds fs limit */
+ if (dentry->d_name.len > EROFS_NAME_LEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ /* false uninitialized warnings on gcc 4.8.x */
+ err = erofs_namei(dir, &dentry->d_name, &nid, &d_type);
+
+ if (err == -ENOENT) {
+ /* negative dentry */
+ inode = NULL;
+ } else if (err) {
+ inode = ERR_PTR(err);
+ } else {
+ erofs_dbg("%s, %s (nid %llu) found, d_type %u", __func__,
+ dentry->d_name.name, nid, d_type);
+ inode = erofs_iget(dir->i_sb, nid, d_type == FT_DIR);
+ }
+ return d_splice_alias(inode, dentry);
+}
+
+const struct inode_operations erofs_dir_iops = {
+ .lookup = erofs_lookup,
+ .getattr = erofs_getattr,
+#ifdef CONFIG_EROFS_FS_XATTR
+ .listxattr = erofs_listxattr,
+#endif
+ .get_acl = erofs_get_acl,
+};
+
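The directory lookup above is a binary search that avoids re-comparing bytes already known to match at both search bounds (startprfx/endprfx): because the names are sorted, every entry between the bounds shares at least min(startprfx, endprfx) leading bytes with the key. A minimal self-contained sketch of that idea on a plain sorted array of NUL-terminated names (all helper names are made up for illustration):

/* illustration only: prefix-reusing binary search, as in find_target_dirent() */
static int cmp_from(const char *a, const char *b, unsigned int *matched)
{
	unsigned int i = *matched;

	while (a[i] && a[i] == b[i])
		++i;
	*matched = i;
	return (unsigned char)a[i] - (unsigned char)b[i];
}

static int prefix_binary_search(const char *name, const char *const *sorted, int nr)
{
	int head = 0, back = nr - 1;
	unsigned int startprfx = 0, endprfx = 0;

	while (head <= back) {
		const int mid = head + (back - head) / 2;
		/* every name between the bounds shares at least this many
		 * leading bytes with the key, so the comparison skips them
		 */
		unsigned int matched = startprfx < endprfx ? startprfx : endprfx;
		int diff = cmp_from(name, sorted[mid], &matched);

		if (!diff)
			return mid;
		if (diff > 0) {
			head = mid + 1;
			startprfx = matched;
		} else {
			back = mid - 1;
			endprfx = matched;
		}
	}
	return -1;	/* not found */
}
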
diff --git a/fs/erofs/super.c b/fs/erofs/super.c
new file mode 100644
index 000000000000..caf9a95173b0
--- /dev/null
+++ b/fs/erofs/super.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include <linux/module.h>
+#include <linux/buffer_head.h>
+#include <linux/statfs.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include "xattr.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/erofs.h>
+
+static struct kmem_cache *erofs_inode_cachep __read_mostly;
+
+void _erofs_err(struct super_block *sb, const char *function,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf);
+ va_end(args);
+}
+
+void _erofs_info(struct super_block *sb, const char *function,
+ const char *fmt, ...)
+{
+ struct va_format vaf;
+ va_list args;
+
+ va_start(args, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &args;
+
+ pr_info("(device %s): %pV", sb->s_id, &vaf);
+ va_end(args);
+}
+
+static void erofs_inode_init_once(void *ptr)
+{
+ struct erofs_inode *vi = ptr;
+
+ inode_init_once(&vi->vfs_inode);
+}
+
+static struct inode *erofs_alloc_inode(struct super_block *sb)
+{
+ struct erofs_inode *vi =
+ kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL);
+
+ if (!vi)
+ return NULL;
+
+ /* zero out everything except vfs_inode */
+ memset(vi, 0, offsetof(struct erofs_inode, vfs_inode));
+ return &vi->vfs_inode;
+}
+
+static void erofs_free_inode(struct inode *inode)
+{
+ struct erofs_inode *vi = EROFS_I(inode);
+
+ /* be careful of RCU symlink path */
+ if (inode->i_op == &erofs_fast_symlink_iops)
+ kfree(inode->i_link);
+ kfree(vi->xattr_shared_xattrs);
+
+ kmem_cache_free(erofs_inode_cachep, vi);
+}
+
+static bool check_layout_compatibility(struct super_block *sb,
+ struct erofs_super_block *dsb)
+{
+ const unsigned int feature = le32_to_cpu(dsb->feature_incompat);
+
+ EROFS_SB(sb)->feature_incompat = feature;
+
+ /* check if current kernel meets all mandatory requirements */
+ if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) {
+ erofs_err(sb,
+ "unidentified incompatible feature %x, please upgrade kernel version",
+ feature & ~EROFS_ALL_FEATURE_INCOMPAT);
+ return false;
+ }
+ return true;
+}
+
+static int erofs_read_superblock(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi;
+ struct page *page;
+ struct erofs_super_block *dsb;
+ unsigned int blkszbits;
+ void *data;
+ int ret;
+
+ page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL);
+ if (IS_ERR(page)) {
+ erofs_err(sb, "cannot read erofs superblock");
+ return -EIO;
+ }
+
+ sbi = EROFS_SB(sb);
+
+ data = kmap_atomic(page);
+ dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET);
+
+ ret = -EINVAL;
+ if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) {
+ erofs_err(sb, "cannot find valid erofs superblock");
+ goto out;
+ }
+
+ blkszbits = dsb->blkszbits;
+ /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */
+ if (blkszbits != LOG_BLOCK_SIZE) {
+ erofs_err(sb, "blksize %u isn't supported on this platform",
+ 1 << blkszbits);
+ goto out;
+ }
+
+ if (!check_layout_compatibility(sb, dsb))
+ goto out;
+
+ sbi->blocks = le32_to_cpu(dsb->blocks);
+ sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr);
+#ifdef CONFIG_EROFS_FS_XATTR
+ sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr);
+#endif
+ sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact));
+ sbi->root_nid = le16_to_cpu(dsb->root_nid);
+ sbi->inos = le64_to_cpu(dsb->inos);
+
+ sbi->build_time = le64_to_cpu(dsb->build_time);
+ sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec);
+
+ memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid));
+
+ ret = strscpy(sbi->volume_name, dsb->volume_name,
+ sizeof(dsb->volume_name));
+ if (ret < 0) { /* -E2BIG */
+ erofs_err(sb, "bad volume name without NIL terminator");
+ ret = -EFSCORRUPTED;
+ goto out;
+ }
+ ret = 0;
+out:
+ kunmap_atomic(data);
+ put_page(page);
+ return ret;
+}
+
+#ifdef CONFIG_EROFS_FS_ZIP
+static int erofs_build_cache_strategy(struct super_block *sb,
+ substring_t *args)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ const char *cs = match_strdup(args);
+ int err = 0;
+
+ if (!cs) {
+ erofs_err(sb, "Not enough memory to store cache strategy");
+ return -ENOMEM;
+ }
+
+ if (!strcmp(cs, "disabled")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED;
+ } else if (!strcmp(cs, "readahead")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD;
+ } else if (!strcmp(cs, "readaround")) {
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ } else {
+ erofs_err(sb, "Unrecognized cache strategy \"%s\"", cs);
+ err = -EINVAL;
+ }
+ kfree(cs);
+ return err;
+}
+#else
+static int erofs_build_cache_strategy(struct super_block *sb,
+ substring_t *args)
+{
+ erofs_info(sb, "EROFS compression is disabled, so cache strategy is ignored");
+ return 0;
+}
+#endif
+
+/* set up default EROFS parameters */
+static void erofs_default_options(struct erofs_sb_info *sbi)
+{
+#ifdef CONFIG_EROFS_FS_ZIP
+ sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND;
+ sbi->max_sync_decompress_pages = 3;
+#endif
+#ifdef CONFIG_EROFS_FS_XATTR
+ set_opt(sbi, XATTR_USER);
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ set_opt(sbi, POSIX_ACL);
+#endif
+}
+
+enum {
+ Opt_user_xattr,
+ Opt_nouser_xattr,
+ Opt_acl,
+ Opt_noacl,
+ Opt_cache_strategy,
+ Opt_err
+};
+
+static match_table_t erofs_tokens = {
+ {Opt_user_xattr, "user_xattr"},
+ {Opt_nouser_xattr, "nouser_xattr"},
+ {Opt_acl, "acl"},
+ {Opt_noacl, "noacl"},
+ {Opt_cache_strategy, "cache_strategy=%s"},
+ {Opt_err, NULL}
+};
+
+static int erofs_parse_options(struct super_block *sb, char *options)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *p;
+ int err;
+
+ if (!options)
+ return 0;
+
+ while ((p = strsep(&options, ","))) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ args[0].to = args[0].from = NULL;
+ token = match_token(p, erofs_tokens, args);
+
+ switch (token) {
+#ifdef CONFIG_EROFS_FS_XATTR
+ case Opt_user_xattr:
+ set_opt(EROFS_SB(sb), XATTR_USER);
+ break;
+ case Opt_nouser_xattr:
+ clear_opt(EROFS_SB(sb), XATTR_USER);
+ break;
+#else
+ case Opt_user_xattr:
+ erofs_info(sb, "user_xattr options not supported");
+ break;
+ case Opt_nouser_xattr:
+ erofs_info(sb, "nouser_xattr options not supported");
+ break;
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ case Opt_acl:
+ set_opt(EROFS_SB(sb), POSIX_ACL);
+ break;
+ case Opt_noacl:
+ clear_opt(EROFS_SB(sb), POSIX_ACL);
+ break;
+#else
+ case Opt_acl:
+ erofs_info(sb, "acl options not supported");
+ break;
+ case Opt_noacl:
+ erofs_info(sb, "noacl options not supported");
+ break;
+#endif
+ case Opt_cache_strategy:
+ err = erofs_build_cache_strategy(sb, args);
+ if (err)
+ return err;
+ break;
+ default:
+ erofs_err(sb, "Unrecognized mount option \"%s\" or missing value", p);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_EROFS_FS_ZIP
+static const struct address_space_operations managed_cache_aops;
+
+static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask)
+{
+ int ret = 1; /* 0 - busy */
+ struct address_space *const mapping = page->mapping;
+
+ DBG_BUGON(!PageLocked(page));
+ DBG_BUGON(mapping->a_ops != &managed_cache_aops);
+
+ if (PagePrivate(page))
+ ret = erofs_try_to_free_cached_page(mapping, page);
+
+ return ret;
+}
+
+static void erofs_managed_cache_invalidatepage(struct page *page,
+ unsigned int offset,
+ unsigned int length)
+{
+ const unsigned int stop = length + offset;
+
+ DBG_BUGON(!PageLocked(page));
+
+ /* Check for potential overflow in debug mode */
+ DBG_BUGON(stop > PAGE_SIZE || stop < length);
+
+ if (offset == 0 && stop == PAGE_SIZE)
+ while (!erofs_managed_cache_releasepage(page, GFP_NOFS))
+ cond_resched();
+}
+
+static const struct address_space_operations managed_cache_aops = {
+ .releasepage = erofs_managed_cache_releasepage,
+ .invalidatepage = erofs_managed_cache_invalidatepage,
+};
+
+static int erofs_init_managed_cache(struct super_block *sb)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ struct inode *const inode = new_inode(sb);
+
+ if (!inode)
+ return -ENOMEM;
+
+ set_nlink(inode, 1);
+ inode->i_size = OFFSET_MAX;
+
+ inode->i_mapping->a_ops = &managed_cache_aops;
+ mapping_set_gfp_mask(inode->i_mapping,
+ GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE);
+ sbi->managed_cache = inode;
+ return 0;
+}
+#else
+static int erofs_init_managed_cache(struct super_block *sb) { return 0; }
+#endif
+
+static int erofs_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *inode;
+ struct erofs_sb_info *sbi;
+ int err;
+
+ sb->s_magic = EROFS_SUPER_MAGIC;
+
+ if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) {
+ erofs_err(sb, "failed to set erofs blksize");
+ return -EINVAL;
+ }
+
+ sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ if (!sbi)
+ return -ENOMEM;
+
+ sb->s_fs_info = sbi;
+ err = erofs_read_superblock(sb);
+ if (err)
+ return err;
+
+ sb->s_flags |= SB_RDONLY | SB_NOATIME;
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_time_gran = 1;
+
+ sb->s_op = &erofs_sops;
+
+#ifdef CONFIG_EROFS_FS_XATTR
+ sb->s_xattr = erofs_xattr_handlers;
+#endif
+ /* set erofs default mount options */
+ erofs_default_options(sbi);
+
+ err = erofs_parse_options(sb, data);
+ if (err)
+ return err;
+
+ if (test_opt(sbi, POSIX_ACL))
+ sb->s_flags |= SB_POSIXACL;
+ else
+ sb->s_flags &= ~SB_POSIXACL;
+
+#ifdef CONFIG_EROFS_FS_ZIP
+ INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC);
+#endif
+
+ /* get the root inode */
+ inode = erofs_iget(sb, ROOT_NID(sbi), true);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ if (!S_ISDIR(inode->i_mode)) {
+ erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
+ ROOT_NID(sbi), inode->i_mode);
+ iput(inode);
+ return -EINVAL;
+ }
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ erofs_shrinker_register(sb);
+ /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */
+ err = erofs_init_managed_cache(sb);
+ if (err)
+ return err;
+
+ erofs_info(sb, "mounted with opts: %s, root inode @ nid %llu.",
+ (char *)data, ROOT_NID(sbi));
+ return 0;
+}
+
+static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data)
+{
+ return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super);
+}
+
+/*
+ * could be triggered after deactivate_locked_super() is called,
+ * thus it covers both umount and failed mounts.
+ */
+static void erofs_kill_sb(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi;
+
+ WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC);
+
+ kill_block_super(sb);
+
+ sbi = EROFS_SB(sb);
+ if (!sbi)
+ return;
+ kfree(sbi);
+ sb->s_fs_info = NULL;
+}
+
+/* called when ->s_root is non-NULL */
+static void erofs_put_super(struct super_block *sb)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+ DBG_BUGON(!sbi);
+
+ erofs_shrinker_unregister(sb);
+#ifdef CONFIG_EROFS_FS_ZIP
+ iput(sbi->managed_cache);
+ sbi->managed_cache = NULL;
+#endif
+}
+
+static struct file_system_type erofs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "erofs",
+ .mount = erofs_mount,
+ .kill_sb = erofs_kill_sb,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+MODULE_ALIAS_FS("erofs");
+
+static int __init erofs_module_init(void)
+{
+ int err;
+
+ erofs_check_ondisk_layout_definitions();
+
+ erofs_inode_cachep = kmem_cache_create("erofs_inode",
+ sizeof(struct erofs_inode), 0,
+ SLAB_RECLAIM_ACCOUNT,
+ erofs_inode_init_once);
+ if (!erofs_inode_cachep) {
+ err = -ENOMEM;
+ goto icache_err;
+ }
+
+ err = erofs_init_shrinker();
+ if (err)
+ goto shrinker_err;
+
+ err = z_erofs_init_zip_subsystem();
+ if (err)
+ goto zip_err;
+
+ err = register_filesystem(&erofs_fs_type);
+ if (err)
+ goto fs_err;
+
+ return 0;
+
+fs_err:
+ z_erofs_exit_zip_subsystem();
+zip_err:
+ erofs_exit_shrinker();
+shrinker_err:
+ kmem_cache_destroy(erofs_inode_cachep);
+icache_err:
+ return err;
+}
+
+static void __exit erofs_module_exit(void)
+{
+ unregister_filesystem(&erofs_fs_type);
+ z_erofs_exit_zip_subsystem();
+ erofs_exit_shrinker();
+
+ /* Ensure all RCU free inodes are safe before cache is destroyed. */
+ rcu_barrier();
+ kmem_cache_destroy(erofs_inode_cachep);
+}
+
+/* get filesystem statistics */
+static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ struct super_block *sb = dentry->d_sb;
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = EROFS_BLKSIZ;
+ buf->f_blocks = sbi->blocks;
+ buf->f_bfree = buf->f_bavail = 0;
+
+ buf->f_files = ULLONG_MAX;
+ buf->f_ffree = ULLONG_MAX - sbi->inos;
+
+ buf->f_namelen = EROFS_NAME_LEN;
+
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
+ return 0;
+}
+
+static int erofs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb);
+
+#ifdef CONFIG_EROFS_FS_XATTR
+ if (test_opt(sbi, XATTR_USER))
+ seq_puts(seq, ",user_xattr");
+ else
+ seq_puts(seq, ",nouser_xattr");
+#endif
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ if (test_opt(sbi, POSIX_ACL))
+ seq_puts(seq, ",acl");
+ else
+ seq_puts(seq, ",noacl");
+#endif
+#ifdef CONFIG_EROFS_FS_ZIP
+ if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) {
+ seq_puts(seq, ",cache_strategy=disabled");
+ } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) {
+ seq_puts(seq, ",cache_strategy=readahead");
+ } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) {
+ seq_puts(seq, ",cache_strategy=readaround");
+ } else {
+ seq_puts(seq, ",cache_strategy=(unknown)");
+ DBG_BUGON(1);
+ }
+#endif
+ return 0;
+}
+
+static int erofs_remount(struct super_block *sb, int *flags, char *data)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ unsigned int org_mnt_opt = sbi->mount_opt;
+ int err;
+
+ DBG_BUGON(!sb_rdonly(sb));
+ err = erofs_parse_options(sb, data);
+ if (err)
+ goto out;
+
+ if (test_opt(sbi, POSIX_ACL))
+ sb->s_flags |= SB_POSIXACL;
+ else
+ sb->s_flags &= ~SB_POSIXACL;
+
+ *flags |= SB_RDONLY;
+ return 0;
+out:
+ sbi->mount_opt = org_mnt_opt;
+ return err;
+}
+
+const struct super_operations erofs_sops = {
+ .put_super = erofs_put_super,
+ .alloc_inode = erofs_alloc_inode,
+ .free_inode = erofs_free_inode,
+ .statfs = erofs_statfs,
+ .show_options = erofs_show_options,
+ .remount_fs = erofs_remount,
+};
+
+module_init(erofs_module_init);
+module_exit(erofs_module_exit);
+
+MODULE_DESCRIPTION("Enhanced ROM File System");
+MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc.");
+MODULE_LICENSE("GPL");
+
diff --git a/fs/erofs/tagptr.h b/fs/erofs/tagptr.h
new file mode 100644
index 000000000000..a72897c86744
--- /dev/null
+++ b/fs/erofs/tagptr.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * A tagged pointer implementation
+ *
+ * Copyright (C) 2018 Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_TAGPTR_H
+#define __EROFS_FS_TAGPTR_H
+
+#include <linux/types.h>
+#include <linux/build_bug.h>
+
+/*
+ * the names of the tagged pointer types are tagptr{1, 2, 3...}_t;
+ * avoid directly using the internal structs __tagptr{1, 2, 3...}
+ */
+#define __MAKE_TAGPTR(n) \
+typedef struct __tagptr##n { \
+ uintptr_t v; \
+} tagptr##n##_t;
+
+__MAKE_TAGPTR(1)
+__MAKE_TAGPTR(2)
+__MAKE_TAGPTR(3)
+__MAKE_TAGPTR(4)
+
+#undef __MAKE_TAGPTR
+
+extern void __compiletime_error("bad tagptr tags")
+ __bad_tagptr_tags(void);
+
+extern void __compiletime_error("bad tagptr type")
+ __bad_tagptr_type(void);
+
+/* fix the broken usage of "#define tagptr2_t tagptr3_t" by users */
+#define __tagptr_mask_1(ptr, n) \
+ __builtin_types_compatible_p(typeof(ptr), struct __tagptr##n) ? \
+ (1UL << (n)) - 1 :
+
+#define __tagptr_mask(ptr) (\
+ __tagptr_mask_1(ptr, 1) ( \
+ __tagptr_mask_1(ptr, 2) ( \
+ __tagptr_mask_1(ptr, 3) ( \
+ __tagptr_mask_1(ptr, 4) ( \
+ __bad_tagptr_type(), 0)))))
+
+/* generate a tagged pointer from a raw value */
+#define tagptr_init(type, val) \
+ ((typeof(type)){ .v = (uintptr_t)(val) })
+
+/*
+ * directly cast a tagged pointer to the native pointer type, which
+ * could be used for backward compatibility of existing code.
+ */
+#define tagptr_cast_ptr(tptr) ((void *)(tptr).v)
+
+/* encode tagged pointers */
+#define tagptr_fold(type, ptr, _tags) ({ \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(type))) \
+ __bad_tagptr_tags(); \
+tagptr_init(type, (uintptr_t)(ptr) | tags); })
+
+/* decode tagged pointers */
+#define tagptr_unfold_ptr(tptr) \
+ ((void *)((tptr).v & ~__tagptr_mask(tptr)))
+
+#define tagptr_unfold_tags(tptr) \
+ ((tptr).v & __tagptr_mask(tptr))
+
+/* operations for the tagged pointer */
+#define tagptr_eq(_tptr1, _tptr2) ({ \
+ typeof(_tptr1) tptr1 = (_tptr1); \
+ typeof(_tptr2) tptr2 = (_tptr2); \
+ (void)(&tptr1 == &tptr2); \
+(tptr1).v == (tptr2).v; })
+
+/* lock-free CAS operation */
+#define tagptr_cmpxchg(_ptptr, _o, _n) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ typeof(_o) o = (_o); \
+ typeof(_n) n = (_n); \
+ (void)(&o == &n); \
+ (void)(&o == ptptr); \
+tagptr_init(o, cmpxchg(&ptptr->v, o.v, n.v)); })
+
+/* wrap WRITE_ONCE if atomic update is needed */
+#define tagptr_replace_tags(_ptptr, tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ *ptptr = tagptr_fold(*ptptr, tagptr_unfold_ptr(*ptptr), tags); \
+*ptptr; })
+
+#define tagptr_set_tags(_ptptr, _tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+ __bad_tagptr_tags(); \
+ ptptr->v |= tags; \
+*ptptr; })
+
+#define tagptr_clear_tags(_ptptr, _tags) ({ \
+ typeof(_ptptr) ptptr = (_ptptr); \
+ const typeof(_tags) tags = (_tags); \
+ if (__builtin_constant_p(tags) && (tags & ~__tagptr_mask(*ptptr))) \
+ __bad_tagptr_tags(); \
+ ptptr->v &= ~tags; \
+*ptptr; })
+
+#endif /* __EROFS_FS_TAGPTR_H */
+
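(Illustrative sketch, not part of the patch: a minimal round-trip with the helpers above,
inside a kernel translation unit; the function and object names are hypothetical.)

	#include "tagptr.h"

	static void tagptr_demo(void)
	{
		static unsigned long obj;	/* any pointer whose low bit is clear */

		/* fold the pointer and a 1-bit tag into a single word */
		tagptr1_t t = tagptr_fold(tagptr1_t, &obj, 1);

		/* recover both halves */
		unsigned long *p = tagptr_unfold_ptr(t);	/* == &obj */
		unsigned long tag = tagptr_unfold_tags(t);	/* == 1 */

		(void)p;
		(void)tag;
	}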
diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c
new file mode 100644
index 000000000000..d92b3e753a6f
--- /dev/null
+++ b/fs/erofs/utils.c
@@ -0,0 +1,333 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "internal.h"
+#include <linux/pagevec.h>
+
+struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
+{
+ struct page *page;
+
+ if (!list_empty(pool)) {
+ page = lru_to_page(pool);
+ DBG_BUGON(page_ref_count(page) != 1);
+ list_del(&page->lru);
+ } else {
+ page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
+ }
+ return page;
+}
+
+#if (EROFS_PCPUBUF_NR_PAGES > 0)
+static struct {
+ u8 data[PAGE_SIZE * EROFS_PCPUBUF_NR_PAGES];
+} ____cacheline_aligned_in_smp erofs_pcpubuf[NR_CPUS];
+
+void *erofs_get_pcpubuf(unsigned int pagenr)
+{
+ preempt_disable();
+ return &erofs_pcpubuf[smp_processor_id()].data[pagenr * PAGE_SIZE];
+}
+#endif
+
+#ifdef CONFIG_EROFS_FS_ZIP
+/* global shrink count (for all mounted EROFS instances) */
+static atomic_long_t erofs_global_shrink_cnt;
+
+#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
+#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
+
+static int erofs_workgroup_get(struct erofs_workgroup *grp)
+{
+ int o;
+
+repeat:
+ o = erofs_wait_on_workgroup_freezed(grp);
+ if (o <= 0)
+ return -1;
+
+ if (atomic_cmpxchg(&grp->refcount, o, o + 1) != o)
+ goto repeat;
+
+ /* decrease the global shrink count; pairs with erofs_workgroup_put */
+ if (o == 1)
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ return 0;
+}
+
+struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
+ pgoff_t index, bool *tag)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+ struct erofs_workgroup *grp;
+
+repeat:
+ rcu_read_lock();
+ grp = radix_tree_lookup(&sbi->workstn_tree, index);
+ if (grp) {
+ *tag = xa_pointer_tag(grp);
+ grp = xa_untag_pointer(grp);
+
+ if (erofs_workgroup_get(grp)) {
+ /* prefer to relax rcu read side */
+ rcu_read_unlock();
+ goto repeat;
+ }
+
+ DBG_BUGON(index != grp->index);
+ }
+ rcu_read_unlock();
+ return grp;
+}
+
+int erofs_register_workgroup(struct super_block *sb,
+ struct erofs_workgroup *grp,
+ bool tag)
+{
+ struct erofs_sb_info *sbi;
+ int err;
+
+ /* grp shouldn't be broken or already in use before registration */
+ if (atomic_read(&grp->refcount) != 1) {
+ DBG_BUGON(1);
+ return -EINVAL;
+ }
+
+ err = radix_tree_preload(GFP_NOFS);
+ if (err)
+ return err;
+
+ sbi = EROFS_SB(sb);
+ xa_lock(&sbi->workstn_tree);
+
+ grp = xa_tag_pointer(grp, tag);
+
+ /*
+ * Bump up reference count before making this workgroup
+ * visible to other users in order to avoid potential UAF
+ * without being serialized by workstn_lock.
+ */
+ __erofs_workgroup_get(grp);
+
+ err = radix_tree_insert(&sbi->workstn_tree, grp->index, grp);
+ if (err)
+ /*
+ * it's safe to decrease since the workgroup isn't visible
+ * and refcount >= 2 (cannot be frozen).
+ */
+ __erofs_workgroup_put(grp);
+
+ xa_unlock(&sbi->workstn_tree);
+ radix_tree_preload_end();
+ return err;
+}
+
+static void __erofs_workgroup_free(struct erofs_workgroup *grp)
+{
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ erofs_workgroup_free_rcu(grp);
+}
+
+int erofs_workgroup_put(struct erofs_workgroup *grp)
+{
+ int count = atomic_dec_return(&grp->refcount);
+
+ if (count == 1)
+ atomic_long_inc(&erofs_global_shrink_cnt);
+ else if (!count)
+ __erofs_workgroup_free(grp);
+ return count;
+}
+
+static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
+{
+ erofs_workgroup_unfreeze(grp, 0);
+ __erofs_workgroup_free(grp);
+}
+
+static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp,
+ bool cleanup)
+{
+ /*
+ * If managed cache is on, refcount of workgroups
+ * themselves could be < 0 (frozen). In other words,
+ * there is no guarantee that all refcounts > 0.
+ */
+ if (!erofs_workgroup_try_to_freeze(grp, 1))
+ return false;
+
+ /*
+ * Note that all cached pages should be unattached
+ * before deleted from the radix tree. Otherwise some
+ * cached pages could be still attached to the orphan
+ * old workgroup when the new one is available in the tree.
+ */
+ if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
+ erofs_workgroup_unfreeze(grp, 1);
+ return false;
+ }
+
+ /*
+ * It's impossible to fail after the workgroup is frozen;
+ * however, in order to avoid some race conditions, add a
+ * DBG_BUGON to observe this in advance.
+ */
+ DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
+ grp->index)) != grp);
+
+ /*
+ * If managed cache is on, last refcount should indicate
+ * the related workstation.
+ */
+ erofs_workgroup_unfreeze_final(grp);
+ return true;
+}
+
+static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
+ unsigned long nr_shrink,
+ bool cleanup)
+{
+ pgoff_t first_index = 0;
+ void *batch[PAGEVEC_SIZE];
+ unsigned int freed = 0;
+
+ int i, found;
+repeat:
+ xa_lock(&sbi->workstn_tree);
+
+ found = radix_tree_gang_lookup(&sbi->workstn_tree,
+ batch, first_index, PAGEVEC_SIZE);
+
+ for (i = 0; i < found; ++i) {
+ struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+
+ first_index = grp->index + 1;
+
+ /* try to shrink each valid workgroup */
+ if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
+ continue;
+
+ ++freed;
+ if (!--nr_shrink)
+ break;
+ }
+ xa_unlock(&sbi->workstn_tree);
+
+ if (i && nr_shrink)
+ goto repeat;
+ return freed;
+}
+
+/* protected by 'erofs_sb_list_lock' */
+static unsigned int shrinker_run_no;
+
+/* protects the mounted 'erofs_sb_list' */
+static DEFINE_SPINLOCK(erofs_sb_list_lock);
+static LIST_HEAD(erofs_sb_list);
+
+void erofs_shrinker_register(struct super_block *sb)
+{
+ struct erofs_sb_info *sbi = EROFS_SB(sb);
+
+ mutex_init(&sbi->umount_mutex);
+
+ spin_lock(&erofs_sb_list_lock);
+ list_add(&sbi->list, &erofs_sb_list);
+ spin_unlock(&erofs_sb_list_lock);
+}
+
+void erofs_shrinker_unregister(struct super_block *sb)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+
+ mutex_lock(&sbi->umount_mutex);
+ erofs_shrink_workstation(sbi, ~0UL, true);
+
+ spin_lock(&erofs_sb_list_lock);
+ list_del(&sbi->list);
+ spin_unlock(&erofs_sb_list_lock);
+ mutex_unlock(&sbi->umount_mutex);
+}
+
+static unsigned long erofs_shrink_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ return atomic_long_read(&erofs_global_shrink_cnt);
+}
+
+static unsigned long erofs_shrink_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct erofs_sb_info *sbi;
+ struct list_head *p;
+
+ unsigned long nr = sc->nr_to_scan;
+ unsigned int run_no;
+ unsigned long freed = 0;
+
+ spin_lock(&erofs_sb_list_lock);
+ do {
+ run_no = ++shrinker_run_no;
+ } while (run_no == 0);
+
+ /* Iterate over all mounted superblocks and try to shrink them */
+ p = erofs_sb_list.next;
+ while (p != &erofs_sb_list) {
+ sbi = list_entry(p, struct erofs_sb_info, list);
+
+ /*
+ * We move the ones we do to the end of the list, so we stop
+ * when we see one we have already done.
+ */
+ if (sbi->shrinker_run_no == run_no)
+ break;
+
+ if (!mutex_trylock(&sbi->umount_mutex)) {
+ p = p->next;
+ continue;
+ }
+
+ spin_unlock(&erofs_sb_list_lock);
+ sbi->shrinker_run_no = run_no;
+
+ freed += erofs_shrink_workstation(sbi, nr, false);
+
+ spin_lock(&erofs_sb_list_lock);
+ /* Get the next list element before we move this one */
+ p = p->next;
+
+ /*
+ * Move this one to the end of the list to provide some
+ * fairness.
+ */
+ list_move_tail(&sbi->list, &erofs_sb_list);
+ mutex_unlock(&sbi->umount_mutex);
+
+ if (freed >= nr)
+ break;
+ }
+ spin_unlock(&erofs_sb_list_lock);
+ return freed;
+}
+
+static struct shrinker erofs_shrinker_info = {
+ .scan_objects = erofs_shrink_scan,
+ .count_objects = erofs_shrink_count,
+ .seeks = DEFAULT_SEEKS,
+};
+
+int __init erofs_init_shrinker(void)
+{
+ return register_shrinker(&erofs_shrinker_info);
+}
+
+void erofs_exit_shrinker(void)
+{
+ unregister_shrinker(&erofs_shrinker_info);
+}
+#endif /* CONFIG_EROFS_FS_ZIP */
+
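(Hedged sketch, not part of the patch: the intended caller pattern for the workgroup helpers
above; 'sb' and 'index' stand for a mounted erofs super_block and a pcluster index.)

	bool tag;
	struct erofs_workgroup *grp;

	grp = erofs_find_workgroup(sb, index, &tag);
	if (grp) {
		/* the lookup took an extra reference; refcount is now >= 2 */

		/* ... use the structure embedding this workgroup ... */

		/* dropping back to a refcount of 1 re-arms reclaim by
		 * bumping erofs_global_shrink_cnt again
		 */
		erofs_workgroup_put(grp);
	}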
diff --git a/fs/erofs/xattr.c b/fs/erofs/xattr.c
new file mode 100644
index 000000000000..a13a78725c57
--- /dev/null
+++ b/fs/erofs/xattr.c
@@ -0,0 +1,704 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include <linux/security.h>
+#include "xattr.h"
+
+struct xattr_iter {
+ struct super_block *sb;
+ struct page *page;
+ void *kaddr;
+
+ erofs_blk_t blkaddr;
+ unsigned int ofs;
+};
+
+static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
+{
+ /* the only user of kunmap() is 'init_inode_xattrs' */
+ if (!atomic)
+ kunmap(it->page);
+ else
+ kunmap_atomic(it->kaddr);
+
+ unlock_page(it->page);
+ put_page(it->page);
+}
+
+static inline void xattr_iter_end_final(struct xattr_iter *it)
+{
+ if (!it->page)
+ return;
+
+ xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct xattr_iter it;
+ unsigned int i;
+ struct erofs_xattr_ibody_header *ih;
+ struct super_block *sb;
+ struct erofs_sb_info *sbi;
+ bool atomic_map;
+ int ret = 0;
+
+ /* the common case is that xattrs of this inode have already been initialized. */
+ if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
+ return 0;
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+
+ /* someone has initialized xattrs for us? */
+ if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
+ goto out_unlock;
+
+ /*
+ * bypass all xattr operations if ->xattr_isize is not greater than
+ * sizeof(struct erofs_xattr_ibody_header), in detail:
+ * 1) if it is not large enough to contain erofs_xattr_ibody_header,
+ *    ->xattr_isize must be 0 (which means no xattrs at all);
+ * 2) if it just fits erofs_xattr_ibody_header, the layout is currently
+ *    undefined on-disk (maybe used later with a new sb feature).
+ */
+ if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
+ erofs_err(inode->i_sb,
+ "xattr_isize %d of nid %llu is not supported yet",
+ vi->xattr_isize, vi->nid);
+ ret = -EOPNOTSUPP;
+ goto out_unlock;
+ } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
+ if (vi->xattr_isize) {
+ erofs_err(inode->i_sb,
+ "bogus xattr ibody @ nid %llu", vi->nid);
+ DBG_BUGON(1);
+ ret = -EFSCORRUPTED;
+ goto out_unlock; /* xattr ondisk layout error */
+ }
+ ret = -ENOATTR;
+ goto out_unlock;
+ }
+
+ sb = inode->i_sb;
+ sbi = EROFS_SB(sb);
+ it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
+ it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
+
+ it.page = erofs_get_meta_page(sb, it.blkaddr);
+ if (IS_ERR(it.page)) {
+ ret = PTR_ERR(it.page);
+ goto out_unlock;
+ }
+
+ /* read in shared xattr array (non-atomic, see kmalloc below) */
+ it.kaddr = kmap(it.page);
+ atomic_map = false;
+
+ ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
+
+ vi->xattr_shared_count = ih->h_shared_count;
+ vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+ sizeof(uint), GFP_KERNEL);
+ if (!vi->xattr_shared_xattrs) {
+ xattr_iter_end(&it, atomic_map);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+ /* let's skip ibody header */
+ it.ofs += sizeof(struct erofs_xattr_ibody_header);
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ if (it.ofs >= EROFS_BLKSIZ) {
+ /* cannot be unaligned */
+ DBG_BUGON(it.ofs != EROFS_BLKSIZ);
+ xattr_iter_end(&it, atomic_map);
+
+ it.page = erofs_get_meta_page(sb, ++it.blkaddr);
+ if (IS_ERR(it.page)) {
+ kfree(vi->xattr_shared_xattrs);
+ vi->xattr_shared_xattrs = NULL;
+ ret = PTR_ERR(it.page);
+ goto out_unlock;
+ }
+
+ it.kaddr = kmap_atomic(it.page);
+ atomic_map = true;
+ it.ofs = 0;
+ }
+ vi->xattr_shared_xattrs[i] =
+ le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
+ it.ofs += sizeof(__le32);
+ }
+ xattr_iter_end(&it, atomic_map);
+
+ set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);
+
+out_unlock:
+ clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
+ return ret;
+}
+
+/*
+ * the general idea of these return values is:
+ * if 0 is returned, go on processing the current xattr;
+ * if 1 (> 0) is returned, skip this round and process the next xattr;
+ * if -err (< 0) is returned, an error (e.g. ENOATTR) occurred
+ * and needs to be handled
+ */
+struct xattr_iter_handlers {
+ int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
+ int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
+ unsigned int len);
+ int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
+ void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
+ unsigned int len);
+};
+
+static inline int xattr_iter_fixup(struct xattr_iter *it)
+{
+ if (it->ofs < EROFS_BLKSIZ)
+ return 0;
+
+ xattr_iter_end(it, true);
+
+ it->blkaddr += erofs_blknr(it->ofs);
+
+ it->page = erofs_get_meta_page(it->sb, it->blkaddr);
+ if (IS_ERR(it->page)) {
+ int err = PTR_ERR(it->page);
+
+ it->page = NULL;
+ return err;
+ }
+
+ it->kaddr = kmap_atomic(it->page);
+ it->ofs = erofs_blkoff(it->ofs);
+ return 0;
+}
+
+static int inline_xattr_iter_begin(struct xattr_iter *it,
+ struct inode *inode)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
+ unsigned int xattr_header_sz, inline_xattr_ofs;
+
+ xattr_header_sz = inlinexattr_header_size(inode);
+ if (xattr_header_sz >= vi->xattr_isize) {
+ DBG_BUGON(xattr_header_sz > vi->xattr_isize);
+ return -ENOATTR;
+ }
+
+ inline_xattr_ofs = vi->inode_isize + xattr_header_sz;
+
+ it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
+ it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
+
+ it->page = erofs_get_meta_page(inode->i_sb, it->blkaddr);
+ if (IS_ERR(it->page))
+ return PTR_ERR(it->page);
+
+ it->kaddr = kmap_atomic(it->page);
+ return vi->xattr_isize - xattr_header_sz;
+}
+
+/*
+ * Regardless of success or failure, `xattr_foreach' will end up with
+ * `ofs' pointing to the next xattr item rather than an arbitrary position.
+ */
+static int xattr_foreach(struct xattr_iter *it,
+ const struct xattr_iter_handlers *op,
+ unsigned int *tlimit)
+{
+ struct erofs_xattr_entry entry;
+ unsigned int value_sz, processed, slice;
+ int err;
+
+ /* 0. fixup blkaddr, ofs, ipage */
+ err = xattr_iter_fixup(it);
+ if (err)
+ return err;
+
+ /*
+ * 1. read the xattr entry into memory; since entries are
+ *    EROFS_XATTR_ALIGNed, the whole entry must reside
+ *    within a single page
+ */
+ entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
+ if (tlimit) {
+ unsigned int entry_sz = erofs_xattr_entry_size(&entry);
+
+ /* xattr on-disk corruption: xattr entry beyond xattr_isize */
+ if (*tlimit < entry_sz) {
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ *tlimit -= entry_sz;
+ }
+
+ it->ofs += sizeof(struct erofs_xattr_entry);
+ value_sz = le16_to_cpu(entry.e_value_size);
+
+ /* handle entry */
+ err = op->entry(it, &entry);
+ if (err) {
+ it->ofs += entry.e_name_len + value_sz;
+ goto out;
+ }
+
+ /* 2. handle xattr name (ofs will finally be at the end of name) */
+ processed = 0;
+
+ while (processed < entry.e_name_len) {
+ if (it->ofs >= EROFS_BLKSIZ) {
+ DBG_BUGON(it->ofs > EROFS_BLKSIZ);
+
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
+ it->ofs = 0;
+ }
+
+ slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
+ entry.e_name_len - processed);
+
+ /* handle name */
+ err = op->name(it, processed, it->kaddr + it->ofs, slice);
+ if (err) {
+ it->ofs += entry.e_name_len - processed + value_sz;
+ goto out;
+ }
+
+ it->ofs += slice;
+ processed += slice;
+ }
+
+ /* 3. handle xattr value */
+ processed = 0;
+
+ if (op->alloc_buffer) {
+ err = op->alloc_buffer(it, value_sz);
+ if (err) {
+ it->ofs += value_sz;
+ goto out;
+ }
+ }
+
+ while (processed < value_sz) {
+ if (it->ofs >= EROFS_BLKSIZ) {
+ DBG_BUGON(it->ofs > EROFS_BLKSIZ);
+
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
+ it->ofs = 0;
+ }
+
+ slice = min_t(unsigned int, PAGE_SIZE - it->ofs,
+ value_sz - processed);
+ op->value(it, processed, it->kaddr + it->ofs, slice);
+ it->ofs += slice;
+ processed += slice;
+ }
+
+out:
+ /* xattrs should be 4-byte aligned (on-disk constraint) */
+ it->ofs = EROFS_XATTR_ALIGN(it->ofs);
+ return err < 0 ? err : 0;
+}
+
+struct getxattr_iter {
+ struct xattr_iter it;
+
+ char *buffer;
+ int buffer_size, index;
+ struct qstr name;
+};
+
+static int xattr_entrymatch(struct xattr_iter *_it,
+ struct erofs_xattr_entry *entry)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ return (it->index != entry->e_name_index ||
+ it->name.len != entry->e_name_len) ? -ENOATTR : 0;
+}
+
+static int xattr_namematch(struct xattr_iter *_it,
+ unsigned int processed, char *buf, unsigned int len)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
+}
+
+static int xattr_checkbuffer(struct xattr_iter *_it,
+ unsigned int value_sz)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+ int err = it->buffer_size < value_sz ? -ERANGE : 0;
+
+ it->buffer_size = value_sz;
+ return !it->buffer ? 1 : err;
+}
+
+static void xattr_copyvalue(struct xattr_iter *_it,
+ unsigned int processed,
+ char *buf, unsigned int len)
+{
+ struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
+
+ memcpy(it->buffer + processed, buf, len);
+}
+
+static const struct xattr_iter_handlers find_xattr_handlers = {
+ .entry = xattr_entrymatch,
+ .name = xattr_namematch,
+ .alloc_buffer = xattr_checkbuffer,
+ .value = xattr_copyvalue
+};
+
+static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
+{
+ int ret;
+ unsigned int remaining;
+
+ ret = inline_xattr_iter_begin(&it->it, inode);
+ if (ret < 0)
+ return ret;
+
+ remaining = ret;
+ while (remaining) {
+ ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
+ if (ret != -ENOATTR)
+ break;
+ }
+ xattr_iter_end_final(&it->it);
+
+ return ret ? ret : it->buffer_size;
+}
+
+static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ unsigned int i;
+ int ret = -ENOATTR;
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ erofs_blk_t blkaddr =
+ xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
+
+ it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
+
+ if (!i || blkaddr != it->it.blkaddr) {
+ if (i)
+ xattr_iter_end(&it->it, true);
+
+ it->it.page = erofs_get_meta_page(sb, blkaddr);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
+ it->it.kaddr = kmap_atomic(it->it.page);
+ it->it.blkaddr = blkaddr;
+ }
+
+ ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
+ if (ret != -ENOATTR)
+ break;
+ }
+ if (vi->xattr_shared_count)
+ xattr_iter_end_final(&it->it);
+
+ return ret ? ret : it->buffer_size;
+}
+
+static bool erofs_xattr_user_list(struct dentry *dentry)
+{
+ return test_opt(EROFS_SB(dentry->d_sb), XATTR_USER);
+}
+
+static bool erofs_xattr_trusted_list(struct dentry *dentry)
+{
+ return capable(CAP_SYS_ADMIN);
+}
+
+int erofs_getxattr(struct inode *inode, int index,
+ const char *name,
+ void *buffer, size_t buffer_size)
+{
+ int ret;
+ struct getxattr_iter it;
+
+ if (!name)
+ return -EINVAL;
+
+ ret = init_inode_xattrs(inode);
+ if (ret)
+ return ret;
+
+ it.index = index;
+
+ it.name.len = strlen(name);
+ if (it.name.len > EROFS_NAME_LEN)
+ return -ERANGE;
+ it.name.name = name;
+
+ it.buffer = buffer;
+ it.buffer_size = buffer_size;
+
+ it.it.sb = inode->i_sb;
+ ret = inline_getxattr(inode, &it);
+ if (ret == -ENOATTR)
+ ret = shared_getxattr(inode, &it);
+ return ret;
+}
+
+static int erofs_xattr_generic_get(const struct xattr_handler *handler,
+ struct dentry *unused, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+
+ switch (handler->flags) {
+ case EROFS_XATTR_INDEX_USER:
+ if (!test_opt(sbi, XATTR_USER))
+ return -EOPNOTSUPP;
+ break;
+ case EROFS_XATTR_INDEX_TRUSTED:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ break;
+ case EROFS_XATTR_INDEX_SECURITY:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return erofs_getxattr(inode, handler->flags, name, buffer, size);
+}
+
+const struct xattr_handler erofs_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .flags = EROFS_XATTR_INDEX_USER,
+ .list = erofs_xattr_user_list,
+ .get = erofs_xattr_generic_get,
+};
+
+const struct xattr_handler erofs_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .flags = EROFS_XATTR_INDEX_TRUSTED,
+ .list = erofs_xattr_trusted_list,
+ .get = erofs_xattr_generic_get,
+};
+
+#ifdef CONFIG_EROFS_FS_SECURITY
+const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .flags = EROFS_XATTR_INDEX_SECURITY,
+ .get = erofs_xattr_generic_get,
+};
+#endif
+
+const struct xattr_handler *erofs_xattr_handlers[] = {
+ &erofs_xattr_user_handler,
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ &posix_acl_access_xattr_handler,
+ &posix_acl_default_xattr_handler,
+#endif
+ &erofs_xattr_trusted_handler,
+#ifdef CONFIG_EROFS_FS_SECURITY
+ &erofs_xattr_security_handler,
+#endif
+ NULL,
+};
+
+struct listxattr_iter {
+ struct xattr_iter it;
+
+ struct dentry *dentry;
+ char *buffer;
+ int buffer_size, buffer_ofs;
+};
+
+static int xattr_entrylist(struct xattr_iter *_it,
+ struct erofs_xattr_entry *entry)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+ unsigned int prefix_len;
+ const char *prefix;
+
+ const struct xattr_handler *h =
+ erofs_xattr_handler(entry->e_name_index);
+
+ if (!h || (h->list && !h->list(it->dentry)))
+ return 1;
+
+ prefix = xattr_prefix(h);
+ prefix_len = strlen(prefix);
+
+ if (!it->buffer) {
+ it->buffer_ofs += prefix_len + entry->e_name_len + 1;
+ return 1;
+ }
+
+ if (it->buffer_ofs + prefix_len
+ + entry->e_name_len + 1 > it->buffer_size)
+ return -ERANGE;
+
+ memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
+ it->buffer_ofs += prefix_len;
+ return 0;
+}
+
+static int xattr_namelist(struct xattr_iter *_it,
+ unsigned int processed, char *buf, unsigned int len)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+
+ memcpy(it->buffer + it->buffer_ofs, buf, len);
+ it->buffer_ofs += len;
+ return 0;
+}
+
+static int xattr_skipvalue(struct xattr_iter *_it,
+ unsigned int value_sz)
+{
+ struct listxattr_iter *it =
+ container_of(_it, struct listxattr_iter, it);
+
+ it->buffer[it->buffer_ofs++] = '\0';
+ return 1;
+}
+
+static const struct xattr_iter_handlers list_xattr_handlers = {
+ .entry = xattr_entrylist,
+ .name = xattr_namelist,
+ .alloc_buffer = xattr_skipvalue,
+ .value = NULL
+};
+
+static int inline_listxattr(struct listxattr_iter *it)
+{
+ int ret;
+ unsigned int remaining;
+
+ ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
+ if (ret < 0)
+ return ret;
+
+ remaining = ret;
+ while (remaining) {
+ ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
+ if (ret)
+ break;
+ }
+ xattr_iter_end_final(&it->it);
+ return ret ? ret : it->buffer_ofs;
+}
+
+static int shared_listxattr(struct listxattr_iter *it)
+{
+ struct inode *const inode = d_inode(it->dentry);
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < vi->xattr_shared_count; ++i) {
+ erofs_blk_t blkaddr =
+ xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);
+
+ it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
+ if (!i || blkaddr != it->it.blkaddr) {
+ if (i)
+ xattr_iter_end(&it->it, true);
+
+ it->it.page = erofs_get_meta_page(sb, blkaddr);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
+ it->it.kaddr = kmap_atomic(it->it.page);
+ it->it.blkaddr = blkaddr;
+ }
+
+ ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
+ if (ret)
+ break;
+ }
+ if (vi->xattr_shared_count)
+ xattr_iter_end_final(&it->it);
+
+ return ret ? ret : it->buffer_ofs;
+}
+
+ssize_t erofs_listxattr(struct dentry *dentry,
+ char *buffer, size_t buffer_size)
+{
+ int ret;
+ struct listxattr_iter it;
+
+ ret = init_inode_xattrs(d_inode(dentry));
+ if (ret)
+ return ret;
+
+ it.dentry = dentry;
+ it.buffer = buffer;
+ it.buffer_size = buffer_size;
+ it.buffer_ofs = 0;
+
+ it.it.sb = dentry->d_sb;
+
+ ret = inline_listxattr(&it);
+ if (ret < 0 && ret != -ENOATTR)
+ return ret;
+ return shared_listxattr(&it);
+}
+
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+struct posix_acl *erofs_get_acl(struct inode *inode, int type)
+{
+ struct posix_acl *acl;
+ int prefix, rc;
+ char *value = NULL;
+
+ switch (type) {
+ case ACL_TYPE_ACCESS:
+ prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
+ break;
+ case ACL_TYPE_DEFAULT:
+ prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ rc = erofs_getxattr(inode, prefix, "", NULL, 0);
+ if (rc > 0) {
+ value = kmalloc(rc, GFP_KERNEL);
+ if (!value)
+ return ERR_PTR(-ENOMEM);
+ rc = erofs_getxattr(inode, prefix, "", value, rc);
+ }
+
+ if (rc == -ENOATTR)
+ acl = NULL;
+ else if (rc < 0)
+ acl = ERR_PTR(rc);
+ else
+ acl = posix_acl_from_xattr(&init_user_ns, value, rc);
+ kfree(value);
+ return acl;
+}
+#endif
+
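(Hedged sketch, not part of the patch: the two-pass sizing idiom used by erofs_get_acl()
above applies to any erofs_getxattr() caller; the inode and the name "mykey" are hypothetical.)

	int len = erofs_getxattr(inode, EROFS_XATTR_INDEX_USER,
				 "mykey", NULL, 0);	/* NULL buffer queries the value size */
	if (len > 0) {
		char *value = kmalloc(len, GFP_KERNEL);

		if (value) {
			len = erofs_getxattr(inode, EROFS_XATTR_INDEX_USER,
					     "mykey", value, len);
			if (len > 0) {
				/* ... use value[0..len) here ... */
			}
			kfree(value);
		}
	}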
diff --git a/fs/erofs/xattr.h b/fs/erofs/xattr.h
new file mode 100644
index 000000000000..3585b84d2f20
--- /dev/null
+++ b/fs/erofs/xattr.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017-2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_XATTR_H
+#define __EROFS_XATTR_H
+
+#include "internal.h"
+#include <linux/posix_acl_xattr.h>
+#include <linux/xattr.h>
+
+/* Attribute not found */
+#define ENOATTR ENODATA
+
+static inline unsigned int inlinexattr_header_size(struct inode *inode)
+{
+ return sizeof(struct erofs_xattr_ibody_header) +
+ sizeof(u32) * EROFS_I(inode)->xattr_shared_count;
+}
+
+static inline erofs_blk_t xattrblock_addr(struct erofs_sb_info *sbi,
+ unsigned int xattr_id)
+{
+#ifdef CONFIG_EROFS_FS_XATTR
+ return sbi->xattr_blkaddr +
+ xattr_id * sizeof(__u32) / EROFS_BLKSIZ;
+#else
+ return 0;
+#endif
+}
+
+static inline unsigned int xattrblock_offset(struct erofs_sb_info *sbi,
+ unsigned int xattr_id)
+{
+ return (xattr_id * sizeof(__u32)) % EROFS_BLKSIZ;
+}
+
+#ifdef CONFIG_EROFS_FS_XATTR
+extern const struct xattr_handler erofs_xattr_user_handler;
+extern const struct xattr_handler erofs_xattr_trusted_handler;
+#ifdef CONFIG_EROFS_FS_SECURITY
+extern const struct xattr_handler erofs_xattr_security_handler;
+#endif
+
+static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
+{
+static const struct xattr_handler *xattr_handler_map[] = {
+ [EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+ [EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
+ [EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+ &posix_acl_default_xattr_handler,
+#endif
+ [EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
+#ifdef CONFIG_EROFS_FS_SECURITY
+ [EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
+#endif
+};
+
+ return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
+ xattr_handler_map[idx] : NULL;
+}
+
+extern const struct xattr_handler *erofs_xattr_handlers[];
+
+int erofs_getxattr(struct inode *, int, const char *, void *, size_t);
+ssize_t erofs_listxattr(struct dentry *, char *, size_t);
+#else
+static inline int erofs_getxattr(struct inode *inode, int index,
+ const char *name, void *buffer,
+ size_t buffer_size)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline ssize_t erofs_listxattr(struct dentry *dentry,
+ char *buffer, size_t buffer_size)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* !CONFIG_EROFS_FS_XATTR */
+
+#ifdef CONFIG_EROFS_FS_POSIX_ACL
+struct posix_acl *erofs_get_acl(struct inode *inode, int type);
+#else
+#define erofs_get_acl (NULL)
+#endif
+
+#endif
+
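(Worked example, not part of the patch: with the usual EROFS_BLKSIZ of 4096, a shared xattr
id is an offset in 4-byte units from sbi->xattr_blkaddr, so id 1030 resolves as follows.)

	erofs_blk_t blk = xattrblock_addr(sbi, 1030);	/* + (1030 * 4) / 4096, i.e. xattr_blkaddr + 1 */
	unsigned int off = xattrblock_offset(sbi, 1030);	/* (1030 * 4) % 4096 = 24 bytes into that block */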
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
new file mode 100644
index 000000000000..96e34c90f814
--- /dev/null
+++ b/fs/erofs/zdata.c
@@ -0,0 +1,1431 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "zdata.h"
+#include "compress.h"
+#include <linux/prefetch.h>
+
+#include <trace/events/erofs.h>
+
+/*
+ * a compressed_pages[] placeholder used to prevent the slot from
+ * being filled with file pages for in-place decompression.
+ */
+#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
+
+/* how to allocate cached pages for a pcluster */
+enum z_erofs_cache_alloctype {
+ DONTALLOC, /* don't allocate any cached pages */
+ DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
+};
+
+/*
+ * tagged pointer with 1-bit tag for all compressed pages
+ * tag 0 - the page is just found with an extra page reference
+ */
+typedef tagptr1_t compressed_page_t;
+
+#define tag_compressed_page_justfound(page) \
+ tagptr_fold(compressed_page_t, page, 1)
+
+static struct workqueue_struct *z_erofs_workqueue __read_mostly;
+static struct kmem_cache *pcluster_cachep __read_mostly;
+
+void z_erofs_exit_zip_subsystem(void)
+{
+ destroy_workqueue(z_erofs_workqueue);
+ kmem_cache_destroy(pcluster_cachep);
+}
+
+static inline int z_erofs_init_workqueue(void)
+{
+ const unsigned int onlinecpus = num_possible_cpus();
+ const unsigned int flags = WQ_UNBOUND | WQ_HIGHPRI | WQ_CPU_INTENSIVE;
+
+ /*
+ * no need to spawn too many threads; limiting the number of threads
+ * can minimize scheduling overhead, perhaps per-CPU threads would be better?
+ */
+ z_erofs_workqueue = alloc_workqueue("erofs_unzipd", flags,
+ onlinecpus + onlinecpus / 4);
+ return z_erofs_workqueue ? 0 : -ENOMEM;
+}
+
+static void z_erofs_pcluster_init_once(void *ptr)
+{
+ struct z_erofs_pcluster *pcl = ptr;
+ struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
+ unsigned int i;
+
+ mutex_init(&cl->lock);
+ cl->nr_pages = 0;
+ cl->vcnt = 0;
+ for (i = 0; i < Z_EROFS_CLUSTER_MAX_PAGES; ++i)
+ pcl->compressed_pages[i] = NULL;
+}
+
+static void z_erofs_pcluster_init_always(struct z_erofs_pcluster *pcl)
+{
+ struct z_erofs_collection *cl = z_erofs_primarycollection(pcl);
+
+ atomic_set(&pcl->obj.refcount, 1);
+
+ DBG_BUGON(cl->nr_pages);
+ DBG_BUGON(cl->vcnt);
+}
+
+int __init z_erofs_init_zip_subsystem(void)
+{
+ pcluster_cachep = kmem_cache_create("erofs_compress",
+ Z_EROFS_WORKGROUP_SIZE, 0,
+ SLAB_RECLAIM_ACCOUNT,
+ z_erofs_pcluster_init_once);
+ if (pcluster_cachep) {
+ if (!z_erofs_init_workqueue())
+ return 0;
+
+ kmem_cache_destroy(pcluster_cachep);
+ }
+ return -ENOMEM;
+}
+
+enum z_erofs_collectmode {
+ COLLECT_SECONDARY,
+ COLLECT_PRIMARY,
+ /*
+ * The current collection was the tail of an existing chain, and the
+ * previously processed chained collections have all been decided to
+ * be hooked up to it.
+ * A new chain will be created for the remaining collections which are
+ * not processed yet; therefore, different from COLLECT_PRIMARY_FOLLOWED,
+ * the next collection cannot reuse the whole page safely in
+ * the following scenario:
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (belongs to the next cl) | (belongs to the current cl) |
+ * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
+ */
+ COLLECT_PRIMARY_HOOKED,
+ COLLECT_PRIMARY_FOLLOWED_NOINPLACE,
+ /*
+ * The current collection has been linked with the owned chain, and
+ * could also be linked with the remaining collections, which means
+ * that if the page being processed is the tail page of the collection,
+ * the current collection can safely use the whole page (since
+ * the previous collection is under control) for in-place I/O, as
+ * illustrated below:
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (of the current cl) | (of the previous collection) |
+ * | PRIMARY_FOLLOWED or | |
+ * |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________|
+ *
+ * [ (*) the above page can be used as inplace I/O. ]
+ */
+ COLLECT_PRIMARY_FOLLOWED,
+};
+
+struct z_erofs_collector {
+ struct z_erofs_pagevec_ctor vector;
+
+ struct z_erofs_pcluster *pcl, *tailpcl;
+ struct z_erofs_collection *cl;
+ struct page **compressedpages;
+ z_erofs_next_pcluster_t owned_head;
+
+ enum z_erofs_collectmode mode;
+};
+
+struct z_erofs_decompress_frontend {
+ struct inode *const inode;
+
+ struct z_erofs_collector clt;
+ struct erofs_map_blocks map;
+
+ /* used for applying cache strategy on the fly */
+ bool backmost;
+ erofs_off_t headoffset;
+};
+
+#define COLLECTOR_INIT() { \
+ .owned_head = Z_EROFS_PCLUSTER_TAIL, \
+ .mode = COLLECT_PRIMARY_FOLLOWED }
+
+#define DECOMPRESS_FRONTEND_INIT(__i) { \
+ .inode = __i, .clt = COLLECTOR_INIT(), \
+ .backmost = true, }
+
+static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
+static DEFINE_MUTEX(z_pagemap_global_lock);
+
+static void preload_compressed_pages(struct z_erofs_collector *clt,
+ struct address_space *mc,
+ enum z_erofs_cache_alloctype type,
+ struct list_head *pagepool)
+{
+ const struct z_erofs_pcluster *pcl = clt->pcl;
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+ struct page **pages = clt->compressedpages;
+ pgoff_t index = pcl->obj.index + (pages - pcl->compressed_pages);
+ bool standalone = true;
+
+ if (clt->mode < COLLECT_PRIMARY_FOLLOWED)
+ return;
+
+ for (; pages < pcl->compressed_pages + clusterpages; ++pages) {
+ struct page *page;
+ compressed_page_t t;
+
+ /* the compressed page was loaded before */
+ if (READ_ONCE(*pages))
+ continue;
+
+ page = find_get_page(mc, index);
+
+ if (page) {
+ t = tag_compressed_page_justfound(page);
+ } else if (type == DELAYEDALLOC) {
+ t = tagptr_init(compressed_page_t, PAGE_UNALLOCATED);
+ } else { /* DONTALLOC */
+ if (standalone)
+ clt->compressedpages = pages;
+ standalone = false;
+ continue;
+ }
+
+ if (!cmpxchg_relaxed(pages, NULL, tagptr_cast_ptr(t)))
+ continue;
+
+ if (page)
+ put_page(page);
+ }
+
+ if (standalone) /* downgrade to PRIMARY_FOLLOWED_NOINPLACE */
+ clt->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE;
+}
+
+/* called by erofs_shrinker to get rid of all compressed_pages */
+int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp)
+{
+ struct z_erofs_pcluster *const pcl =
+ container_of(grp, struct z_erofs_pcluster, obj);
+ struct address_space *const mapping = MNGD_MAPPING(sbi);
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+ int i;
+
+ /*
+ * refcount of workgroup is now frozen at 1,
+ * therefore no need to worry about available decompression users.
+ */
+ for (i = 0; i < clusterpages; ++i) {
+ struct page *page = pcl->compressed_pages[i];
+
+ if (!page)
+ continue;
+
+ /* block other users from reclaiming or migrating the page */
+ if (!trylock_page(page))
+ return -EBUSY;
+
+ if (page->mapping != mapping)
+ continue;
+
+ /* barrier is implied in the following 'unlock_page' */
+ WRITE_ONCE(pcl->compressed_pages[i], NULL);
+ set_page_private(page, 0);
+ ClearPagePrivate(page);
+
+ unlock_page(page);
+ put_page(page);
+ }
+ return 0;
+}
+
+int erofs_try_to_free_cached_page(struct address_space *mapping,
+ struct page *page)
+{
+ struct z_erofs_pcluster *const pcl = (void *)page_private(page);
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+ int ret = 0; /* 0 - busy */
+
+ if (erofs_workgroup_try_to_freeze(&pcl->obj, 1)) {
+ unsigned int i;
+
+ for (i = 0; i < clusterpages; ++i) {
+ if (pcl->compressed_pages[i] == page) {
+ WRITE_ONCE(pcl->compressed_pages[i], NULL);
+ ret = 1;
+ break;
+ }
+ }
+ erofs_workgroup_unfreeze(&pcl->obj, 1);
+
+ if (ret) {
+ ClearPagePrivate(page);
+ put_page(page);
+ }
+ }
+ return ret;
+}
+
+/* page_type must be Z_EROFS_PAGE_TYPE_EXCLUSIVE */
+static inline bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
+ struct page *page)
+{
+ struct z_erofs_pcluster *const pcl = clt->pcl;
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+
+ while (clt->compressedpages < pcl->compressed_pages + clusterpages) {
+ if (!cmpxchg(clt->compressedpages++, NULL, page))
+ return true;
+ }
+ return false;
+}
+
+/* callers must hold the collection lock */
+static int z_erofs_attach_page(struct z_erofs_collector *clt,
+ struct page *page,
+ enum z_erofs_page_type type)
+{
+ int ret;
+ bool occupied;
+
+ /* give priority to in-place I/O */
+ if (clt->mode >= COLLECT_PRIMARY &&
+ type == Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
+ z_erofs_try_inplace_io(clt, page))
+ return 0;
+
+ ret = z_erofs_pagevec_enqueue(&clt->vector,
+ page, type, &occupied);
+ clt->cl->vcnt += (unsigned int)ret;
+
+ return ret ? 0 : -EAGAIN;
+}
+
+static enum z_erofs_collectmode
+try_to_claim_pcluster(struct z_erofs_pcluster *pcl,
+ z_erofs_next_pcluster_t *owned_head)
+{
+ /* let's claim the following types of pclusters */
+retry:
+ if (pcl->next == Z_EROFS_PCLUSTER_NIL) {
+ /* type 1, nil pcluster */
+ if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_NIL,
+ *owned_head) != Z_EROFS_PCLUSTER_NIL)
+ goto retry;
+
+ *owned_head = &pcl->next;
+ /* lucky, I am the followee :) */
+ return COLLECT_PRIMARY_FOLLOWED;
+ } else if (pcl->next == Z_EROFS_PCLUSTER_TAIL) {
+ /*
+ * type 2, link to the end of an existing open chain,
+ * be careful that its submission itself is governed
+ * by the original owned chain.
+ */
+ if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+ *owned_head) != Z_EROFS_PCLUSTER_TAIL)
+ goto retry;
+ *owned_head = Z_EROFS_PCLUSTER_TAIL;
+ return COLLECT_PRIMARY_HOOKED;
+ }
+ return COLLECT_PRIMARY; /* :( better luck next time */
+}
+
+static struct z_erofs_collection *cllookup(struct z_erofs_collector *clt,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
+{
+ struct erofs_workgroup *grp;
+ struct z_erofs_pcluster *pcl;
+ struct z_erofs_collection *cl;
+ unsigned int length;
+ bool tag;
+
+ grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+ if (!grp)
+ return NULL;
+
+ pcl = container_of(grp, struct z_erofs_pcluster, obj);
+ if (clt->owned_head == &pcl->next || pcl == clt->tailpcl) {
+ DBG_BUGON(1);
+ erofs_workgroup_put(grp);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+
+ cl = z_erofs_primarycollection(pcl);
+ if (cl->pageofs != (map->m_la & ~PAGE_MASK)) {
+ DBG_BUGON(1);
+ erofs_workgroup_put(grp);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+
+ length = READ_ONCE(pcl->length);
+ if (length & Z_EROFS_PCLUSTER_FULL_LENGTH) {
+ if ((map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) > length) {
+ DBG_BUGON(1);
+ erofs_workgroup_put(grp);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+ } else {
+ unsigned int llen = map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT;
+
+ if (map->m_flags & EROFS_MAP_FULL_MAPPED)
+ llen |= Z_EROFS_PCLUSTER_FULL_LENGTH;
+
+ while (llen > length &&
+ length != cmpxchg_relaxed(&pcl->length, length, llen)) {
+ cpu_relax();
+ length = READ_ONCE(pcl->length);
+ }
+ }
+ mutex_lock(&cl->lock);
+ /* used to detect a tail-merging loop caused by corrupted images */
+ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ clt->tailpcl = pcl;
+ clt->mode = try_to_claim_pcluster(pcl, &clt->owned_head);
+ /* clean tailpcl if the current owned_head is Z_EROFS_PCLUSTER_TAIL */
+ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ clt->tailpcl = NULL;
+ clt->pcl = pcl;
+ clt->cl = cl;
+ return cl;
+}
+
+static struct z_erofs_collection *clregister(struct z_erofs_collector *clt,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
+{
+ struct z_erofs_pcluster *pcl;
+ struct z_erofs_collection *cl;
+ int err;
+
+ /* no available workgroup, let's allocate one */
+ pcl = kmem_cache_alloc(pcluster_cachep, GFP_NOFS);
+ if (!pcl)
+ return ERR_PTR(-ENOMEM);
+
+ z_erofs_pcluster_init_always(pcl);
+ pcl->obj.index = map->m_pa >> PAGE_SHIFT;
+
+ pcl->length = (map->m_llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
+ (map->m_flags & EROFS_MAP_FULL_MAPPED ?
+ Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
+
+ if (map->m_flags & EROFS_MAP_ZIPPED)
+ pcl->algorithmformat = Z_EROFS_COMPRESSION_LZ4;
+ else
+ pcl->algorithmformat = Z_EROFS_COMPRESSION_SHIFTED;
+
+ pcl->clusterbits = EROFS_I(inode)->z_physical_clusterbits[0];
+ pcl->clusterbits -= PAGE_SHIFT;
+
+ /* new pclusters should be claimed as type 1, primary and followed */
+ pcl->next = clt->owned_head;
+ clt->mode = COLLECT_PRIMARY_FOLLOWED;
+
+ cl = z_erofs_primarycollection(pcl);
+ cl->pageofs = map->m_la & ~PAGE_MASK;
+
+ /*
+ * lock all primary followed works before they become visible to others;
+ * mutex_trylock *never* fails for a new pcluster.
+ */
+ mutex_trylock(&cl->lock);
+
+ err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+ if (err) {
+ mutex_unlock(&cl->lock);
+ kmem_cache_free(pcluster_cachep, pcl);
+ return ERR_PTR(-EAGAIN);
+ }
+ /* used to detect a tail-merging loop caused by corrupted images */
+ if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
+ clt->tailpcl = pcl;
+ clt->owned_head = &pcl->next;
+ clt->pcl = pcl;
+ clt->cl = cl;
+ return cl;
+}
+
+static int z_erofs_collector_begin(struct z_erofs_collector *clt,
+ struct inode *inode,
+ struct erofs_map_blocks *map)
+{
+ struct z_erofs_collection *cl;
+
+ DBG_BUGON(clt->cl);
+
+ /* must be Z_EROFS_PCLUSTER_TAIL or point to a previous collection */
+ DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_NIL);
+ DBG_BUGON(clt->owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+ if (!PAGE_ALIGNED(map->m_pa)) {
+ DBG_BUGON(1);
+ return -EINVAL;
+ }
+
+repeat:
+ cl = cllookup(clt, inode, map);
+ if (!cl) {
+ cl = clregister(clt, inode, map);
+
+ if (cl == ERR_PTR(-EAGAIN))
+ goto repeat;
+ }
+
+ if (IS_ERR(cl))
+ return PTR_ERR(cl);
+
+ z_erofs_pagevec_ctor_init(&clt->vector, Z_EROFS_NR_INLINE_PAGEVECS,
+ cl->pagevec, cl->vcnt);
+
+ clt->compressedpages = clt->pcl->compressed_pages;
+ if (clt->mode <= COLLECT_PRIMARY) /* cannot do in-place I/O */
+ clt->compressedpages += Z_EROFS_CLUSTER_MAX_PAGES;
+ return 0;
+}
+
+/*
+ * keep in mind that referenced pclusters will only be freed
+ * after an RCU grace period.
+ */
+static void z_erofs_rcu_callback(struct rcu_head *head)
+{
+ struct z_erofs_collection *const cl =
+ container_of(head, struct z_erofs_collection, rcu);
+
+ kmem_cache_free(pcluster_cachep,
+ container_of(cl, struct z_erofs_pcluster,
+ primary_collection));
+}
+
+void erofs_workgroup_free_rcu(struct erofs_workgroup *grp)
+{
+ struct z_erofs_pcluster *const pcl =
+ container_of(grp, struct z_erofs_pcluster, obj);
+ struct z_erofs_collection *const cl = z_erofs_primarycollection(pcl);
+
+ call_rcu(&cl->rcu, z_erofs_rcu_callback);
+}
+
+static void z_erofs_collection_put(struct z_erofs_collection *cl)
+{
+ struct z_erofs_pcluster *const pcl =
+ container_of(cl, struct z_erofs_pcluster, primary_collection);
+
+ erofs_workgroup_put(&pcl->obj);
+}
+
+static bool z_erofs_collector_end(struct z_erofs_collector *clt)
+{
+ struct z_erofs_collection *cl = clt->cl;
+
+ if (!cl)
+ return false;
+
+ z_erofs_pagevec_ctor_exit(&clt->vector, false);
+ mutex_unlock(&cl->lock);
+
+ /*
+ * once all pending pages are added, don't hold the collection's
+ * reference any longer if the pcluster isn't hosted by ourselves.
+ */
+ if (clt->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE)
+ z_erofs_collection_put(cl);
+
+ clt->cl = NULL;
+ return true;
+}
+
+static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
+ gfp_t gfp)
+{
+ struct page *page = erofs_allocpage(pagepool, gfp, true);
+
+ page->mapping = Z_EROFS_MAPPING_STAGING;
+ return page;
+}
+
+static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
+ unsigned int cachestrategy,
+ erofs_off_t la)
+{
+ if (cachestrategy <= EROFS_ZIP_CACHE_DISABLED)
+ return false;
+
+ if (fe->backmost)
+ return true;
+
+ return cachestrategy >= EROFS_ZIP_CACHE_READAROUND &&
+ la < fe->headoffset;
+}
+
+static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
+ struct page *page,
+ struct list_head *pagepool)
+{
+ struct inode *const inode = fe->inode;
+ struct erofs_sb_info *const sbi __maybe_unused = EROFS_I_SB(inode);
+ struct erofs_map_blocks *const map = &fe->map;
+ struct z_erofs_collector *const clt = &fe->clt;
+ const loff_t offset = page_offset(page);
+ bool tight = (clt->mode >= COLLECT_PRIMARY_HOOKED);
+
+ enum z_erofs_cache_alloctype cache_strategy;
+ enum z_erofs_page_type page_type;
+ unsigned int cur, end, spiltted, index;
+ int err = 0;
+
+ /* register locked file pages as online pages in pack */
+ z_erofs_onlinepage_init(page);
+
+ spiltted = 0;
+ end = PAGE_SIZE;
+repeat:
+ cur = end - 1;
+
+ /* lucky, within the range of the current map_blocks */
+ if (offset + cur >= map->m_la &&
+ offset + cur < map->m_la + map->m_llen) {
+ /* didn't get a valid collection previously (very rare) */
+ if (!clt->cl)
+ goto restart_now;
+ goto hitted;
+ }
+
+ /* go on with the next map_blocks */
+ erofs_dbg("%s: [out-of-range] pos %llu", __func__, offset + cur);
+
+ if (z_erofs_collector_end(clt))
+ fe->backmost = false;
+
+ map->m_la = offset + cur;
+ map->m_llen = 0;
+ err = z_erofs_map_blocks_iter(inode, map, 0);
+ if (err)
+ goto err_out;
+
+restart_now:
+ if (!(map->m_flags & EROFS_MAP_MAPPED))
+ goto hitted;
+
+ err = z_erofs_collector_begin(clt, inode, map);
+ if (err)
+ goto err_out;
+
+ /* preload all compressed pages (the role may be downgraded if necessary) */
+ if (should_alloc_managed_pages(fe, sbi->cache_strategy, map->m_la))
+ cache_strategy = DELAYEDALLOC;
+ else
+ cache_strategy = DONTALLOC;
+
+ preload_compressed_pages(clt, MNGD_MAPPING(sbi),
+ cache_strategy, pagepool);
+
+ tight &= (clt->mode >= COLLECT_PRIMARY_HOOKED);
+hitted:
+ cur = end - min_t(unsigned int, offset + end - map->m_la, end);
+ if (!(map->m_flags & EROFS_MAP_MAPPED)) {
+ zero_user_segment(page, cur, end);
+ goto next_part;
+ }
+
+ /* let's derive page type */
+ page_type = cur ? Z_EROFS_VLE_PAGE_TYPE_HEAD :
+ (!spiltted ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+ (tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
+ Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
+
+ if (cur)
+ tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
+
+retry:
+ err = z_erofs_attach_page(clt, page, page_type);
+ /* should allocate an additional staging page for pagevec */
+ if (err == -EAGAIN) {
+ struct page *const newpage =
+ __stagingpage_alloc(pagepool, GFP_NOFS);
+
+ err = z_erofs_attach_page(clt, newpage,
+ Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+ if (!err)
+ goto retry;
+ }
+
+ if (err)
+ goto err_out;
+
+ index = page->index - (map->m_la >> PAGE_SHIFT);
+
+ z_erofs_onlinepage_fixup(page, index, true);
+
+ /* bump up the number of spiltted parts of a page */
+ ++spiltted;
+ /* also update nr_pages */
+ clt->cl->nr_pages = max_t(pgoff_t, clt->cl->nr_pages, index + 1);
+next_part:
+ /* can be used for verification */
+ map->m_llen = offset + cur - map->m_la;
+
+ end = cur;
+ if (end > 0)
+ goto repeat;
+
+out:
+ z_erofs_onlinepage_endio(page);
+
+ erofs_dbg("%s, finish page: %pK spiltted: %u map->m_llen %llu",
+ __func__, page, spiltted, map->m_llen);
+ return err;
+
+ /* if some error occurred while processing this page */
+err_out:
+ SetPageError(page);
+ goto out;
+}
+
+static void z_erofs_vle_unzip_kickoff(void *ptr, int bios)
+{
+ tagptr1_t t = tagptr_init(tagptr1_t, ptr);
+ struct z_erofs_unzip_io *io = tagptr_unfold_ptr(t);
+ bool background = tagptr_unfold_tags(t);
+
+ if (!background) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&io->u.wait.lock, flags);
+ if (!atomic_add_return(bios, &io->pending_bios))
+ wake_up_locked(&io->u.wait);
+ spin_unlock_irqrestore(&io->u.wait.lock, flags);
+ return;
+ }
+
+ if (!atomic_add_return(bios, &io->pending_bios))
+ queue_work(z_erofs_workqueue, &io->u.work);
+}
+
+static inline void z_erofs_vle_read_endio(struct bio *bio)
+{
+ struct erofs_sb_info *sbi = NULL;
+ blk_status_t err = bio->bi_status;
+ struct bio_vec *bvec;
+ struct bvec_iter_all iter_all;
+
+ bio_for_each_segment_all(bvec, bio, iter_all) {
+ struct page *page = bvec->bv_page;
+ bool cachemngd = false;
+
+ DBG_BUGON(PageUptodate(page));
+ DBG_BUGON(!page->mapping);
+
+ if (!sbi && !z_erofs_page_is_staging(page))
+ sbi = EROFS_SB(page->mapping->host->i_sb);
+
+ /* sbi should already be gotten if the page is managed */
+ if (sbi)
+ cachemngd = erofs_page_is_managed(sbi, page);
+
+ if (err)
+ SetPageError(page);
+ else if (cachemngd)
+ SetPageUptodate(page);
+
+ if (cachemngd)
+ unlock_page(page);
+ }
+
+ z_erofs_vle_unzip_kickoff(bio->bi_private, -1);
+ bio_put(bio);
+}
+
+static int z_erofs_decompress_pcluster(struct super_block *sb,
+ struct z_erofs_pcluster *pcl,
+ struct list_head *pagepool)
+{
+ struct erofs_sb_info *const sbi = EROFS_SB(sb);
+ const unsigned int clusterpages = BIT(pcl->clusterbits);
+ struct z_erofs_pagevec_ctor ctor;
+ unsigned int i, outputsize, llen, nr_pages;
+ struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
+ struct page **pages, **compressed_pages, *page;
+
+ enum z_erofs_page_type page_type;
+ bool overlapped, partial;
+ struct z_erofs_collection *cl;
+ int err;
+
+ might_sleep();
+ cl = z_erofs_primarycollection(pcl);
+ DBG_BUGON(!READ_ONCE(cl->nr_pages));
+
+ mutex_lock(&cl->lock);
+ nr_pages = cl->nr_pages;
+
+ if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES) {
+ pages = pages_onstack;
+ } else if (nr_pages <= Z_EROFS_VMAP_GLOBAL_PAGES &&
+ mutex_trylock(&z_pagemap_global_lock)) {
+ pages = z_pagemap_global;
+ } else {
+ gfp_t gfp_flags = GFP_KERNEL;
+
+ if (nr_pages > Z_EROFS_VMAP_GLOBAL_PAGES)
+ gfp_flags |= __GFP_NOFAIL;
+
+ pages = kvmalloc_array(nr_pages, sizeof(struct page *),
+ gfp_flags);
+
+ /* fallback to global pagemap for the lowmem scenario */
+ if (!pages) {
+ mutex_lock(&z_pagemap_global_lock);
+ pages = z_pagemap_global;
+ }
+ }
+
+ for (i = 0; i < nr_pages; ++i)
+ pages[i] = NULL;
+
+ err = 0;
+ z_erofs_pagevec_ctor_init(&ctor, Z_EROFS_NR_INLINE_PAGEVECS,
+ cl->pagevec, 0);
+
+ for (i = 0; i < cl->vcnt; ++i) {
+ unsigned int pagenr;
+
+ page = z_erofs_pagevec_dequeue(&ctor, &page_type);
+
+ /* all pages in pagevec ought to be valid */
+ DBG_BUGON(!page);
+ DBG_BUGON(!page->mapping);
+
+ if (z_erofs_put_stagingpage(pagepool, page))
+ continue;
+
+ if (page_type == Z_EROFS_VLE_PAGE_TYPE_HEAD)
+ pagenr = 0;
+ else
+ pagenr = z_erofs_onlinepage_index(page);
+
+ DBG_BUGON(pagenr >= nr_pages);
+
+ /*
+ * currently EROFS doesn't support multiref (deduplication),
+ * so error out here if a multiref page is found.
+ */
+ if (pages[pagenr]) {
+ DBG_BUGON(1);
+ SetPageError(pages[pagenr]);
+ z_erofs_onlinepage_endio(pages[pagenr]);
+ err = -EFSCORRUPTED;
+ }
+ pages[pagenr] = page;
+ }
+ z_erofs_pagevec_ctor_exit(&ctor, true);
+
+ overlapped = false;
+ compressed_pages = pcl->compressed_pages;
+
+ for (i = 0; i < clusterpages; ++i) {
+ unsigned int pagenr;
+
+ page = compressed_pages[i];
+
+ /* all compressed pages ought to be valid */
+ DBG_BUGON(!page);
+ DBG_BUGON(!page->mapping);
+
+ if (!z_erofs_page_is_staging(page)) {
+ if (erofs_page_is_managed(sbi, page)) {
+ if (!PageUptodate(page))
+ err = -EIO;
+ continue;
+ }
+
+ /*
+ * only non-head pages can be selected
+ * for in-place decompression
+ */
+ pagenr = z_erofs_onlinepage_index(page);
+
+ DBG_BUGON(pagenr >= nr_pages);
+ if (pages[pagenr]) {
+ DBG_BUGON(1);
+ SetPageError(pages[pagenr]);
+ z_erofs_onlinepage_endio(pages[pagenr]);
+ err = -EFSCORRUPTED;
+ }
+ pages[pagenr] = page;
+
+ overlapped = true;
+ }
+
+ /* PG_error needs checking for inplaced and staging pages */
+ if (PageError(page)) {
+ DBG_BUGON(PageUptodate(page));
+ err = -EIO;
+ }
+ }
+
+ if (err)
+ goto out;
+
+ llen = pcl->length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
+ if (nr_pages << PAGE_SHIFT >= cl->pageofs + llen) {
+ outputsize = llen;
+ partial = !(pcl->length & Z_EROFS_PCLUSTER_FULL_LENGTH);
+ } else {
+ outputsize = (nr_pages << PAGE_SHIFT) - cl->pageofs;
+ partial = true;
+ }
+
+ err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
+ .sb = sb,
+ .in = compressed_pages,
+ .out = pages,
+ .pageofs_out = cl->pageofs,
+ .inputsize = PAGE_SIZE,
+ .outputsize = outputsize,
+ .alg = pcl->algorithmformat,
+ .inplace_io = overlapped,
+ .partial_decoding = partial
+ }, pagepool);
+
+out:
+ /* must handle all compressed pages before ending the file pages */
+ for (i = 0; i < clusterpages; ++i) {
+ page = compressed_pages[i];
+
+ if (erofs_page_is_managed(sbi, page))
+ continue;
+
+ /* recycle all individual staging pages */
+ (void)z_erofs_put_stagingpage(pagepool, page);
+
+ WRITE_ONCE(compressed_pages[i], NULL);
+ }
+
+ for (i = 0; i < nr_pages; ++i) {
+ page = pages[i];
+ if (!page)
+ continue;
+
+ DBG_BUGON(!page->mapping);
+
+ /* recycle all individual staging pages */
+ if (z_erofs_put_stagingpage(pagepool, page))
+ continue;
+
+ if (err < 0)
+ SetPageError(page);
+
+ z_erofs_onlinepage_endio(page);
+ }
+
+ if (pages == z_pagemap_global)
+ mutex_unlock(&z_pagemap_global_lock);
+ else if (pages != pages_onstack)
+ kvfree(pages);
+
+ cl->nr_pages = 0;
+ cl->vcnt = 0;
+
+ /* all cl locks MUST be taken before the following line */
+ WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_NIL);
+
+ /* all cl locks SHOULD be released right now */
+ mutex_unlock(&cl->lock);
+
+ z_erofs_collection_put(cl);
+ return err;
+}
+
+static void z_erofs_vle_unzip_all(struct super_block *sb,
+ struct z_erofs_unzip_io *io,
+ struct list_head *pagepool)
+{
+ z_erofs_next_pcluster_t owned = io->head;
+
+ while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
+ struct z_erofs_pcluster *pcl;
+
+ /* it's impossible that 'owned' equals Z_EROFS_PCLUSTER_TAIL */
+ DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
+
+ /* it's impossible that 'owned' equals NULL */
+ DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
+
+ pcl = container_of(owned, struct z_erofs_pcluster, next);
+ owned = READ_ONCE(pcl->next);
+
+ z_erofs_decompress_pcluster(sb, pcl, pagepool);
+ }
+}
+
+static void z_erofs_vle_unzip_wq(struct work_struct *work)
+{
+ struct z_erofs_unzip_io_sb *iosb =
+ container_of(work, struct z_erofs_unzip_io_sb, io.u.work);
+ LIST_HEAD(pagepool);
+
+ DBG_BUGON(iosb->io.head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ z_erofs_vle_unzip_all(iosb->sb, &iosb->io, &pagepool);
+
+ put_pages_list(&pagepool);
+ kvfree(iosb);
+}
+
+static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl,
+ unsigned int nr,
+ struct list_head *pagepool,
+ struct address_space *mc,
+ gfp_t gfp)
+{
+ /* determined at compile time to avoid too many #ifdefs */
+ const bool nocache = __builtin_constant_p(mc) ? !mc : false;
+ const pgoff_t index = pcl->obj.index;
+ bool tocache = false;
+
+ struct address_space *mapping;
+ struct page *oldpage, *page;
+
+ compressed_page_t t;
+ int justfound;
+
+repeat:
+ page = READ_ONCE(pcl->compressed_pages[nr]);
+ oldpage = page;
+
+ if (!page)
+ goto out_allocpage;
+
+ /*
+ * the cached page has not been allocated and
+ * a placeholder is out there, prepare it now.
+ */
+ if (!nocache && page == PAGE_UNALLOCATED) {
+ tocache = true;
+ goto out_allocpage;
+ }
+
+ /* process the target tagged pointer */
+ t = tagptr_init(compressed_page_t, page);
+ justfound = tagptr_unfold_tags(t);
+ page = tagptr_unfold_ptr(t);
+
+ mapping = READ_ONCE(page->mapping);
+
+ /*
+ * if managed cache is disabled, there is no way to
+ * get such a cached-like page.
+ */
+ if (nocache) {
+ /* if managed cache is disabled, `justfound' is impossible */
+ DBG_BUGON(justfound);
+
+ /* and it should be locked, not uptodate, and not truncated */
+ DBG_BUGON(!PageLocked(page));
+ DBG_BUGON(PageUptodate(page));
+ DBG_BUGON(!mapping);
+ goto out;
+ }
+
+ /*
+ * unmanaged (file) pages are all locked solidly,
+ * therefore it is impossible for `mapping' to be NULL.
+ */
+ if (mapping && mapping != mc)
+ /* ought to be unmanaged pages */
+ goto out;
+
+ lock_page(page);
+
+ /* only true if page reclaim goes wrong, should never happen */
+ DBG_BUGON(justfound && PagePrivate(page));
+
+ /* the page is still in the managed cache */
+ if (page->mapping == mc) {
+ WRITE_ONCE(pcl->compressed_pages[nr], page);
+
+ ClearPageError(page);
+ if (!PagePrivate(page)) {
+ /*
+ * under the current restriction, a page already in
+ * compressed_pages[] cannot be !PagePrivate(page),
+ * so this page must have just been found.
+ */
+ DBG_BUGON(!justfound);
+
+ justfound = 0;
+ set_page_private(page, (unsigned long)pcl);
+ SetPagePrivate(page);
+ }
+
+ /* no need to submit io if it is already up-to-date */
+ if (PageUptodate(page)) {
+ unlock_page(page);
+ page = NULL;
+ }
+ goto out;
+ }
+
+ /*
+ * the managed page has been truncated; it's unsafe to
+ * reuse it, so let's allocate a new cache-managed page.
+ */
+ DBG_BUGON(page->mapping);
+ DBG_BUGON(!justfound);
+
+ tocache = true;
+ unlock_page(page);
+ put_page(page);
+out_allocpage:
+ page = __stagingpage_alloc(pagepool, gfp);
+ if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
+ list_add(&page->lru, pagepool);
+ cpu_relax();
+ goto repeat;
+ }
+ if (nocache || !tocache)
+ goto out;
+ if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
+ page->mapping = Z_EROFS_MAPPING_STAGING;
+ goto out;
+ }
+
+ set_page_private(page, (unsigned long)pcl);
+ SetPagePrivate(page);
+out: /* the only exit (for tracing and debugging) */
+ return page;
+}
+
+static struct z_erofs_unzip_io *jobqueue_init(struct super_block *sb,
+ struct z_erofs_unzip_io *io,
+ bool foreground)
+{
+ struct z_erofs_unzip_io_sb *iosb;
+
+ if (foreground) {
+ /* waitqueue available for foreground io */
+ DBG_BUGON(!io);
+
+ init_waitqueue_head(&io->u.wait);
+ atomic_set(&io->pending_bios, 0);
+ goto out;
+ }
+
+ iosb = kvzalloc(sizeof(*iosb), GFP_KERNEL | __GFP_NOFAIL);
+ DBG_BUGON(!iosb);
+
+ /* initialize fields in the allocated descriptor */
+ io = &iosb->io;
+ iosb->sb = sb;
+ INIT_WORK(&io->u.work, z_erofs_vle_unzip_wq);
+out:
+ io->head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+ return io;
+}
+
+/* define decompression jobqueue types */
+enum {
+ JQ_BYPASS,
+ JQ_SUBMIT,
+ NR_JOBQUEUES,
+};
+
+static void *jobqueueset_init(struct super_block *sb,
+ z_erofs_next_pcluster_t qtail[],
+ struct z_erofs_unzip_io *q[],
+ struct z_erofs_unzip_io *fgq,
+ bool forcefg)
+{
+ /*
+ * if managed cache is enabled, a bypass jobqueue is needed:
+ * no need to read from the device for the pclusters in this queue.
+ */
+ q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, true);
+ qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
+
+ q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, forcefg);
+ qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
+
+ return tagptr_cast_ptr(tagptr_fold(tagptr1_t, q[JQ_SUBMIT], !forcefg));
+}
+
+static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
+ z_erofs_next_pcluster_t qtail[],
+ z_erofs_next_pcluster_t owned_head)
+{
+ z_erofs_next_pcluster_t *const submit_qtail = qtail[JQ_SUBMIT];
+ z_erofs_next_pcluster_t *const bypass_qtail = qtail[JQ_BYPASS];
+
+ DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ if (owned_head == Z_EROFS_PCLUSTER_TAIL)
+ owned_head = Z_EROFS_PCLUSTER_TAIL_CLOSED;
+
+ WRITE_ONCE(pcl->next, Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+ WRITE_ONCE(*submit_qtail, owned_head);
+ WRITE_ONCE(*bypass_qtail, &pcl->next);
+
+ qtail[JQ_BYPASS] = &pcl->next;
+}
+
+static bool postsubmit_is_all_bypassed(struct z_erofs_unzip_io *q[],
+ unsigned int nr_bios,
+ bool force_fg)
+{
+ /*
+ * although background decompression is preferred, nothing is pending
+ * for submission; don't kick the workqueue, just drop the jobqueue directly.
+ */
+ if (force_fg || nr_bios)
+ return false;
+
+ kvfree(container_of(q[JQ_SUBMIT], struct z_erofs_unzip_io_sb, io));
+ return true;
+}
+
+static bool z_erofs_vle_submit_all(struct super_block *sb,
+ z_erofs_next_pcluster_t owned_head,
+ struct list_head *pagepool,
+ struct z_erofs_unzip_io *fgq,
+ bool force_fg)
+{
+ struct erofs_sb_info *const sbi __maybe_unused = EROFS_SB(sb);
+ z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
+ struct z_erofs_unzip_io *q[NR_JOBQUEUES];
+ struct bio *bio;
+ void *bi_private;
+ /* since bio will be NULL, no need to initialize last_index */
+ pgoff_t uninitialized_var(last_index);
+ bool force_submit = false;
+ unsigned int nr_bios;
+
+ if (owned_head == Z_EROFS_PCLUSTER_TAIL)
+ return false;
+
+ force_submit = false;
+ bio = NULL;
+ nr_bios = 0;
+ bi_private = jobqueueset_init(sb, qtail, q, fgq, force_fg);
+
+ /* by default, all need io submission */
+ q[JQ_SUBMIT]->head = owned_head;
+
+ do {
+ struct z_erofs_pcluster *pcl;
+ unsigned int clusterpages;
+ pgoff_t first_index;
+ struct page *page;
+ unsigned int i = 0, bypass = 0;
+ int err;
+
+ /* impossible that 'owned_head' equals the following */
+ DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
+ DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_NIL);
+
+ pcl = container_of(owned_head, struct z_erofs_pcluster, next);
+
+ clusterpages = BIT(pcl->clusterbits);
+
+ /* close the main owned chain at first */
+ owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
+ Z_EROFS_PCLUSTER_TAIL_CLOSED);
+
+ first_index = pcl->obj.index;
+ force_submit |= (first_index != last_index + 1);
+
+repeat:
+ page = pickup_page_for_submission(pcl, i, pagepool,
+ MNGD_MAPPING(sbi),
+ GFP_NOFS);
+ if (!page) {
+ force_submit = true;
+ ++bypass;
+ goto skippage;
+ }
+
+ if (bio && force_submit) {
+submit_bio_retry:
+ submit_bio(bio);
+ bio = NULL;
+ }
+
+ if (!bio) {
+ bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+
+ bio->bi_end_io = z_erofs_vle_read_endio;
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
+ LOG_SECTORS_PER_BLOCK;
+ bio->bi_private = bi_private;
+ bio->bi_opf = REQ_OP_READ;
+
+ ++nr_bios;
+ }
+
+ err = bio_add_page(bio, page, PAGE_SIZE, 0);
+ if (err < PAGE_SIZE)
+ goto submit_bio_retry;
+
+ force_submit = false;
+ last_index = first_index + i;
+skippage:
+ if (++i < clusterpages)
+ goto repeat;
+
+ if (bypass < clusterpages)
+ qtail[JQ_SUBMIT] = &pcl->next;
+ else
+ move_to_bypass_jobqueue(pcl, qtail, owned_head);
+ } while (owned_head != Z_EROFS_PCLUSTER_TAIL);
+
+ if (bio)
+ submit_bio(bio);
+
+ if (postsubmit_is_all_bypassed(q, nr_bios, force_fg))
+ return true;
+
+ z_erofs_vle_unzip_kickoff(bi_private, nr_bios);
+ return true;
+}
+
+static void z_erofs_submit_and_unzip(struct super_block *sb,
+ struct z_erofs_collector *clt,
+ struct list_head *pagepool,
+ bool force_fg)
+{
+ struct z_erofs_unzip_io io[NR_JOBQUEUES];
+
+ if (!z_erofs_vle_submit_all(sb, clt->owned_head,
+ pagepool, io, force_fg))
+ return;
+
+ /* decompress no I/O pclusters immediately */
+ z_erofs_vle_unzip_all(sb, &io[JQ_BYPASS], pagepool);
+
+ if (!force_fg)
+ return;
+
+ /* wait until all bios are completed */
+ wait_event(io[JQ_SUBMIT].u.wait,
+ !atomic_read(&io[JQ_SUBMIT].pending_bios));
+
+ /* let's do synchronous decompression */
+ z_erofs_vle_unzip_all(sb, &io[JQ_SUBMIT], pagepool);
+}
+
+static int z_erofs_vle_normalaccess_readpage(struct file *file,
+ struct page *page)
+{
+ struct inode *const inode = page->mapping->host;
+ struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+ int err;
+ LIST_HEAD(pagepool);
+
+ trace_erofs_readpage(page, false);
+
+ f.headoffset = (erofs_off_t)page->index << PAGE_SHIFT;
+
+ err = z_erofs_do_read_page(&f, page, &pagepool);
+ (void)z_erofs_collector_end(&f.clt);
+
+ /* if some compressed clusters are ready, submit them anyway */
+ z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, true);
+
+ if (err)
+ erofs_err(inode->i_sb, "failed to read, err [%d]", err);
+
+ if (f.map.mpage)
+ put_page(f.map.mpage);
+
+ /* clean up the remaining free pages */
+ put_pages_list(&pagepool);
+ return err;
+}
+
+static bool should_decompress_synchronously(struct erofs_sb_info *sbi,
+ unsigned int nr)
+{
+ return nr <= sbi->max_sync_decompress_pages;
+}
+
+static int z_erofs_vle_normalaccess_readpages(struct file *filp,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned int nr_pages)
+{
+ struct inode *const inode = mapping->host;
+ struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
+
+ bool sync = should_decompress_synchronously(sbi, nr_pages);
+ struct z_erofs_decompress_frontend f = DECOMPRESS_FRONTEND_INIT(inode);
+ gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
+ struct page *head = NULL;
+ LIST_HEAD(pagepool);
+
+ trace_erofs_readpages(mapping->host, lru_to_page(pages),
+ nr_pages, false);
+
+ f.headoffset = (erofs_off_t)lru_to_page(pages)->index << PAGE_SHIFT;
+
+ for (; nr_pages; --nr_pages) {
+ struct page *page = lru_to_page(pages);
+
+ prefetchw(&page->flags);
+ list_del(&page->lru);
+
+ /*
+ * A pure asynchronous readahead is indicated if
+ * a PG_readahead-marked page is hit first.
+ * Let's also do asynchronous decompression for this case.
+ */
+ sync &= !(PageReadahead(page) && !head);
+
+ if (add_to_page_cache_lru(page, mapping, page->index, gfp)) {
+ list_add(&page->lru, &pagepool);
+ continue;
+ }
+
+ set_page_private(page, (unsigned long)head);
+ head = page;
+ }
+
+ while (head) {
+ struct page *page = head;
+ int err;
+
+ /* traversal in reverse order */
+ head = (void *)page_private(page);
+
+ err = z_erofs_do_read_page(&f, page, &pagepool);
+ if (err)
+ erofs_err(inode->i_sb,
+ "readahead error at page %lu @ nid %llu",
+ page->index, EROFS_I(inode)->nid);
+ put_page(page);
+ }
+
+ (void)z_erofs_collector_end(&f.clt);
+
+ z_erofs_submit_and_unzip(inode->i_sb, &f.clt, &pagepool, sync);
+
+ if (f.map.mpage)
+ put_page(f.map.mpage);
+
+ /* clean up the remaining free pages */
+ put_pages_list(&pagepool);
+ return 0;
+}
+
+const struct address_space_operations z_erofs_vle_normalaccess_aops = {
+ .readpage = z_erofs_vle_normalaccess_readpage,
+ .readpages = z_erofs_vle_normalaccess_readpages,
+};
+
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
new file mode 100644
index 000000000000..faf950189bd7
--- /dev/null
+++ b/fs/erofs/zdata.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_ZDATA_H
+#define __EROFS_FS_ZDATA_H
+
+#include "internal.h"
+#include "zpvec.h"
+
+#define Z_EROFS_NR_INLINE_PAGEVECS 3
+
+/*
+ * Structure fields follow one of the following exclusion rules.
+ *
+ * I: Modifiable by initialization/destruction paths and read-only
+ * for everyone else;
+ *
+ * L: Field should be protected by pageset lock;
+ *
+ * A: Field should be accessed / updated atomically by parallelized code.
+ */
+struct z_erofs_collection {
+ struct mutex lock;
+
+ /* I: page offset of start position of decompression */
+ unsigned short pageofs;
+
+ /* L: maximum relative page index in pagevec[] */
+ unsigned short nr_pages;
+
+ /* L: total number of pages in pagevec[] */
+ unsigned int vcnt;
+
+ union {
+ /* L: inline a certain number of pagevecs for bootstrap */
+ erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];
+
+ /* I: can be used to free the pcluster by RCU. */
+ struct rcu_head rcu;
+ };
+};
+
+#define Z_EROFS_PCLUSTER_FULL_LENGTH 0x00000001
+#define Z_EROFS_PCLUSTER_LENGTH_BIT 1
+
+/*
+ * let's leave a type here in case another
+ * tagged pointer is introduced later.
+ */
+typedef void *z_erofs_next_pcluster_t;
+
+struct z_erofs_pcluster {
+ struct erofs_workgroup obj;
+ struct z_erofs_collection primary_collection;
+
+ /* A: point to next chained pcluster or TAILs */
+ z_erofs_next_pcluster_t next;
+
+ /* A: compressed pages (including multi-usage pages) */
+ struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];
+
+ /* A: lower limit of decompressed length and if full length or not */
+ unsigned int length;
+
+ /* I: compression algorithm format */
+ unsigned char algorithmformat;
+ /* I: bit shift of physical cluster size */
+ unsigned char clusterbits;
+};
+
+#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)
+
+/* let's avoid the valid 32-bit kernel addresses */
+
+/* the chained workgroup hasn't submitted io (still open) */
+#define Z_EROFS_PCLUSTER_TAIL ((void *)0x5F0ECAFE)
+/* the chained workgroup has already submitted io */
+#define Z_EROFS_PCLUSTER_TAIL_CLOSED ((void *)0x5F0EDEAD)
+
+#define Z_EROFS_PCLUSTER_NIL (NULL)
+
+#define Z_EROFS_WORKGROUP_SIZE sizeof(struct z_erofs_pcluster)
+
+struct z_erofs_unzip_io {
+ atomic_t pending_bios;
+ z_erofs_next_pcluster_t head;
+
+ union {
+ wait_queue_head_t wait;
+ struct work_struct work;
+ } u;
+};
+
+struct z_erofs_unzip_io_sb {
+ struct z_erofs_unzip_io io;
+ struct super_block *sb;
+};
+
+#define MNGD_MAPPING(sbi) ((sbi)->managed_cache->i_mapping)
+static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
+ struct page *page)
+{
+ return page->mapping == MNGD_MAPPING(sbi);
+}
+
+#define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
+#define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
+#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
+
+/*
+ * waiters (aka. ongoing_packs): # to unlock the page
+ * sub-index: 0 - for partial page, >= 1 full page sub-index
+ */
+typedef atomic_t z_erofs_onlinepage_t;
+
+/* type punning */
+union z_erofs_onlinepage_converter {
+ z_erofs_onlinepage_t *o;
+ unsigned long *v;
+};
+
+static inline unsigned int z_erofs_onlinepage_index(struct page *page)
+{
+ union z_erofs_onlinepage_converter u;
+
+ DBG_BUGON(!PagePrivate(page));
+ u.v = &page_private(page);
+
+ return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+}
+
+static inline void z_erofs_onlinepage_init(struct page *page)
+{
+ union {
+ z_erofs_onlinepage_t o;
+ unsigned long v;
+ /* keep from being unlocked in advance */
+ } u = { .o = ATOMIC_INIT(1) };
+
+ set_page_private(page, u.v);
+ smp_wmb();
+ SetPagePrivate(page);
+}
+
+static inline void z_erofs_onlinepage_fixup(struct page *page,
+ uintptr_t index, bool down)
+{
+ unsigned long *p, o, v, id;
+repeat:
+ p = &page_private(page);
+ o = READ_ONCE(*p);
+
+ id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
+ if (id) {
+ if (!index)
+ return;
+
+ DBG_BUGON(id != index);
+ }
+
+ v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
+ ((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
+ if (cmpxchg(p, o, v) != o)
+ goto repeat;
+}
+
+static inline void z_erofs_onlinepage_endio(struct page *page)
+{
+ union z_erofs_onlinepage_converter u;
+ unsigned int v;
+
+ DBG_BUGON(!PagePrivate(page));
+ u.v = &page_private(page);
+
+ v = atomic_dec_return(u.o);
+ if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
+ ClearPagePrivate(page);
+ if (!PageError(page))
+ SetPageUptodate(page);
+ unlock_page(page);
+ }
+ erofs_dbg("%s, page %p value %x", __func__, page, atomic_read(u.o));
+}
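As a reading aid for the helpers above: the per-page word keeps the sub-index in the high bits and a 2-bit pending count in the low bits. A minimal userspace sketch of that packing (it only models the arithmetic and is not the kernel code; the constant names merely mirror the ones defined above):

/* userspace model of the online-page word: (index << 2) | pending count */
#include <assert.h>
#include <stdio.h>

#define COUNT_BITS  2
#define COUNT_MASK  ((1u << COUNT_BITS) - 1)
#define INDEX_SHIFT COUNT_BITS

static unsigned int pack(unsigned int index, unsigned int pending)
{
        return (index << INDEX_SHIFT) | (pending & COUNT_MASK);
}

int main(void)
{
        /* init: no sub-index yet, one reference keeps the page locked */
        unsigned int v = pack(0, 1);

        /* fixup: record sub-index 5 and take one more reference */
        v = pack(5, (v & COUNT_MASK) + 1);

        /* two endio-style decrements drop both references */
        v--;
        v--;
        assert((v & COUNT_MASK) == 0);  /* page could be unlocked now */
        printf("index=%u pending=%u\n", v >> INDEX_SHIFT, v & COUNT_MASK);
        return 0;
}

Keeping the count in the two low bits is what lets z_erofs_onlinepage_endio() above decide from a single atomic_dec_return() whether the page can be unlocked.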
+
+#define Z_EROFS_VMAP_ONSTACK_PAGES \
+ min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
+#define Z_EROFS_VMAP_GLOBAL_PAGES 2048
+
+#endif
+
diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c
new file mode 100644
index 000000000000..6a26c293ae2d
--- /dev/null
+++ b/fs/erofs/zmap.c
@@ -0,0 +1,471 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2018-2019 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#include "internal.h"
+#include <asm/unaligned.h>
+#include <trace/events/erofs.h>
+
+int z_erofs_fill_inode(struct inode *inode)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+
+ if (vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY) {
+ vi->z_advise = 0;
+ vi->z_algorithmtype[0] = 0;
+ vi->z_algorithmtype[1] = 0;
+ vi->z_logical_clusterbits = LOG_BLOCK_SIZE;
+ vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits;
+ vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits;
+ set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+ }
+
+ inode->i_mapping->a_ops = &z_erofs_vle_normalaccess_aops;
+ return 0;
+}
+
+static int fill_inode_lazy(struct inode *inode)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct super_block *const sb = inode->i_sb;
+ int err;
+ erofs_off_t pos;
+ struct page *page;
+ void *kaddr;
+ struct z_erofs_map_header *h;
+
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+ return 0;
+
+ if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+
+ err = 0;
+ if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
+ goto out_unlock;
+
+ DBG_BUGON(vi->datalayout == EROFS_INODE_FLAT_COMPRESSION_LEGACY);
+
+ pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
+ vi->xattr_isize, 8);
+ page = erofs_get_meta_page(sb, erofs_blknr(pos));
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out_unlock;
+ }
+
+ kaddr = kmap_atomic(page);
+
+ h = kaddr + erofs_blkoff(pos);
+ vi->z_advise = le16_to_cpu(h->h_advise);
+ vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
+ vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;
+
+ if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX) {
+ erofs_err(sb, "unknown compression format %u for nid %llu, please upgrade kernel",
+ vi->z_algorithmtype[0], vi->nid);
+ err = -EOPNOTSUPP;
+ goto unmap_done;
+ }
+
+ vi->z_logical_clusterbits = LOG_BLOCK_SIZE + (h->h_clusterbits & 7);
+ vi->z_physical_clusterbits[0] = vi->z_logical_clusterbits +
+ ((h->h_clusterbits >> 3) & 3);
+
+ if (vi->z_physical_clusterbits[0] != LOG_BLOCK_SIZE) {
+ erofs_err(sb, "unsupported physical clusterbits %u for nid %llu, please upgrade kernel",
+ vi->z_physical_clusterbits[0], vi->nid);
+ err = -EOPNOTSUPP;
+ goto unmap_done;
+ }
+
+ vi->z_physical_clusterbits[1] = vi->z_logical_clusterbits +
+ ((h->h_clusterbits >> 5) & 7);
+ set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
+unmap_done:
+ kunmap_atomic(kaddr);
+ unlock_page(page);
+ put_page(page);
+out_unlock:
+ clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
+ return err;
+}
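To make the bit layout parsed above easier to follow, here is a tiny standalone sketch of the same h_clusterbits extraction; it assumes 4KiB blocks (LOG_BLOCK_SIZE == 12) and the sample header value is made up:

/* illustrative decode of a z_erofs_map_header h_clusterbits byte */
#include <stdio.h>

#define LOG_BLOCK_SIZE 12       /* assumption: 4KiB EROFS blocks */

int main(void)
{
        unsigned char h_clusterbits = 0;        /* hypothetical on-disk value */
        unsigned int logical, phys0, phys1;

        logical = LOG_BLOCK_SIZE + (h_clusterbits & 7);
        phys0   = logical + ((h_clusterbits >> 3) & 3);
        phys1   = logical + ((h_clusterbits >> 5) & 7);

        /* prints 12/12/12, i.e. block-sized logical and physical clusters */
        printf("lclusterbits=%u pclusterbits=[%u, %u]\n", logical, phys0, phys1);
        return 0;
}

With this sample value both derived fields stay at the block size, which is the only physical cluster size the check above currently accepts.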
+
+struct z_erofs_maprecorder {
+ struct inode *inode;
+ struct erofs_map_blocks *map;
+ void *kaddr;
+
+ unsigned long lcn;
+ /* compression extent information gathered */
+ u8 type;
+ u16 clusterofs;
+ u16 delta[2];
+ erofs_blk_t pblk;
+};
+
+static int z_erofs_reload_indexes(struct z_erofs_maprecorder *m,
+ erofs_blk_t eblk)
+{
+ struct super_block *const sb = m->inode->i_sb;
+ struct erofs_map_blocks *const map = m->map;
+ struct page *mpage = map->mpage;
+
+ if (mpage) {
+ if (mpage->index == eblk) {
+ if (!m->kaddr)
+ m->kaddr = kmap_atomic(mpage);
+ return 0;
+ }
+
+ if (m->kaddr) {
+ kunmap_atomic(m->kaddr);
+ m->kaddr = NULL;
+ }
+ put_page(mpage);
+ }
+
+ mpage = erofs_get_meta_page(sb, eblk);
+ if (IS_ERR(mpage)) {
+ map->mpage = NULL;
+ return PTR_ERR(mpage);
+ }
+ m->kaddr = kmap_atomic(mpage);
+ unlock_page(mpage);
+ map->mpage = mpage;
+ return 0;
+}
+
+static int vle_legacy_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned long lcn)
+{
+ struct inode *const inode = m->inode;
+ struct erofs_inode *const vi = EROFS_I(inode);
+ const erofs_off_t ibase = iloc(EROFS_I_SB(inode), vi->nid);
+ const erofs_off_t pos =
+ Z_EROFS_VLE_LEGACY_INDEX_ALIGN(ibase + vi->inode_isize +
+ vi->xattr_isize) +
+ lcn * sizeof(struct z_erofs_vle_decompressed_index);
+ struct z_erofs_vle_decompressed_index *di;
+ unsigned int advise, type;
+ int err;
+
+ err = z_erofs_reload_indexes(m, erofs_blknr(pos));
+ if (err)
+ return err;
+
+ m->lcn = lcn;
+ di = m->kaddr + erofs_blkoff(pos);
+
+ advise = le16_to_cpu(di->di_advise);
+ type = (advise >> Z_EROFS_VLE_DI_CLUSTER_TYPE_BIT) &
+ ((1 << Z_EROFS_VLE_DI_CLUSTER_TYPE_BITS) - 1);
+ switch (type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ m->clusterofs = 1 << vi->z_logical_clusterbits;
+ m->delta[0] = le16_to_cpu(di->di_u.delta[0]);
+ m->delta[1] = le16_to_cpu(di->di_u.delta[1]);
+ break;
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ m->clusterofs = le16_to_cpu(di->di_clusterofs);
+ m->pblk = le32_to_cpu(di->di_u.blkaddr);
+ break;
+ default:
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ }
+ m->type = type;
+ return 0;
+}
+
+static unsigned int decode_compactedbits(unsigned int lobits,
+ unsigned int lomask,
+ u8 *in, unsigned int pos, u8 *type)
+{
+ const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7);
+ const unsigned int lo = v & lomask;
+
+ *type = (v >> lobits) & 3;
+ return lo;
+}
+
+static int unpack_compacted_index(struct z_erofs_maprecorder *m,
+ unsigned int amortizedshift,
+ unsigned int eofs)
+{
+ struct erofs_inode *const vi = EROFS_I(m->inode);
+ const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ const unsigned int lomask = (1 << lclusterbits) - 1;
+ unsigned int vcnt, base, lo, encodebits, nblk;
+ int i;
+ u8 *in, type;
+
+ if (1 << amortizedshift == 4)
+ vcnt = 2;
+ else if (1 << amortizedshift == 2 && lclusterbits == 12)
+ vcnt = 16;
+ else
+ return -EOPNOTSUPP;
+
+ encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
+ base = round_down(eofs, vcnt << amortizedshift);
+ in = m->kaddr + base;
+
+ i = (eofs - base) >> amortizedshift;
+
+ lo = decode_compactedbits(lclusterbits, lomask,
+ in, encodebits * i, &type);
+ m->type = type;
+ if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD) {
+ m->clusterofs = 1 << lclusterbits;
+ if (i + 1 != vcnt) {
+ m->delta[0] = lo;
+ return 0;
+ }
+ /*
+ * the last lcluster in the pack is special:
+ * its lo field stores delta[1] rather than delta[0].
+ * Hence, get delta[0] indirectly from the previous lcluster.
+ */
+ lo = decode_compactedbits(lclusterbits, lomask,
+ in, encodebits * (i - 1), &type);
+ if (type != Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+ lo = 0;
+ m->delta[0] = lo + 1;
+ return 0;
+ }
+ m->clusterofs = lo;
+ m->delta[0] = 0;
+ /* figure out the blkaddr (pblk) for HEAD lclusters */
+ nblk = 1;
+ while (i > 0) {
+ --i;
+ lo = decode_compactedbits(lclusterbits, lomask,
+ in, encodebits * i, &type);
+ if (type == Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD)
+ i -= lo;
+
+ if (i >= 0)
+ ++nblk;
+ }
+ in += (vcnt << amortizedshift) - sizeof(__le32);
+ m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
+ return 0;
+}
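As a quick illustration of the encodebits arithmetic above: each pack ends with a 4-byte blkaddr and the remaining bits are split evenly across its lclusters. A standalone check of the two supported layouts (numbers only, not kernel code):

/* bits of index data per lcluster in the compacted layouts */
#include <stdio.h>

static unsigned int encodebits(unsigned int vcnt, unsigned int amortizedshift)
{
        /* pack size minus the trailing 4-byte blkaddr, divided evenly */
        return ((vcnt << amortizedshift) - 4) * 8 / vcnt;
}

int main(void)
{
        /* compacted_4b: 2 lclusters share an 8-byte pack -> 16 bits each */
        printf("4B layout: %u bits per lcluster\n", encodebits(2, 2));
        /* compacted_2b: 16 lclusters share a 32-byte pack -> 14 bits each */
        printf("2B layout: %u bits per lcluster\n", encodebits(16, 1));
        return 0;
}

With lclusterbits == 12, the 14-bit entries of the 2B layout still leave two bits for the cluster type, matching the decode_compactedbits() extraction above.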
+
+static int compacted_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned long lcn)
+{
+ struct inode *const inode = m->inode;
+ struct erofs_inode *const vi = EROFS_I(inode);
+ const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ const erofs_off_t ebase = ALIGN(iloc(EROFS_I_SB(inode), vi->nid) +
+ vi->inode_isize + vi->xattr_isize, 8) +
+ sizeof(struct z_erofs_map_header);
+ const unsigned int totalidx = DIV_ROUND_UP(inode->i_size, EROFS_BLKSIZ);
+ unsigned int compacted_4b_initial, compacted_2b;
+ unsigned int amortizedshift;
+ erofs_off_t pos;
+ int err;
+
+ if (lclusterbits != 12)
+ return -EOPNOTSUPP;
+
+ if (lcn >= totalidx)
+ return -EINVAL;
+
+ m->lcn = lcn;
+ /* used to align to a 32-byte (compacted_2b) boundary */
+ compacted_4b_initial = (32 - ebase % 32) / 4;
+ if (compacted_4b_initial == 32 / 4)
+ compacted_4b_initial = 0;
+
+ if (vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B)
+ compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
+ else
+ compacted_2b = 0;
+
+ pos = ebase;
+ if (lcn < compacted_4b_initial) {
+ amortizedshift = 2;
+ goto out;
+ }
+ pos += compacted_4b_initial * 4;
+ lcn -= compacted_4b_initial;
+
+ if (lcn < compacted_2b) {
+ amortizedshift = 1;
+ goto out;
+ }
+ pos += compacted_2b * 2;
+ lcn -= compacted_2b;
+ amortizedshift = 2;
+out:
+ pos += lcn * (1 << amortizedshift);
+ err = z_erofs_reload_indexes(m, erofs_blknr(pos));
+ if (err)
+ return err;
+ return unpack_compacted_index(m, amortizedshift, erofs_blkoff(pos));
+}
+
+static int vle_load_cluster_from_disk(struct z_erofs_maprecorder *m,
+ unsigned int lcn)
+{
+ const unsigned int datamode = EROFS_I(m->inode)->datalayout;
+
+ if (datamode == EROFS_INODE_FLAT_COMPRESSION_LEGACY)
+ return vle_legacy_load_cluster_from_disk(m, lcn);
+
+ if (datamode == EROFS_INODE_FLAT_COMPRESSION)
+ return compacted_load_cluster_from_disk(m, lcn);
+
+ return -EINVAL;
+}
+
+static int vle_extent_lookback(struct z_erofs_maprecorder *m,
+ unsigned int lookback_distance)
+{
+ struct erofs_inode *const vi = EROFS_I(m->inode);
+ struct erofs_map_blocks *const map = m->map;
+ const unsigned int lclusterbits = vi->z_logical_clusterbits;
+ unsigned long lcn = m->lcn;
+ int err;
+
+ if (lcn < lookback_distance) {
+ erofs_err(m->inode->i_sb,
+ "bogus lookback distance @ nid %llu", vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+
+ /* load extent head logical cluster if needed */
+ lcn -= lookback_distance;
+ err = vle_load_cluster_from_disk(m, lcn);
+ if (err)
+ return err;
+
+ switch (m->type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ if (!m->delta[0]) {
+ erofs_err(m->inode->i_sb,
+ "invalid lookback distance 0 @ nid %llu",
+ vi->nid);
+ DBG_BUGON(1);
+ return -EFSCORRUPTED;
+ }
+ return vle_extent_lookback(m, m->delta[0]);
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ map->m_flags &= ~EROFS_MAP_ZIPPED;
+ /* fallthrough */
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ map->m_la = (lcn << lclusterbits) | m->clusterofs;
+ break;
+ default:
+ erofs_err(m->inode->i_sb,
+ "unknown type %u @ lcn %lu of nid %llu",
+ m->type, lcn, vi->nid);
+ DBG_BUGON(1);
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+int z_erofs_map_blocks_iter(struct inode *inode,
+ struct erofs_map_blocks *map,
+ int flags)
+{
+ struct erofs_inode *const vi = EROFS_I(inode);
+ struct z_erofs_maprecorder m = {
+ .inode = inode,
+ .map = map,
+ };
+ int err = 0;
+ unsigned int lclusterbits, endoff;
+ unsigned long long ofs, end;
+
+ trace_z_erofs_map_blocks_iter_enter(inode, map, flags);
+
+ /* when trying to read beyond EOF, leave it unmapped */
+ if (map->m_la >= inode->i_size) {
+ map->m_llen = map->m_la + 1 - inode->i_size;
+ map->m_la = inode->i_size;
+ map->m_flags = 0;
+ goto out;
+ }
+
+ err = fill_inode_lazy(inode);
+ if (err)
+ goto out;
+
+ lclusterbits = vi->z_logical_clusterbits;
+ ofs = map->m_la;
+ m.lcn = ofs >> lclusterbits;
+ endoff = ofs & ((1 << lclusterbits) - 1);
+
+ err = vle_load_cluster_from_disk(&m, m.lcn);
+ if (err)
+ goto unmap_out;
+
+ map->m_flags = EROFS_MAP_ZIPPED; /* by default, compressed */
+ end = (m.lcn + 1ULL) << lclusterbits;
+
+ switch (m.type) {
+ case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
+ if (endoff >= m.clusterofs)
+ map->m_flags &= ~EROFS_MAP_ZIPPED;
+ /* fallthrough */
+ case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
+ if (endoff >= m.clusterofs) {
+ map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
+ break;
+ }
+ /* m.lcn should be >= 1 if endoff < m.clusterofs */
+ if (!m.lcn) {
+ erofs_err(inode->i_sb,
+ "invalid logical cluster 0 at nid %llu",
+ vi->nid);
+ err = -EFSCORRUPTED;
+ goto unmap_out;
+ }
+ end = (m.lcn << lclusterbits) | m.clusterofs;
+ map->m_flags |= EROFS_MAP_FULL_MAPPED;
+ m.delta[0] = 1;
+ /* fallthrough */
+ case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
+ /* get the corresponding first chunk */
+ err = vle_extent_lookback(&m, m.delta[0]);
+ if (err)
+ goto unmap_out;
+ break;
+ default:
+ erofs_err(inode->i_sb,
+ "unknown type %u @ offset %llu of nid %llu",
+ m.type, ofs, vi->nid);
+ err = -EOPNOTSUPP;
+ goto unmap_out;
+ }
+
+ map->m_llen = end - map->m_la;
+ map->m_plen = 1 << lclusterbits;
+ map->m_pa = blknr_to_addr(m.pblk);
+ map->m_flags |= EROFS_MAP_MAPPED;
+
+unmap_out:
+ if (m.kaddr)
+ kunmap_atomic(m.kaddr);
+
+out:
+ erofs_dbg("%s, m_la %llu m_pa %llu m_llen %llu m_plen %llu m_flags 0%o",
+ __func__, map->m_la, map->m_pa,
+ map->m_llen, map->m_plen, map->m_flags);
+
+ trace_z_erofs_map_blocks_iter_exit(inode, map, flags, err);
+
+ /* aggressively BUG_ON iff CONFIG_EROFS_FS_DEBUG is on */
+ DBG_BUGON(err < 0 && err != -ENOMEM);
+ return err;
+}
+
diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h
new file mode 100644
index 000000000000..58556903aa94
--- /dev/null
+++ b/fs/erofs/zpvec.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 HUAWEI, Inc.
+ * http://www.huawei.com/
+ * Created by Gao Xiang <gaoxiang25@huawei.com>
+ */
+#ifndef __EROFS_FS_ZPVEC_H
+#define __EROFS_FS_ZPVEC_H
+
+#include "tagptr.h"
+
+/* page type in pagevec for decompress subsystem */
+enum z_erofs_page_type {
+ /* including Z_EROFS_VLE_PAGE_TAIL_EXCLUSIVE */
+ Z_EROFS_PAGE_TYPE_EXCLUSIVE,
+
+ Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED,
+
+ Z_EROFS_VLE_PAGE_TYPE_HEAD,
+ Z_EROFS_VLE_PAGE_TYPE_MAX
+};
+
+extern void __compiletime_error("Z_EROFS_PAGE_TYPE_EXCLUSIVE != 0")
+ __bad_page_type_exclusive(void);
+
+/* pagevec tagged pointer */
+typedef tagptr2_t erofs_vtptr_t;
+
+/* pagevec collector */
+struct z_erofs_pagevec_ctor {
+ struct page *curr, *next;
+ erofs_vtptr_t *pages;
+
+ unsigned int nr, index;
+};
+
+static inline void z_erofs_pagevec_ctor_exit(struct z_erofs_pagevec_ctor *ctor,
+ bool atomic)
+{
+ if (!ctor->curr)
+ return;
+
+ if (atomic)
+ kunmap_atomic(ctor->pages);
+ else
+ kunmap(ctor->curr);
+}
+
+static inline struct page *
+z_erofs_pagevec_ctor_next_page(struct z_erofs_pagevec_ctor *ctor,
+ unsigned int nr)
+{
+ unsigned int index;
+
+ /* keep away from occupied pages */
+ if (ctor->next)
+ return ctor->next;
+
+ for (index = 0; index < nr; ++index) {
+ const erofs_vtptr_t t = ctor->pages[index];
+ const unsigned int tags = tagptr_unfold_tags(t);
+
+ if (tags == Z_EROFS_PAGE_TYPE_EXCLUSIVE)
+ return tagptr_unfold_ptr(t);
+ }
+ DBG_BUGON(nr >= ctor->nr);
+ return NULL;
+}
+
+static inline void
+z_erofs_pagevec_ctor_pagedown(struct z_erofs_pagevec_ctor *ctor,
+ bool atomic)
+{
+ struct page *next = z_erofs_pagevec_ctor_next_page(ctor, ctor->nr);
+
+ z_erofs_pagevec_ctor_exit(ctor, atomic);
+
+ ctor->curr = next;
+ ctor->next = NULL;
+ ctor->pages = atomic ?
+ kmap_atomic(ctor->curr) : kmap(ctor->curr);
+
+ ctor->nr = PAGE_SIZE / sizeof(struct page *);
+ ctor->index = 0;
+}
+
+static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
+ unsigned int nr,
+ erofs_vtptr_t *pages,
+ unsigned int i)
+{
+ ctor->nr = nr;
+ ctor->curr = ctor->next = NULL;
+ ctor->pages = pages;
+
+ if (i >= nr) {
+ i -= nr;
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+ while (i > ctor->nr) {
+ i -= ctor->nr;
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+ }
+ }
+ ctor->next = z_erofs_pagevec_ctor_next_page(ctor, i);
+ ctor->index = i;
+}
+
+static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
+ struct page *page,
+ enum z_erofs_page_type type,
+ bool *occupied)
+{
+ *occupied = false;
+ if (!ctor->next && type)
+ if (ctor->index + 1 == ctor->nr)
+ return false;
+
+ if (ctor->index >= ctor->nr)
+ z_erofs_pagevec_ctor_pagedown(ctor, false);
+
+ /* exclusive page type must be 0 */
+ if (Z_EROFS_PAGE_TYPE_EXCLUSIVE != (uintptr_t)NULL)
+ __bad_page_type_exclusive();
+
+ /* note that collector->next never equals 1 or 2 */
+ if (type == (uintptr_t)ctor->next) {
+ ctor->next = page;
+ *occupied = true;
+ }
+ ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, page, type);
+ return true;
+}
+
+static inline struct page *
+z_erofs_pagevec_dequeue(struct z_erofs_pagevec_ctor *ctor,
+ enum z_erofs_page_type *type)
+{
+ erofs_vtptr_t t;
+
+ if (ctor->index >= ctor->nr) {
+ DBG_BUGON(!ctor->next);
+ z_erofs_pagevec_ctor_pagedown(ctor, true);
+ }
+
+ t = ctor->pages[ctor->index];
+
+ *type = tagptr_unfold_tags(t);
+
+ /* note that collector->next never equals 1 or 2 */
+ if (*type == (uintptr_t)ctor->next)
+ ctor->next = tagptr_unfold_ptr(t);
+
+ ctor->pages[ctor->index++] = tagptr_fold(erofs_vtptr_t, NULL, 0);
+ return tagptr_unfold_ptr(t);
+}
+#endif
+
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index d7f1f5011fac..c4159bcc05d9 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1459,13 +1459,13 @@ static int ep_create_wakeup_source(struct epitem *epi)
struct wakeup_source *ws;
if (!epi->ep->ws) {
- epi->ep->ws = wakeup_source_register("eventpoll");
+ epi->ep->ws = wakeup_source_register(NULL, "eventpoll");
if (!epi->ep->ws)
return -ENOMEM;
}
name = epi->ffd.file->f_path.dentry->d_name.name;
- ws = wakeup_source_register(name);
+ ws = wakeup_source_register(NULL, name);
if (!ws)
return -ENOMEM;
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index f0e549783caf..09bc68708d28 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -7,7 +7,7 @@
* and for mapping back from file handles to dentries.
*
* For details on why we do all the strange and hairy things in here
- * take a look at Documentation/filesystems/nfs/Exporting.
+ * take a look at Documentation/filesystems/nfs/exporting.rst.
*/
#include <linux/exportfs.h>
#include <linux/fs.h>
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 420fe3deed39..006b7a2070bf 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -4586,7 +4586,6 @@ static int __ext4_get_inode_loc(struct inode *inode,
struct buffer_head *bh;
struct super_block *sb = inode->i_sb;
ext4_fsblk_t block;
- struct blk_plug plug;
int inodes_per_block, inode_offset;
iloc->bh = NULL;
@@ -4675,7 +4674,6 @@ make_io:
* If we need to do any I/O, try to pre-readahead extra
* blocks from the inode table.
*/
- blk_start_plug(&plug);
if (EXT4_SB(sb)->s_inode_readahead_blks) {
ext4_fsblk_t b, end, table;
unsigned num;
@@ -4706,7 +4704,6 @@ make_io:
get_bh(bh);
bh->b_end_io = end_buffer_read_sync;
submit_bh(REQ_OP_READ, REQ_META | REQ_PRIO, bh);
- blk_finish_plug(&plug);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
EXT4_ERROR_INODE_BLOCK(inode, block,
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 542b02d170f8..8aaa7eec7b74 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -36,10 +36,6 @@
*/
#define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
-struct wb_completion {
- atomic_t cnt;
-};
-
/*
* Passed into wb_writeback(), essentially a subset of writeback_control
*/
@@ -61,19 +57,6 @@ struct wb_writeback_work {
};
/*
- * If one wants to wait for one or more wb_writeback_works, each work's
- * ->done should be set to a wb_completion defined using the following
- * macro. Once all work items are issued with wb_queue_work(), the caller
- * can wait for the completion of all using wb_wait_for_completion(). Work
- * items which are waited upon aren't freed automatically on completion.
- */
-#define DEFINE_WB_COMPLETION_ONSTACK(cmpl) \
- struct wb_completion cmpl = { \
- .cnt = ATOMIC_INIT(1), \
- }
-
-
-/*
* If an inode is constantly having its pages dirtied, but then the
* updates stop dirtytime_expire_interval seconds in the past, it's
* possible for the worst case time between when an inode has its
@@ -182,7 +165,7 @@ static void finish_writeback_work(struct bdi_writeback *wb,
if (work->auto_free)
kfree(work);
if (done && atomic_dec_and_test(&done->cnt))
- wake_up_all(&wb->bdi->wb_waitq);
+ wake_up_all(done->waitq);
}
static void wb_queue_work(struct bdi_writeback *wb,
@@ -206,28 +189,44 @@ static void wb_queue_work(struct bdi_writeback *wb,
/**
* wb_wait_for_completion - wait for completion of bdi_writeback_works
- * @bdi: bdi work items were issued to
* @done: target wb_completion
*
* Wait for one or more work items issued to @bdi with their ->done field
- * set to @done, which should have been defined with
- * DEFINE_WB_COMPLETION_ONSTACK(). This function returns after all such
- * work items are completed. Work items which are waited upon aren't freed
+ * set to @done, which should have been initialized with
+ * DEFINE_WB_COMPLETION(). This function returns after all such work items
+ * are completed. Work items which are waited upon aren't freed
* automatically on completion.
*/
-static void wb_wait_for_completion(struct backing_dev_info *bdi,
- struct wb_completion *done)
+void wb_wait_for_completion(struct wb_completion *done)
{
atomic_dec(&done->cnt); /* put down the initial count */
- wait_event(bdi->wb_waitq, !atomic_read(&done->cnt));
+ wait_event(*done->waitq, !atomic_read(&done->cnt));
}
#ifdef CONFIG_CGROUP_WRITEBACK
-/* parameters for foreign inode detection, see wb_detach_inode() */
+/*
+ * Parameters for foreign inode detection, see wbc_detach_inode() for
+ * how they're used.
+ *
+ * These parameters are inherently heuristic as the detection target
+ * itself is fuzzy. All we want to do is detach an inode from its
+ * current owner if it's being written to too much by other cgroups.
+ *
+ * The current cgroup writeback is built on the assumption that multiple
+ * cgroups writing to the same inode concurrently is very rare and a mode
+ * of operation which isn't well supported. As such, the goal is to not
+ * take too long when a different cgroup takes over an inode, while
+ * avoiding overly aggressive flip-flops from occasional foreign writes.
+ *
+ * We record, very roughly, 2s worth of IO time history and if more than
+ * half of that is foreign, trigger the switch. The recording is quantized
+ * to 16 slots. To keep tiny writes from swinging the decision too much,
+ * writes smaller than 1/8 of avg size are ignored.
+ */
#define WB_FRN_TIME_SHIFT 13 /* 1s = 2^13, upto 8 secs w/ 16bit */
#define WB_FRN_TIME_AVG_SHIFT 3 /* avg = avg * 7/8 + new * 1/8 */
-#define WB_FRN_TIME_CUT_DIV 2 /* ignore rounds < avg / 2 */
+#define WB_FRN_TIME_CUT_DIV 8 /* ignore rounds < avg / 8 */
#define WB_FRN_TIME_PERIOD (2 * (1 << WB_FRN_TIME_SHIFT)) /* 2s */
#define WB_FRN_HIST_SLOTS 16 /* inode->i_wb_frn_history is 16bit */
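Roughly, with WB_FRN_TIME_SHIFT == 13 (1s ~= 2^13 time units), the 2s period split into 16 history slots gives about 125ms of IO time per slot; a quick standalone check of that arithmetic:

/* back-of-the-envelope check of the foreign-detection slot width */
#include <stdio.h>

int main(void)
{
        const unsigned long one_sec = 1UL << 13;        /* WB_FRN_TIME_SHIFT */
        const unsigned long period  = 2 * one_sec;      /* WB_FRN_TIME_PERIOD */
        const unsigned long slots   = 16;               /* WB_FRN_HIST_SLOTS */

        printf("slot width: %lu units (~%lu ms)\n",
               period / slots, 1000UL * (period / slots) / one_sec);
        return 0;
}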
@@ -237,6 +236,7 @@ static void wb_wait_for_completion(struct backing_dev_info *bdi,
/* if foreign slots >= 8, switch */
#define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
/* one round can affect upto 5 slots */
+#define WB_FRN_MAX_IN_FLIGHT 1024 /* don't queue too many concurrently */
static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
static struct workqueue_struct *isw_wq;
@@ -389,6 +389,8 @@ static void inode_switch_wbs_work_fn(struct work_struct *work)
if (unlikely(inode->i_state & I_FREEING))
goto skip_switch;
+ trace_inode_switch_wbs(inode, old_wb, new_wb);
+
/*
* Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
* to possibly dirty pages while PAGECACHE_TAG_WRITEBACK points to
@@ -489,18 +491,13 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
if (inode->i_state & I_WB_SWITCH)
return;
- /*
- * Avoid starting new switches while sync_inodes_sb() is in
- * progress. Otherwise, if the down_write protected issue path
- * blocks heavily, we might end up starting a large number of
- * switches which will block on the rwsem.
- */
- if (!down_read_trylock(&bdi->wb_switch_rwsem))
+ /* avoid queueing a new switch if too many are already in flight */
+ if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
return;
isw = kzalloc(sizeof(*isw), GFP_ATOMIC);
if (!isw)
- goto out_unlock;
+ return;
/* find and pin the new wb */
rcu_read_lock();
@@ -534,15 +531,12 @@ static void inode_switch_wbs(struct inode *inode, int new_wb_id)
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
atomic_inc(&isw_nr_in_flight);
-
- goto out_unlock;
+ return;
out_free:
if (isw->new_wb)
wb_put(isw->new_wb);
kfree(isw);
-out_unlock:
- up_read(&bdi->wb_switch_rwsem);
}
/**
@@ -681,6 +675,9 @@ void wbc_detach_inode(struct writeback_control *wbc)
if (wbc->wb_id != max_id)
history |= (1U << slots) - 1;
+ if (history)
+ trace_inode_foreign_history(inode, wbc, history);
+
/*
* Switch if the current wb isn't the consistent winner.
* If there are multiple closely competing dirtiers, the
@@ -843,7 +840,7 @@ static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
restart:
rcu_read_lock();
list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
- DEFINE_WB_COMPLETION_ONSTACK(fallback_work_done);
+ DEFINE_WB_COMPLETION(fallback_work_done, bdi);
struct wb_writeback_work fallback_work;
struct wb_writeback_work *work;
long nr_pages;
@@ -890,7 +887,7 @@ restart:
last_wb = wb;
rcu_read_unlock();
- wb_wait_for_completion(bdi, &fallback_work_done);
+ wb_wait_for_completion(&fallback_work_done);
goto restart;
}
rcu_read_unlock();
@@ -900,6 +897,89 @@ restart:
}
/**
+ * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
+ * @bdi_id: target bdi id
+ * @memcg_id: target memcg css id
+ * @nr: number of pages to write, 0 for best-effort dirty flushing
+ * @reason: reason why some writeback work was initiated
+ * @done: target wb_completion
+ *
+ * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
+ * with the specified parameters.
+ */
+int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, unsigned long nr,
+ enum wb_reason reason, struct wb_completion *done)
+{
+ struct backing_dev_info *bdi;
+ struct cgroup_subsys_state *memcg_css;
+ struct bdi_writeback *wb;
+ struct wb_writeback_work *work;
+ int ret;
+
+ /* lookup bdi and memcg */
+ bdi = bdi_get_by_id(bdi_id);
+ if (!bdi)
+ return -ENOENT;
+
+ rcu_read_lock();
+ memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
+ if (memcg_css && !css_tryget(memcg_css))
+ memcg_css = NULL;
+ rcu_read_unlock();
+ if (!memcg_css) {
+ ret = -ENOENT;
+ goto out_bdi_put;
+ }
+
+ /*
+ * And find the associated wb. If the wb isn't there already
+ * there's nothing to flush, don't create one.
+ */
+ wb = wb_get_lookup(bdi, memcg_css);
+ if (!wb) {
+ ret = -ENOENT;
+ goto out_css_put;
+ }
+
+ /*
+ * If @nr is zero, the caller is attempting to write out most of
+ * the currently dirty pages. Let's take the current dirty page
+ * count and inflate it by 25% which should be large enough to
+ * flush out most dirty pages while avoiding getting livelocked by
+ * concurrent dirtiers.
+ */
+ if (!nr) {
+ unsigned long filepages, headroom, dirty, writeback;
+
+ mem_cgroup_wb_stats(wb, &filepages, &headroom, &dirty,
+ &writeback);
+ nr = dirty * 10 / 8;
+ }
+
+ /* issue the writeback work */
+ work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
+ if (work) {
+ work->nr_pages = nr;
+ work->sync_mode = WB_SYNC_NONE;
+ work->range_cyclic = 1;
+ work->reason = reason;
+ work->done = done;
+ work->auto_free = 1;
+ wb_queue_work(wb, work);
+ ret = 0;
+ } else {
+ ret = -ENOMEM;
+ }
+
+ wb_put(wb);
+out_css_put:
+ css_put(memcg_css);
+out_bdi_put:
+ bdi_put(bdi);
+ return ret;
+}
+
+/**
* cgroup_writeback_umount - flush inode wb switches for umount
*
* This function is called when a super_block is about to be destroyed and
@@ -2362,7 +2442,8 @@ static void wait_sb_inodes(struct super_block *sb)
static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
enum wb_reason reason, bool skip_if_busy)
{
- DEFINE_WB_COMPLETION_ONSTACK(done);
+ struct backing_dev_info *bdi = sb->s_bdi;
+ DEFINE_WB_COMPLETION(done, bdi);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_NONE,
@@ -2371,14 +2452,13 @@ static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
.nr_pages = nr,
.reason = reason,
};
- struct backing_dev_info *bdi = sb->s_bdi;
if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
- wb_wait_for_completion(bdi, &done);
+ wb_wait_for_completion(&done);
}
/**
@@ -2440,7 +2520,8 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb);
*/
void sync_inodes_sb(struct super_block *sb)
{
- DEFINE_WB_COMPLETION_ONSTACK(done);
+ struct backing_dev_info *bdi = sb->s_bdi;
+ DEFINE_WB_COMPLETION(done, bdi);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_ALL,
@@ -2450,7 +2531,6 @@ void sync_inodes_sb(struct super_block *sb)
.reason = WB_REASON_SYNC,
.for_sync = 1,
};
- struct backing_dev_info *bdi = sb->s_bdi;
/*
* Can't skip on !bdi_has_dirty() because we should wait for !dirty
@@ -2464,7 +2544,7 @@ void sync_inodes_sb(struct super_block *sb)
/* protect against inode wb switch, see inode_switch_wbs_work_fn() */
bdi_down_write_wb_switch_rwsem(bdi);
bdi_split_work_to_wbs(bdi, &work, false);
- wb_wait_for_completion(bdi, &done);
+ wb_wait_for_completion(&done);
bdi_up_write_wb_switch_rwsem(bdi);
wait_sb_inodes(sb);
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 103643c68e3f..87c2c9687d90 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -279,10 +279,8 @@ static struct fs_context *alloc_fs_context(struct file_system_type *fs_type,
fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
break;
case FS_CONTEXT_FOR_RECONFIGURE:
- /* We don't pin any namespaces as the superblock's
- * subscriptions cannot be changed at this point.
- */
atomic_inc(&reference->d_sb->s_active);
+ fc->user_ns = get_user_ns(reference->d_sb->s_user_ns);
fc->root = dget(reference);
break;
}
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 4df26ef2b2b1..4f8b5fd6c81f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -390,6 +390,19 @@ static int fillup_metapath(struct gfs2_inode *ip, struct metapath *mp, int h)
return mp->mp_aheight - x - 1;
}
+static sector_t metapath_to_block(struct gfs2_sbd *sdp, struct metapath *mp)
+{
+ sector_t factor = 1, block = 0;
+ int hgt;
+
+ for (hgt = mp->mp_fheight - 1; hgt >= 0; hgt--) {
+ if (hgt < mp->mp_aheight)
+ block += mp->mp_list[hgt] * factor;
+ factor *= sdp->sd_inptrs;
+ }
+ return block;
+}
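metapath_to_block() above is a mixed-radix conversion with sd_inptrs as the radix. A simplified standalone model (the pointer count per block and the indices are made-up sample values, and the real helper also skips heights that are not yet allocated):

/* model of turning per-height indirect-block indices into a block number */
#include <stdio.h>

int main(void)
{
        const unsigned long inptrs = 509;       /* assumed pointers per block */
        const unsigned long mp_list[3] = { 1, 2, 3 };   /* hypothetical path */
        unsigned long factor = 1, block = 0;
        int hgt;

        for (hgt = 2; hgt >= 0; hgt--) {
                block += mp_list[hgt] * factor;
                factor *= inptrs;
        }
        /* 3 + 2*509 + 1*509*509 = 260102 */
        printf("relative block: %lu\n", block);
        return 0;
}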
+
static void release_metapath(struct metapath *mp)
{
int i;
@@ -430,60 +443,84 @@ static inline unsigned int gfs2_extent_length(struct buffer_head *bh, __be64 *pt
return ptr - first;
}
-typedef const __be64 *(*gfs2_metadata_walker)(
- struct metapath *mp,
- const __be64 *start, const __be64 *end,
- u64 factor, void *data);
+enum walker_status { WALK_STOP, WALK_FOLLOW, WALK_CONTINUE };
-#define WALK_STOP ((__be64 *)0)
-#define WALK_NEXT ((__be64 *)1)
+/*
+ * gfs2_metadata_walker - walk an indirect block
+ * @mp: Metapath to indirect block
+ * @ptrs: Number of pointers to look at
+ *
+ * When returning WALK_FOLLOW, the walker must update @mp to point at the right
+ * indirect block to follow.
+ */
+typedef enum walker_status (*gfs2_metadata_walker)(struct metapath *mp,
+ unsigned int ptrs);
-static int gfs2_walk_metadata(struct inode *inode, sector_t lblock,
- u64 len, struct metapath *mp, gfs2_metadata_walker walker,
- void *data)
+/*
+ * gfs2_walk_metadata - walk a tree of indirect blocks
+ * @inode: The inode
+ * @mp: Starting point of walk
+ * @max_len: Maximum number of blocks to walk
+ * @walker: Called during the walk
+ *
+ * Returns 1 if the walk was stopped by @walker, 0 if we went past @max_len or
+ * past the end of metadata, and a negative error code otherwise.
+ */
+
+static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
+ u64 max_len, gfs2_metadata_walker walker)
{
- struct metapath clone;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
- const __be64 *start, *end, *ptr;
u64 factor = 1;
unsigned int hgt;
- int ret = 0;
+ int ret;
- for (hgt = ip->i_height - 1; hgt >= mp->mp_aheight; hgt--)
+ /*
+ * The walk starts in the lowest allocated indirect block, which may be
+ * before the position indicated by @mp. Adjust @max_len accordingly
+ * to avoid a short walk.
+ */
+ for (hgt = mp->mp_fheight - 1; hgt >= mp->mp_aheight; hgt--) {
+ max_len += mp->mp_list[hgt] * factor;
+ mp->mp_list[hgt] = 0;
factor *= sdp->sd_inptrs;
+ }
for (;;) {
- u64 step;
+ u16 start = mp->mp_list[hgt];
+ enum walker_status status;
+ unsigned int ptrs;
+ u64 len;
/* Walk indirect block. */
- start = metapointer(hgt, mp);
- end = metaend(hgt, mp);
-
- step = (end - start) * factor;
- if (step > len)
- end = start + DIV_ROUND_UP_ULL(len, factor);
-
- ptr = walker(mp, start, end, factor, data);
- if (ptr == WALK_STOP)
+ ptrs = (hgt >= 1 ? sdp->sd_inptrs : sdp->sd_diptrs) - start;
+ len = ptrs * factor;
+ if (len > max_len)
+ ptrs = DIV_ROUND_UP_ULL(max_len, factor);
+ status = walker(mp, ptrs);
+ switch (status) {
+ case WALK_STOP:
+ return 1;
+ case WALK_FOLLOW:
+ BUG_ON(mp->mp_aheight == mp->mp_fheight);
+ ptrs = mp->mp_list[hgt] - start;
+ len = ptrs * factor;
break;
- if (step >= len)
+ case WALK_CONTINUE:
break;
- len -= step;
- if (ptr != WALK_NEXT) {
- BUG_ON(!*ptr);
- mp->mp_list[hgt] += ptr - start;
- goto fill_up_metapath;
}
+ if (len >= max_len)
+ break;
+ max_len -= len;
+ if (status == WALK_FOLLOW)
+ goto fill_up_metapath;
lower_metapath:
/* Decrease height of metapath. */
- if (mp != &clone) {
- clone_metapath(&clone, mp);
- mp = &clone;
- }
brelse(mp->mp_bh[hgt]);
mp->mp_bh[hgt] = NULL;
+ mp->mp_list[hgt] = 0;
if (!hgt)
break;
hgt--;
@@ -491,10 +528,7 @@ lower_metapath:
/* Advance in metadata tree. */
(mp->mp_list[hgt])++;
- start = metapointer(hgt, mp);
- end = metaend(hgt, mp);
- if (start >= end) {
- mp->mp_list[hgt] = 0;
+ if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
if (!hgt)
break;
goto lower_metapath;
@@ -502,44 +536,36 @@ lower_metapath:
fill_up_metapath:
/* Increase height of metapath. */
- if (mp != &clone) {
- clone_metapath(&clone, mp);
- mp = &clone;
- }
ret = fillup_metapath(ip, mp, ip->i_height - 1);
if (ret < 0)
- break;
+ return ret;
hgt += ret;
for (; ret; ret--)
do_div(factor, sdp->sd_inptrs);
mp->mp_aheight = hgt + 1;
}
- if (mp == &clone)
- release_metapath(mp);
- return ret;
+ return 0;
}
-struct gfs2_hole_walker_args {
- u64 blocks;
-};
-
-static const __be64 *gfs2_hole_walker(struct metapath *mp,
- const __be64 *start, const __be64 *end,
- u64 factor, void *data)
+static enum walker_status gfs2_hole_walker(struct metapath *mp,
+ unsigned int ptrs)
{
- struct gfs2_hole_walker_args *args = data;
- const __be64 *ptr;
+ const __be64 *start, *ptr, *end;
+ unsigned int hgt;
+
+ hgt = mp->mp_aheight - 1;
+ start = metapointer(hgt, mp);
+ end = start + ptrs;
for (ptr = start; ptr < end; ptr++) {
if (*ptr) {
- args->blocks += (ptr - start) * factor;
+ mp->mp_list[hgt] += ptr - start;
if (mp->mp_aheight == mp->mp_fheight)
return WALK_STOP;
- return ptr; /* increase height */
+ return WALK_FOLLOW;
}
}
- args->blocks += (end - start) * factor;
- return WALK_NEXT;
+ return WALK_CONTINUE;
}
/**
@@ -557,12 +583,24 @@ static const __be64 *gfs2_hole_walker(struct metapath *mp,
static int gfs2_hole_size(struct inode *inode, sector_t lblock, u64 len,
struct metapath *mp, struct iomap *iomap)
{
- struct gfs2_hole_walker_args args = { };
- int ret = 0;
+ struct metapath clone;
+ u64 hole_size;
+ int ret;
- ret = gfs2_walk_metadata(inode, lblock, len, mp, gfs2_hole_walker, &args);
- if (!ret)
- iomap->length = args.blocks << inode->i_blkbits;
+ clone_metapath(&clone, mp);
+ ret = gfs2_walk_metadata(inode, &clone, len, gfs2_hole_walker);
+ if (ret < 0)
+ goto out;
+
+ if (ret == 1)
+ hole_size = metapath_to_block(GFS2_SB(inode), &clone) - lblock;
+ else
+ hole_size = len;
+ iomap->length = hole_size << inode->i_blkbits;
+ ret = 0;
+
+out:
+ release_metapath(&clone);
return ret;
}
diff --git a/fs/io_uring.c b/fs/io_uring.c
index d542f1cf4428..0dadbdbead0f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -75,7 +75,7 @@
#include "internal.h"
-#define IORING_MAX_ENTRIES 4096
+#define IORING_MAX_ENTRIES 32768
#define IORING_MAX_FIXED_FILES 1024
struct io_uring {
@@ -84,27 +84,29 @@ struct io_uring {
};
/*
- * This data is shared with the application through the mmap at offset
- * IORING_OFF_SQ_RING.
+ * This data is shared with the application through the mmap at offsets
+ * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
*
* The offsets to the member fields are published through struct
* io_sqring_offsets when calling io_uring_setup.
*/
-struct io_sq_ring {
+struct io_rings {
/*
* Head and tail offsets into the ring; the offsets need to be
* masked to get valid indices.
*
- * The kernel controls head and the application controls tail.
+ * The kernel controls head of the sq ring and the tail of the cq ring,
+ * and the application controls tail of the sq ring and the head of the
+ * cq ring.
*/
- struct io_uring r;
+ struct io_uring sq, cq;
/*
- * Bitmask to apply to head and tail offsets (constant, equals
+ * Bitmasks to apply to head and tail offsets (constant, equals
* ring_entries - 1)
*/
- u32 ring_mask;
- /* Ring size (constant, power of 2) */
- u32 ring_entries;
+ u32 sq_ring_mask, cq_ring_mask;
+ /* Ring sizes (constant, power of 2) */
+ u32 sq_ring_entries, cq_ring_entries;
/*
* Number of invalid entries dropped by the kernel due to
* invalid index stored in array
@@ -117,7 +119,7 @@ struct io_sq_ring {
* counter includes all submissions that were dropped reaching
* the new SQ head (and possibly more).
*/
- u32 dropped;
+ u32 sq_dropped;
/*
* Runtime flags
*
@@ -127,43 +129,7 @@ struct io_sq_ring {
* The application needs a full memory barrier before checking
* for IORING_SQ_NEED_WAKEUP after updating the sq tail.
*/
- u32 flags;
- /*
- * Ring buffer of indices into array of io_uring_sqe, which is
- * mmapped by the application using the IORING_OFF_SQES offset.
- *
- * This indirection could e.g. be used to assign fixed
- * io_uring_sqe entries to operations and only submit them to
- * the queue when needed.
- *
- * The kernel modifies neither the indices array nor the entries
- * array.
- */
- u32 array[];
-};
-
-/*
- * This data is shared with the application through the mmap at offset
- * IORING_OFF_CQ_RING.
- *
- * The offsets to the member fields are published through struct
- * io_cqring_offsets when calling io_uring_setup.
- */
-struct io_cq_ring {
- /*
- * Head and tail offsets into the ring; the offsets need to be
- * masked to get valid indices.
- *
- * The application controls head and the kernel tail.
- */
- struct io_uring r;
- /*
- * Bitmask to apply to head and tail offsets (constant, equals
- * ring_entries - 1)
- */
- u32 ring_mask;
- /* Ring size (constant, power of 2) */
- u32 ring_entries;
+ u32 sq_flags;
/*
* Number of completion events lost because the queue was full;
* this should be avoided by the application by making sure
@@ -177,7 +143,7 @@ struct io_cq_ring {
* As completion events come in out of order this counter is not
* ordered with any other data.
*/
- u32 overflow;
+ u32 cq_overflow;
/*
* Ring buffer of completion events.
*
@@ -185,7 +151,7 @@ struct io_cq_ring {
* produced, so the application is allowed to modify pending
* entries.
*/
- struct io_uring_cqe cqes[];
+ struct io_uring_cqe cqes[] ____cacheline_aligned_in_smp;
};
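The head/tail comments above rely on indices that only ever grow and are folded into the arrays with the power-of-2 masks at access time. A tiny single-threaded model of that indexing (an illustration only; the real rings are shared with userspace and use memory barriers plus READ_ONCE/WRITE_ONCE):

/* minimal model of masked head/tail ring indexing */
#include <stdio.h>

#define ENTRIES 8       /* must be a power of 2, like the ring_entries above */

int main(void)
{
        unsigned int ring[ENTRIES];
        unsigned int head = 0, tail = 0, mask = ENTRIES - 1;
        unsigned int i;

        /* producer: only the masked value indexes the array */
        for (i = 0; i < 10; i++)
                if (tail - head < ENTRIES)      /* ring not full */
                        ring[tail++ & mask] = i;

        /* consumer: drain everything that was queued (prints 0..7) */
        while (head != tail)
                printf("got %u\n", ring[head++ & mask]);
        return 0;
}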
struct io_mapped_ubuf {
@@ -201,7 +167,7 @@ struct async_list {
struct list_head list;
struct file *file;
- off_t io_end;
+ off_t io_start;
size_t io_len;
};
@@ -215,8 +181,18 @@ struct io_ring_ctx {
bool compat;
bool account_mem;
- /* SQ ring */
- struct io_sq_ring *sq_ring;
+ /*
+ * Ring buffer of indices into array of io_uring_sqe, which is
+ * mmapped by the application using the IORING_OFF_SQES offset.
+ *
+ * This indirection could e.g. be used to assign fixed
+ * io_uring_sqe entries to operations and only submit them to
+ * the queue when needed.
+ *
+ * The kernel modifies neither the indices array nor the entries
+ * array.
+ */
+ u32 *sq_array;
unsigned cached_sq_head;
unsigned sq_entries;
unsigned sq_mask;
@@ -227,15 +203,13 @@ struct io_ring_ctx {
} ____cacheline_aligned_in_smp;
/* IO offload */
- struct workqueue_struct *sqo_wq;
+ struct workqueue_struct *sqo_wq[2];
struct task_struct *sqo_thread; /* if using sq thread polling */
struct mm_struct *sqo_mm;
wait_queue_head_t sqo_wait;
struct completion sqo_thread_started;
struct {
- /* CQ ring */
- struct io_cq_ring *cq_ring;
unsigned cached_cq_tail;
unsigned cq_entries;
unsigned cq_mask;
@@ -244,6 +218,8 @@ struct io_ring_ctx {
struct eventfd_ctx *cq_ev_fd;
} ____cacheline_aligned_in_smp;
+ struct io_rings *rings;
+
/*
* If used, fixed file set. Writers must ensure that ->refs is dead,
* readers must ensure that ->refs is alive as long as the file* is
@@ -288,6 +264,7 @@ struct io_ring_ctx {
struct sqe_submit {
const struct io_uring_sqe *sqe;
unsigned short index;
+ u32 sequence;
bool has_user;
bool needs_lock;
bool needs_fixed_file;
@@ -335,6 +312,7 @@ struct io_kiocb {
#define REQ_F_LINK 64 /* linked sqes */
#define REQ_F_LINK_DONE 128 /* linked sqes done */
#define REQ_F_FAIL_LINK 256 /* fail rest of links */
+#define REQ_F_SHADOW_DRAIN 512 /* link-drain shadow req */
u64 user_data;
u32 result;
u32 sequence;
@@ -366,6 +344,7 @@ struct io_submit_state {
};
static void io_sq_wq_submit_work(struct work_struct *work);
+static void __io_free_req(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -430,7 +409,7 @@ static inline bool io_sequence_defer(struct io_ring_ctx *ctx,
if ((req->flags & (REQ_F_IO_DRAIN|REQ_F_IO_DRAINED)) != REQ_F_IO_DRAIN)
return false;
- return req->sequence != ctx->cached_cq_tail + ctx->sq_ring->dropped;
+ return req->sequence != ctx->cached_cq_tail + ctx->rings->sq_dropped;
}
static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
@@ -451,11 +430,11 @@ static struct io_kiocb *io_get_deferred_req(struct io_ring_ctx *ctx)
static void __io_commit_cqring(struct io_ring_ctx *ctx)
{
- struct io_cq_ring *ring = ctx->cq_ring;
+ struct io_rings *rings = ctx->rings;
- if (ctx->cached_cq_tail != READ_ONCE(ring->r.tail)) {
+ if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
/* order cqe stores with ring update */
- smp_store_release(&ring->r.tail, ctx->cached_cq_tail);
+ smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
if (wq_has_sleeper(&ctx->cq_wait)) {
wake_up_interruptible(&ctx->cq_wait);
@@ -464,6 +443,24 @@ static void __io_commit_cqring(struct io_ring_ctx *ctx)
}
}
+static inline void io_queue_async_work(struct io_ring_ctx *ctx,
+ struct io_kiocb *req)
+{
+ int rw;
+
+ switch (req->submit.sqe->opcode) {
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ rw = !(req->rw.ki_flags & IOCB_DIRECT);
+ break;
+ default:
+ rw = 0;
+ break;
+ }
+
+ queue_work(ctx->sqo_wq[rw], &req->work);
+}
+
static void io_commit_cqring(struct io_ring_ctx *ctx)
{
struct io_kiocb *req;
@@ -471,14 +468,19 @@ static void io_commit_cqring(struct io_ring_ctx *ctx)
__io_commit_cqring(ctx);
while ((req = io_get_deferred_req(ctx)) != NULL) {
+ if (req->flags & REQ_F_SHADOW_DRAIN) {
+ /* Just for drain, free it. */
+ __io_free_req(req);
+ continue;
+ }
req->flags |= REQ_F_IO_DRAINED;
- queue_work(ctx->sqo_wq, &req->work);
+ io_queue_async_work(ctx, req);
}
}
static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
{
- struct io_cq_ring *ring = ctx->cq_ring;
+ struct io_rings *rings = ctx->rings;
unsigned tail;
tail = ctx->cached_cq_tail;
@@ -487,11 +489,11 @@ static struct io_uring_cqe *io_get_cqring(struct io_ring_ctx *ctx)
* control dependency is enough as we're using WRITE_ONCE to
* fill the cq entry
*/
- if (tail - READ_ONCE(ring->r.head) == ring->ring_entries)
+ if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
return NULL;
ctx->cached_cq_tail++;
- return &ring->cqes[tail & ctx->cq_mask];
+ return &rings->cqes[tail & ctx->cq_mask];
}
static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
@@ -510,9 +512,9 @@ static void io_cqring_fill_event(struct io_ring_ctx *ctx, u64 ki_user_data,
WRITE_ONCE(cqe->res, res);
WRITE_ONCE(cqe->flags, 0);
} else {
- unsigned overflow = READ_ONCE(ctx->cq_ring->overflow);
+ unsigned overflow = READ_ONCE(ctx->rings->cq_overflow);
- WRITE_ONCE(ctx->cq_ring->overflow, overflow + 1);
+ WRITE_ONCE(ctx->rings->cq_overflow, overflow + 1);
}
}
@@ -635,7 +637,7 @@ static void io_req_link_next(struct io_kiocb *req)
nxt->flags |= REQ_F_LINK_DONE;
INIT_WORK(&nxt->work, io_sq_wq_submit_work);
- queue_work(req->ctx->sqo_wq, &nxt->work);
+ io_queue_async_work(req->ctx, nxt);
}
}
@@ -679,6 +681,13 @@ static void io_put_req(struct io_kiocb *req)
io_free_req(req);
}
+static unsigned io_cqring_events(struct io_rings *rings)
+{
+ /* See comment at the top of this file */
+ smp_rmb();
+ return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
+}
+
/*
* Find and free completed poll iocbs
*/
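The new helper above counts ready CQEs as an unsigned tail minus head, relying on wraparound, and io_commit_cqring() publishes the tail with a release store. The application consumes the same ring from the other side. Below is a minimal userspace-side consumer sketch (not part of the patch); the pointers are assumed to have been resolved from io_uring_params.cq_off after mmap(), and GCC/Clang __atomic builtins stand in for the kernel barriers.

#include <linux/io_uring.h>

struct cq_ring_view {
	unsigned *head;			/* consumer index, advanced by the app    */
	unsigned *tail;			/* producer index, advanced by the kernel */
	unsigned *ring_mask;
	struct io_uring_cqe *cqes;
};

static unsigned cq_reap(struct cq_ring_view *cq,
			void (*handle)(const struct io_uring_cqe *))
{
	unsigned head = *cq->head;
	/* Acquire pairs with the kernel's smp_store_release() of cq.tail. */
	unsigned tail = __atomic_load_n(cq->tail, __ATOMIC_ACQUIRE);
	unsigned seen = 0;

	while (head != tail) {
		handle(&cq->cqes[head & *cq->ring_mask]);
		head++;
		seen++;
	}
	/* Publish the new head so the kernel can reuse the CQE slots. */
	__atomic_store_n(cq->head, head, __ATOMIC_RELEASE);
	return seen;
}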
@@ -771,7 +780,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
static int io_iopoll_getevents(struct io_ring_ctx *ctx, unsigned int *nr_events,
long min)
{
- while (!list_empty(&ctx->poll_list)) {
+ while (!list_empty(&ctx->poll_list) && !need_resched()) {
int ret;
ret = io_do_iopoll(ctx, nr_events, min);
@@ -798,6 +807,12 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
unsigned int nr_events = 0;
io_iopoll_getevents(ctx, &nr_events, 1);
+
+ /*
+ * Ensure we allow local-to-the-cpu processing to take place; in
+ * this case we need to ensure that we reap all events.
+ */
+ cond_resched();
}
mutex_unlock(&ctx->uring_lock);
}
@@ -805,11 +820,42 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
long min)
{
- int ret = 0;
+ int iters, ret = 0;
+ /*
+ * We disallow the app entering submit/complete with polling, but we
+ * still need to lock the ring to prevent racing with polled issue
+ * that got punted to a workqueue.
+ */
+ mutex_lock(&ctx->uring_lock);
+
+ iters = 0;
do {
int tmin = 0;
+ /*
+ * Don't enter poll loop if we already have events pending.
+ * If we do, we can potentially be spinning for commands that
+ * already triggered a CQE (eg in error).
+ */
+ if (io_cqring_events(ctx->rings))
+ break;
+
+ /*
+ * If a submit got punted to a workqueue, we can have the
+ * application entering polling for a command before it gets
+ * issued. That app will hold the uring_lock for the duration
+ * of the poll right here, so we need to take a breather every
+ * now and then to ensure that the issue has a chance to add
+ * the poll to the issued list. Otherwise we can spin here
+ * forever, while the workqueue is stuck trying to acquire the
+ * very same mutex.
+ */
+ if (!(++iters & 7)) {
+ mutex_unlock(&ctx->uring_lock);
+ mutex_lock(&ctx->uring_lock);
+ }
+
if (*nr_events < min)
tmin = min - *nr_events;
@@ -819,6 +865,7 @@ static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
ret = 0;
} while (min && !*nr_events && !need_resched());
+ mutex_unlock(&ctx->uring_lock);
return ret;
}
@@ -1097,10 +1144,8 @@ static int io_import_fixed(struct io_ring_ctx *ctx, int rw,
iter->bvec = bvec + seg_skip;
iter->nr_segs -= seg_skip;
- iter->count -= (seg_skip << PAGE_SHIFT);
+ iter->count -= bvec->bv_len + offset;
iter->iov_offset = offset & ~PAGE_MASK;
- if (iter->iov_offset)
- iter->count -= iter->iov_offset;
}
}
@@ -1144,6 +1189,28 @@ static ssize_t io_import_iovec(struct io_ring_ctx *ctx, int rw,
return import_iovec(rw, buf, sqe_len, UIO_FASTIOV, iovec, iter);
}
+static inline bool io_should_merge(struct async_list *al, struct kiocb *kiocb)
+{
+ if (al->file == kiocb->ki_filp) {
+ off_t start, end;
+
+ /*
+ * Allow merging if we're anywhere in the range of the same
+ * page. Generally this happens for sub-page reads or writes,
+ * and it's beneficial to allow the first worker to bring the
+ * page in and the piggybacked work can then work on the
+ * cached page.
+ */
+ start = al->io_start & PAGE_MASK;
+ end = (al->io_start + al->io_len + PAGE_SIZE - 1) & PAGE_MASK;
+ if (kiocb->ki_pos >= start && kiocb->ki_pos <= end)
+ return true;
+ }
+
+ al->file = NULL;
+ return false;
+}
+
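The merge test above rounds the previous request's start down and its end up to page boundaries and accepts any new offset inside that window. A standalone illustration of the same arithmetic (plain integers, 4 KiB pages assumed; not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Same check as io_should_merge(), on plain integers. */
static bool in_same_page_range(unsigned long prev_start, unsigned long prev_len,
			       unsigned long new_pos)
{
	unsigned long start = prev_start & PAGE_MASK;
	unsigned long end = (prev_start + prev_len + PAGE_SIZE - 1) & PAGE_MASK;

	return new_pos >= start && new_pos <= end;
}

int main(void)
{
	/* A 512-byte write at offset 100 and a follow-up at 3000 share a page. */
	printf("%d\n", in_same_page_range(100, 512, 3000));	/* prints 1 */
	printf("%d\n", in_same_page_range(100, 512, 9000));	/* prints 0 */
	return 0;
}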
/*
* Make a note of the last file/offset/direction we punted to async
* context. We'll use this information to see if we can piggy back a
@@ -1155,9 +1222,8 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
struct async_list *async_list = &req->ctx->pending_async[rw];
struct kiocb *kiocb = &req->rw;
struct file *filp = kiocb->ki_filp;
- off_t io_end = kiocb->ki_pos + len;
- if (filp == async_list->file && kiocb->ki_pos == async_list->io_end) {
+ if (io_should_merge(async_list, kiocb)) {
unsigned long max_bytes;
/* Use 8x RA size as a decent limiter for both reads/writes */
@@ -1170,17 +1236,16 @@ static void io_async_list_note(int rw, struct io_kiocb *req, size_t len)
req->flags |= REQ_F_SEQ_PREV;
async_list->io_len += len;
} else {
- io_end = 0;
- async_list->io_len = 0;
+ async_list->file = NULL;
}
}
/* New file? Reset state. */
if (async_list->file != filp) {
- async_list->io_len = 0;
+ async_list->io_start = kiocb->ki_pos;
+ async_list->io_len = len;
async_list->file = filp;
}
- async_list->io_end = io_end;
}
static int io_read(struct io_kiocb *req, const struct sqe_submit *s,
@@ -1492,7 +1557,7 @@ static void io_poll_remove_one(struct io_kiocb *req)
WRITE_ONCE(poll->canceled, true);
if (!list_empty(&poll->wait.entry)) {
list_del_init(&poll->wait.entry);
- queue_work(req->ctx->sqo_wq, &req->work);
+ io_queue_async_work(req->ctx, req);
}
spin_unlock(&poll->head->lock);
@@ -1606,7 +1671,7 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
io_cqring_ev_posted(ctx);
io_put_req(req);
} else {
- queue_work(ctx->sqo_wq, &req->work);
+ io_queue_async_work(ctx, req);
}
return 1;
@@ -1949,7 +2014,7 @@ out:
*/
static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
{
- bool ret = false;
+ bool ret;
if (!list)
return false;
@@ -1995,10 +2060,14 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
flags = READ_ONCE(s->sqe->flags);
fd = READ_ONCE(s->sqe->fd);
- if (flags & IOSQE_IO_DRAIN) {
+ if (flags & IOSQE_IO_DRAIN)
req->flags |= REQ_F_IO_DRAIN;
- req->sequence = ctx->cached_sq_head - 1;
- }
+ /*
+ * All IO needs to record the previous position so that, when
+ * LINK is combined with DRAIN, it can be used to mark the
+ * position of the first IO in the link list.
+ */
+ req->sequence = s->sequence;
if (!io_op_needs_file(s->sqe))
return 0;
@@ -2020,12 +2089,12 @@ static int io_req_set_file(struct io_ring_ctx *ctx, const struct sqe_submit *s,
return 0;
}
-static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
- struct sqe_submit *s)
+static int __io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ struct sqe_submit *s, bool force_nonblock)
{
int ret;
- ret = __io_submit_sqe(ctx, req, s, true);
+ ret = __io_submit_sqe(ctx, req, s, force_nonblock);
if (ret == -EAGAIN && !(req->flags & REQ_F_NOWAIT)) {
struct io_uring_sqe *sqe_copy;
@@ -2042,7 +2111,7 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
if (list)
atomic_inc(&list->cnt);
INIT_WORK(&req->work, io_sq_wq_submit_work);
- queue_work(ctx->sqo_wq, &req->work);
+ io_queue_async_work(ctx, req);
}
/*
@@ -2067,10 +2136,70 @@ static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
return ret;
}
+static int io_queue_sqe(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ struct sqe_submit *s, bool force_nonblock)
+{
+ int ret;
+
+ ret = io_req_defer(ctx, req, s->sqe);
+ if (ret) {
+ if (ret != -EIOCBQUEUED) {
+ io_free_req(req);
+ io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ }
+ return 0;
+ }
+
+ return __io_queue_sqe(ctx, req, s, force_nonblock);
+}
+
+static int io_queue_link_head(struct io_ring_ctx *ctx, struct io_kiocb *req,
+ struct sqe_submit *s, struct io_kiocb *shadow,
+ bool force_nonblock)
+{
+ int ret;
+ int need_submit = false;
+
+ if (!shadow)
+ return io_queue_sqe(ctx, req, s, force_nonblock);
+
+ /*
+ * Mark the first IO in the link list as DRAIN so that all the
+ * following IOs enter the defer list; all IO needs to complete
+ * before the link list runs.
+ */
+ req->flags |= REQ_F_IO_DRAIN;
+ ret = io_req_defer(ctx, req, s->sqe);
+ if (ret) {
+ if (ret != -EIOCBQUEUED) {
+ io_free_req(req);
+ io_cqring_add_event(ctx, s->sqe->user_data, ret);
+ return 0;
+ }
+ } else {
+ /*
+ * ret == 0 means that all IOs in front of the link IO have
+ * completed, so queue the link head now.
+ */
+ need_submit = true;
+ }
+
+ /* Insert shadow req to defer_list, blocking next IOs */
+ spin_lock_irq(&ctx->completion_lock);
+ list_add_tail(&shadow->list, &ctx->defer_list);
+ spin_unlock_irq(&ctx->completion_lock);
+
+ if (need_submit)
+ return __io_queue_sqe(ctx, req, s, force_nonblock);
+
+ return 0;
+}
+
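From userspace, the shadow request above comes into play when an SQE inside a link chain carries IOSQE_IO_DRAIN. A hedged sketch of such a chain using the raw uapi structures (the helper name is hypothetical and iovec setup is left to the caller):

#include <linux/io_uring.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <string.h>

static void prep_linked_drain(struct io_uring_sqe *sqe, int fd,
			      const struct iovec *iov, off_t off)
{
	memset(sqe, 0, 2 * sizeof(*sqe));

	/* Head of the chain: a write that links to the next SQE. */
	sqe[0].opcode = IORING_OP_WRITEV;
	sqe[0].fd = fd;
	sqe[0].off = off;
	sqe[0].addr = (unsigned long)iov;
	sqe[0].len = 1;				/* one iovec */
	sqe[0].flags = IOSQE_IO_LINK;

	/* Second SQE in the chain carries DRAIN: the case handled above. */
	sqe[1].opcode = IORING_OP_FSYNC;
	sqe[1].fd = fd;
	sqe[1].flags = IOSQE_IO_DRAIN;
}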
#define SQE_VALID_FLAGS (IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK)
static void io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
- struct io_submit_state *state, struct io_kiocb **link)
+ struct io_submit_state *state, struct io_kiocb **link,
+ bool force_nonblock)
{
struct io_uring_sqe *sqe_copy;
struct io_kiocb *req;
@@ -2097,13 +2226,6 @@ err:
return;
}
- ret = io_req_defer(ctx, req, s->sqe);
- if (ret) {
- if (ret != -EIOCBQUEUED)
- goto err_req;
- return;
- }
-
/*
* If we already have a head request, queue this one for async
* submittal once the head completes. If we don't have a head but
@@ -2130,7 +2252,7 @@ err:
INIT_LIST_HEAD(&req->link_list);
*link = req;
} else {
- io_queue_sqe(ctx, req, s);
+ io_queue_sqe(ctx, req, s, force_nonblock);
}
}
@@ -2160,15 +2282,15 @@ static void io_submit_state_start(struct io_submit_state *state,
static void io_commit_sqring(struct io_ring_ctx *ctx)
{
- struct io_sq_ring *ring = ctx->sq_ring;
+ struct io_rings *rings = ctx->rings;
- if (ctx->cached_sq_head != READ_ONCE(ring->r.head)) {
+ if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
/*
* Ensure any loads from the SQEs are done at this point,
* since once we write the new head, the application could
* write new data to them.
*/
- smp_store_release(&ring->r.head, ctx->cached_sq_head);
+ smp_store_release(&rings->sq.head, ctx->cached_sq_head);
}
}
@@ -2182,7 +2304,8 @@ static void io_commit_sqring(struct io_ring_ctx *ctx)
*/
static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
{
- struct io_sq_ring *ring = ctx->sq_ring;
+ struct io_rings *rings = ctx->rings;
+ u32 *sq_array = ctx->sq_array;
unsigned head;
/*
@@ -2195,20 +2318,21 @@ static bool io_get_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
*/
head = ctx->cached_sq_head;
/* make sure SQ entry isn't read before tail */
- if (head == smp_load_acquire(&ring->r.tail))
+ if (head == smp_load_acquire(&rings->sq.tail))
return false;
- head = READ_ONCE(ring->array[head & ctx->sq_mask]);
+ head = READ_ONCE(sq_array[head & ctx->sq_mask]);
if (head < ctx->sq_entries) {
s->index = head;
s->sqe = &ctx->sq_sqes[head];
+ s->sequence = ctx->cached_sq_head;
ctx->cached_sq_head++;
return true;
}
/* drop invalid entries */
ctx->cached_sq_head++;
- ring->dropped++;
+ rings->sq_dropped++;
return false;
}
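io_get_sqring() reads an index out of sq_array and only then advances the cached head; the application fills an SQE, stores its index into the array, and publishes the new tail with a release store. A userspace-side sketch of that producer step (not part of the patch; the pointers are assumed to come from io_uring_params.sq_off after mmap(), and GCC/Clang __atomic builtins stand in for the kernel barriers):

struct sq_ring_view {
	unsigned *head;		/* consumer index, advanced by the kernel */
	unsigned *tail;		/* producer index, advanced by the app    */
	unsigned *ring_mask;
	unsigned *array;	/* indices into the mmapped io_uring_sqe[] */
};

static int sq_publish(struct sq_ring_view *sq, unsigned idx)
{
	unsigned tail = *sq->tail;
	unsigned head = __atomic_load_n(sq->head, __ATOMIC_ACQUIRE);

	if (tail - head > *sq->ring_mask)
		return -1;				/* ring is full */

	sq->array[tail & *sq->ring_mask] = idx;
	/* Make the SQE and index stores visible before the new tail. */
	__atomic_store_n(sq->tail, tail + 1, __ATOMIC_RELEASE);
	return 0;
}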
@@ -2217,6 +2341,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
+ struct io_kiocb *shadow_req = NULL;
bool prev_was_link = false;
int i, submitted = 0;
@@ -2231,11 +2356,21 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
- io_queue_sqe(ctx, link, &link->submit);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req,
+ true);
link = NULL;
}
prev_was_link = (sqes[i].sqe->flags & IOSQE_IO_LINK) != 0;
+ if (link && (sqes[i].sqe->flags & IOSQE_IO_DRAIN)) {
+ if (!shadow_req) {
+ shadow_req = io_get_req(ctx, NULL);
+ shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
+ refcount_dec(&shadow_req->refs);
+ }
+ shadow_req->sequence = sqes[i].sequence;
+ }
+
if (unlikely(mm_fault)) {
io_cqring_add_event(ctx, sqes[i].sqe->user_data,
-EFAULT);
@@ -2243,13 +2378,13 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, struct sqe_submit *sqes,
sqes[i].has_user = has_user;
sqes[i].needs_lock = true;
sqes[i].needs_fixed_file = true;
- io_submit_sqe(ctx, &sqes[i], statep, &link);
+ io_submit_sqe(ctx, &sqes[i], statep, &link, true);
submitted++;
}
}
if (link)
- io_queue_sqe(ctx, link, &link->submit);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req, true);
if (statep)
io_submit_state_end(&state);
@@ -2280,15 +2415,7 @@ static int io_sq_thread(void *data)
unsigned nr_events = 0;
if (ctx->flags & IORING_SETUP_IOPOLL) {
- /*
- * We disallow the app entering submit/complete
- * with polling, but we still need to lock the
- * ring to prevent racing with polled issue
- * that got punted to a workqueue.
- */
- mutex_lock(&ctx->uring_lock);
io_iopoll_check(ctx, &nr_events, 0);
- mutex_unlock(&ctx->uring_lock);
} else {
/*
* Normal IO, just pretend everything completed.
@@ -2329,7 +2456,7 @@ static int io_sq_thread(void *data)
TASK_INTERRUPTIBLE);
/* Tell userspace we may need a wakeup call */
- ctx->sq_ring->flags |= IORING_SQ_NEED_WAKEUP;
+ ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
/* make sure to read SQ tail after writing flags */
smp_mb();
@@ -2343,12 +2470,12 @@ static int io_sq_thread(void *data)
schedule();
finish_wait(&ctx->sqo_wait, &wait);
- ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
+ ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
continue;
}
finish_wait(&ctx->sqo_wait, &wait);
- ctx->sq_ring->flags &= ~IORING_SQ_NEED_WAKEUP;
+ ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
}
i = 0;
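With SQPOLL, the thread above sets IORING_SQ_NEED_WAKEUP in the shared sq_flags before sleeping, so the application has to check the flag after queueing work and issue a wakeup call. A minimal userspace sketch of that check (the field and descriptor names are assumptions; not part of the patch):

#include <linux/io_uring.h>
#include <sys/syscall.h>
#include <unistd.h>

static void sqpoll_wakeup_if_needed(int ring_fd, const unsigned *sq_flags)
{
	/* Pairs with the smp_mb() the SQ thread issues after setting the flag. */
	unsigned flags = __atomic_load_n(sq_flags, __ATOMIC_ACQUIRE);

	if (flags & IORING_SQ_NEED_WAKEUP)
		syscall(__NR_io_uring_enter, ring_fd, 0, 0,
			IORING_ENTER_SQ_WAKEUP, NULL, 0);
}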
@@ -2389,10 +2516,12 @@ static int io_sq_thread(void *data)
return 0;
}
-static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
+static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit,
+ bool block_for_last)
{
struct io_submit_state state, *statep = NULL;
struct io_kiocb *link = NULL;
+ struct io_kiocb *shadow_req = NULL;
bool prev_was_link = false;
int i, submit = 0;
@@ -2402,6 +2531,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
}
for (i = 0; i < to_submit; i++) {
+ bool force_nonblock = true;
struct sqe_submit s;
if (!io_get_sqring(ctx, &s))
@@ -2412,34 +2542,49 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
* that's the end of the chain. Submit the previous link.
*/
if (!prev_was_link && link) {
- io_queue_sqe(ctx, link, &link->submit);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req,
+ force_nonblock);
link = NULL;
}
prev_was_link = (s.sqe->flags & IOSQE_IO_LINK) != 0;
+ if (link && (s.sqe->flags & IOSQE_IO_DRAIN)) {
+ if (!shadow_req) {
+ shadow_req = io_get_req(ctx, NULL);
+ shadow_req->flags |= (REQ_F_IO_DRAIN | REQ_F_SHADOW_DRAIN);
+ refcount_dec(&shadow_req->refs);
+ }
+ shadow_req->sequence = s.sequence;
+ }
+
s.has_user = true;
s.needs_lock = false;
s.needs_fixed_file = false;
submit++;
- io_submit_sqe(ctx, &s, statep, &link);
+
+ /*
+ * The caller will block for events after submit, so submit the
+ * last IO non-blocking. This is either the only IO it's
+ * submitting, or it already submitted the previous ones. This
+ * improves performance by avoiding an async punt that we don't
+ * need to do.
+ */
+ if (block_for_last && submit == to_submit)
+ force_nonblock = false;
+
+ io_submit_sqe(ctx, &s, statep, &link, force_nonblock);
}
io_commit_sqring(ctx);
if (link)
- io_queue_sqe(ctx, link, &link->submit);
+ io_queue_link_head(ctx, link, &link->submit, shadow_req,
+ block_for_last);
if (statep)
io_submit_state_end(statep);
return submit;
}
-static unsigned io_cqring_events(struct io_cq_ring *ring)
-{
- /* See comment at the top of this file */
- smp_rmb();
- return READ_ONCE(ring->r.tail) - READ_ONCE(ring->r.head);
-}
-
/*
* Wait until events become available, if we don't already have some. The
* application must reap them itself, as they reside on the shared cq ring.
@@ -2447,10 +2592,10 @@ static unsigned io_cqring_events(struct io_cq_ring *ring)
static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
const sigset_t __user *sig, size_t sigsz)
{
- struct io_cq_ring *ring = ctx->cq_ring;
+ struct io_rings *rings = ctx->rings;
int ret;
- if (io_cqring_events(ring) >= min_events)
+ if (io_cqring_events(rings) >= min_events)
return 0;
if (sig) {
@@ -2466,12 +2611,12 @@ static int io_cqring_wait(struct io_ring_ctx *ctx, int min_events,
return ret;
}
- ret = wait_event_interruptible(ctx->wait, io_cqring_events(ring) >= min_events);
+ ret = wait_event_interruptible(ctx->wait, io_cqring_events(rings) >= min_events);
restore_saved_sigmask_unless(ret == -ERESTARTSYS);
if (ret == -ERESTARTSYS)
ret = -EINTR;
- return READ_ONCE(ring->r.head) == READ_ONCE(ring->r.tail) ? ret : 0;
+ return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
}
static void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
@@ -2521,11 +2666,15 @@ static void io_sq_thread_stop(struct io_ring_ctx *ctx)
static void io_finish_async(struct io_ring_ctx *ctx)
{
+ int i;
+
io_sq_thread_stop(ctx);
- if (ctx->sqo_wq) {
- destroy_workqueue(ctx->sqo_wq);
- ctx->sqo_wq = NULL;
+ for (i = 0; i < ARRAY_SIZE(ctx->sqo_wq); i++) {
+ if (ctx->sqo_wq[i]) {
+ destroy_workqueue(ctx->sqo_wq[i]);
+ ctx->sqo_wq[i] = NULL;
+ }
}
}
@@ -2733,16 +2882,31 @@ static int io_sq_offload_start(struct io_ring_ctx *ctx,
}
/* Do QD, or 2 * CPUS, whatever is smallest */
- ctx->sqo_wq = alloc_workqueue("io_ring-wq", WQ_UNBOUND | WQ_FREEZABLE,
+ ctx->sqo_wq[0] = alloc_workqueue("io_ring-wq",
+ WQ_UNBOUND | WQ_FREEZABLE,
min(ctx->sq_entries - 1, 2 * num_online_cpus()));
- if (!ctx->sqo_wq) {
+ if (!ctx->sqo_wq[0]) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /*
+ * This is for buffered writes, where we want to limit the parallelism
+ * due to file locking in file systems. As "normal" buffered writes
+ * should parallelize on writeout quite nicely, limit us to having 2
+ * pending. This avoids massive contention on the inode when doing
+ * buffered async writes.
+ */
+ ctx->sqo_wq[1] = alloc_workqueue("io_ring-write-wq",
+ WQ_UNBOUND | WQ_FREEZABLE, 2);
+ if (!ctx->sqo_wq[1]) {
ret = -ENOMEM;
goto err;
}
return 0;
err:
- io_sq_thread_stop(ctx);
+ io_finish_async(ctx);
mmdrop(ctx->sqo_mm);
ctx->sqo_mm = NULL;
return ret;
@@ -2791,17 +2955,45 @@ static void *io_mem_alloc(size_t size)
return (void *) __get_free_pages(gfp_flags, get_order(size));
}
+static unsigned long rings_size(unsigned sq_entries, unsigned cq_entries,
+ size_t *sq_offset)
+{
+ struct io_rings *rings;
+ size_t off, sq_array_size;
+
+ off = struct_size(rings, cqes, cq_entries);
+ if (off == SIZE_MAX)
+ return SIZE_MAX;
+
+#ifdef CONFIG_SMP
+ off = ALIGN(off, SMP_CACHE_BYTES);
+ if (off == 0)
+ return SIZE_MAX;
+#endif
+
+ sq_array_size = array_size(sizeof(u32), sq_entries);
+ if (sq_array_size == SIZE_MAX)
+ return SIZE_MAX;
+
+ if (check_add_overflow(off, sq_array_size, &off))
+ return SIZE_MAX;
+
+ if (sq_offset)
+ *sq_offset = off;
+
+ return off;
+}
+
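rings_size() lays out the CQE array first and appends the SQ index array, bailing out with SIZE_MAX on any overflow. A standalone re-implementation of the same layout for illustration (cacheline alignment ignored; the header size is a stand-in constant, not the real offsetof value):

#include <stdint.h>
#include <stdio.h>

#define CQE_SIZE 16u	/* sizeof(struct io_uring_cqe) */
#define HDR_SIZE 64u	/* stand-in for offsetof(struct io_rings, cqes) */

static size_t rings_bytes(unsigned sq_entries, unsigned cq_entries,
			  size_t *sq_offset)
{
	size_t off = HDR_SIZE;

	if (cq_entries > (SIZE_MAX - off) / CQE_SIZE)
		return SIZE_MAX;
	off += (size_t)cq_entries * CQE_SIZE;

	if (sq_entries > (SIZE_MAX - off) / sizeof(uint32_t))
		return SIZE_MAX;
	if (sq_offset)
		*sq_offset = off;

	return off + (size_t)sq_entries * sizeof(uint32_t);
}

int main(void)
{
	size_t sq_off;
	size_t total = rings_bytes(128, 256, &sq_off);

	printf("total %zu bytes, sq array at offset %zu\n", total, sq_off);
	return 0;
}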
static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
{
- struct io_sq_ring *sq_ring;
- struct io_cq_ring *cq_ring;
- size_t bytes;
+ size_t pages;
- bytes = struct_size(sq_ring, array, sq_entries);
- bytes += array_size(sizeof(struct io_uring_sqe), sq_entries);
- bytes += struct_size(cq_ring, cqes, cq_entries);
+ pages = (size_t)1 << get_order(
+ rings_size(sq_entries, cq_entries, NULL));
+ pages += (size_t)1 << get_order(
+ array_size(sizeof(struct io_uring_sqe), sq_entries));
- return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
+ return pages;
}
static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
@@ -2815,7 +3007,7 @@ static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
for (j = 0; j < imu->nr_bvecs; j++)
- put_page(imu->bvec[j].bv_page);
+ put_user_page(imu->bvec[j].bv_page);
if (ctx->account_mem)
io_unaccount_mem(ctx->user, imu->nr_bvecs);
@@ -2959,10 +3151,8 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
* if we did partial map, or found file backed vmas,
* release any pages we did get
*/
- if (pret > 0) {
- for (j = 0; j < pret; j++)
- put_page(pages[j]);
- }
+ if (pret > 0)
+ put_user_pages(pages, pret);
if (ctx->account_mem)
io_unaccount_mem(ctx->user, nr_pages);
kvfree(imu->bvec);
@@ -3048,9 +3238,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
}
#endif
- io_mem_free(ctx->sq_ring);
+ io_mem_free(ctx->rings);
io_mem_free(ctx->sq_sqes);
- io_mem_free(ctx->cq_ring);
percpu_ref_exit(&ctx->refs);
if (ctx->account_mem)
@@ -3071,10 +3260,10 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
* io_commit_cqring
*/
smp_rmb();
- if (READ_ONCE(ctx->sq_ring->r.tail) - ctx->cached_sq_head !=
- ctx->sq_ring->ring_entries)
+ if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
+ ctx->rings->sq_ring_entries)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (READ_ONCE(ctx->cq_ring->r.head) != ctx->cached_cq_tail)
+ if (READ_ONCE(ctx->rings->sq.head) != ctx->cached_cq_tail)
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
@@ -3119,14 +3308,12 @@ static int io_uring_mmap(struct file *file, struct vm_area_struct *vma)
switch (offset) {
case IORING_OFF_SQ_RING:
- ptr = ctx->sq_ring;
+ case IORING_OFF_CQ_RING:
+ ptr = ctx->rings;
break;
case IORING_OFF_SQES:
ptr = ctx->sq_sqes;
break;
- case IORING_OFF_CQ_RING:
- ptr = ctx->cq_ring;
- break;
default:
return -EINVAL;
}
@@ -3169,19 +3356,27 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
* Just return the requested submit count, and wake the thread if
* we were asked to.
*/
+ ret = 0;
if (ctx->flags & IORING_SETUP_SQPOLL) {
if (flags & IORING_ENTER_SQ_WAKEUP)
wake_up(&ctx->sqo_wait);
submitted = to_submit;
- goto out_ctx;
- }
+ } else if (to_submit) {
+ bool block_for_last = false;
- ret = 0;
- if (to_submit) {
to_submit = min(to_submit, ctx->sq_entries);
+ /*
+ * Allow the last submission in a series to block, IFF the caller
+ * asked to wait for events and we don't currently have
+ * enough. This potentially avoids an async punt.
+ */
+ if (to_submit == min_complete &&
+ io_cqring_events(ctx->rings) < min_complete)
+ block_for_last = true;
+
mutex_lock(&ctx->uring_lock);
- submitted = io_ring_submit(ctx, to_submit);
+ submitted = io_ring_submit(ctx, to_submit, block_for_last);
mutex_unlock(&ctx->uring_lock);
}
if (flags & IORING_ENTER_GETEVENTS) {
@@ -3190,15 +3385,12 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit,
min_complete = min(min_complete, ctx->cq_entries);
if (ctx->flags & IORING_SETUP_IOPOLL) {
- mutex_lock(&ctx->uring_lock);
ret = io_iopoll_check(ctx, &nr_events, min_complete);
- mutex_unlock(&ctx->uring_lock);
} else {
ret = io_cqring_wait(ctx, min_complete, sig, sigsz);
}
}
-out_ctx:
io_ring_drop_ctx_refs(ctx, 1);
out_fput:
fdput(f);
@@ -3215,19 +3407,27 @@ static const struct file_operations io_uring_fops = {
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
struct io_uring_params *p)
{
- struct io_sq_ring *sq_ring;
- struct io_cq_ring *cq_ring;
- size_t size;
+ struct io_rings *rings;
+ size_t size, sq_array_offset;
- sq_ring = io_mem_alloc(struct_size(sq_ring, array, p->sq_entries));
- if (!sq_ring)
+ size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset);
+ if (size == SIZE_MAX)
+ return -EOVERFLOW;
+
+ rings = io_mem_alloc(size);
+ if (!rings)
return -ENOMEM;
- ctx->sq_ring = sq_ring;
- sq_ring->ring_mask = p->sq_entries - 1;
- sq_ring->ring_entries = p->sq_entries;
- ctx->sq_mask = sq_ring->ring_mask;
- ctx->sq_entries = sq_ring->ring_entries;
+ ctx->rings = rings;
+ ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
+ rings->sq_ring_mask = p->sq_entries - 1;
+ rings->cq_ring_mask = p->cq_entries - 1;
+ rings->sq_ring_entries = p->sq_entries;
+ rings->cq_ring_entries = p->cq_entries;
+ ctx->sq_mask = rings->sq_ring_mask;
+ ctx->cq_mask = rings->cq_ring_mask;
+ ctx->sq_entries = rings->sq_ring_entries;
+ ctx->cq_entries = rings->cq_ring_entries;
size = array_size(sizeof(struct io_uring_sqe), p->sq_entries);
if (size == SIZE_MAX)
@@ -3237,15 +3437,6 @@ static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
if (!ctx->sq_sqes)
return -ENOMEM;
- cq_ring = io_mem_alloc(struct_size(cq_ring, cqes, p->cq_entries));
- if (!cq_ring)
- return -ENOMEM;
-
- ctx->cq_ring = cq_ring;
- cq_ring->ring_mask = p->cq_entries - 1;
- cq_ring->ring_entries = p->cq_entries;
- ctx->cq_mask = cq_ring->ring_mask;
- ctx->cq_entries = cq_ring->ring_entries;
return 0;
}
@@ -3349,21 +3540,23 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
goto err;
memset(&p->sq_off, 0, sizeof(p->sq_off));
- p->sq_off.head = offsetof(struct io_sq_ring, r.head);
- p->sq_off.tail = offsetof(struct io_sq_ring, r.tail);
- p->sq_off.ring_mask = offsetof(struct io_sq_ring, ring_mask);
- p->sq_off.ring_entries = offsetof(struct io_sq_ring, ring_entries);
- p->sq_off.flags = offsetof(struct io_sq_ring, flags);
- p->sq_off.dropped = offsetof(struct io_sq_ring, dropped);
- p->sq_off.array = offsetof(struct io_sq_ring, array);
+ p->sq_off.head = offsetof(struct io_rings, sq.head);
+ p->sq_off.tail = offsetof(struct io_rings, sq.tail);
+ p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask);
+ p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries);
+ p->sq_off.flags = offsetof(struct io_rings, sq_flags);
+ p->sq_off.dropped = offsetof(struct io_rings, sq_dropped);
+ p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
memset(&p->cq_off, 0, sizeof(p->cq_off));
- p->cq_off.head = offsetof(struct io_cq_ring, r.head);
- p->cq_off.tail = offsetof(struct io_cq_ring, r.tail);
- p->cq_off.ring_mask = offsetof(struct io_cq_ring, ring_mask);
- p->cq_off.ring_entries = offsetof(struct io_cq_ring, ring_entries);
- p->cq_off.overflow = offsetof(struct io_cq_ring, overflow);
- p->cq_off.cqes = offsetof(struct io_cq_ring, cqes);
+ p->cq_off.head = offsetof(struct io_rings, cq.head);
+ p->cq_off.tail = offsetof(struct io_rings, cq.tail);
+ p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask);
+ p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries);
+ p->cq_off.overflow = offsetof(struct io_rings, cq_overflow);
+ p->cq_off.cqes = offsetof(struct io_rings, cqes);
+
+ p->features = IORING_FEAT_SINGLE_MMAP;
return ret;
err:
io_ring_ctx_wait_and_kill(ctx);
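Taken together, the io_uring changes above put both rings in one io_rings allocation and advertise IORING_FEAT_SINGLE_MMAP, so the SQ and CQ rings can be mapped with a single mmap() at IORING_OFF_SQ_RING. A minimal userspace sketch of the setup path follows (raw syscalls, error handling trimmed); on kernels without the feature bit the two rings still need separate mappings:

#include <linux/io_uring.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct io_uring_params p;
	memset(&p, 0, sizeof(p));

	int fd = syscall(__NR_io_uring_setup, 8, &p);
	if (fd < 0)
		return 1;

	size_t sq_sz = p.sq_off.array + p.sq_entries * sizeof(unsigned);
	size_t cq_sz = p.cq_off.cqes + p.cq_entries * sizeof(struct io_uring_cqe);

	if (p.features & IORING_FEAT_SINGLE_MMAP) {
		/* One region backs both rings; sq_off/cq_off index into it. */
		size_t sz = sq_sz > cq_sz ? sq_sz : cq_sz;
		void *rings = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				   MAP_SHARED, fd, IORING_OFF_SQ_RING);
		printf("single mmap: %p\n", rings);
	} else {
		/* Older kernels: SQ and CQ rings are separate mappings. */
		void *sq = mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, IORING_OFF_SQ_RING);
		void *cq = mmap(NULL, cq_sz, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, IORING_OFF_CQ_RING);
		printf("sq %p cq %p\n", sq, cq);
	}
	close(fd);
	return 0;
}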
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index 85a9093769a9..35768a63fb1d 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -10,7 +10,7 @@
*
* The following files are helpful:
*
- * Documentation/filesystems/nfs/Exporting
+ * Documentation/filesystems/nfs/exporting.rst
* fs/exportfs/expfs.c.
*/
diff --git a/fs/jfs/Kconfig b/fs/jfs/Kconfig
index 22a273bd4648..05cb0e8e4382 100644
--- a/fs/jfs/Kconfig
+++ b/fs/jfs/Kconfig
@@ -5,7 +5,7 @@ config JFS_FS
select CRC32
help
This is a port of IBM's Journaled Filesystem . More information is
- available in the file <file:Documentation/filesystems/jfs.txt>.
+ available in the file <file:Documentation/admin-guide/jfs.rst>.
If you do not intend to use the JFS filesystem, say N.
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index a387534c9577..6ebae6bbe6a5 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -137,6 +137,9 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
if (kn_from == kn_to)
return strlcpy(buf, "/", buflen);
+ if (!buf)
+ return -EINVAL;
+
common = kernfs_common_ancestor(kn_from, kn_to);
if (WARN_ON(!common))
return -EINVAL;
@@ -144,8 +147,7 @@ static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
depth_to = kernfs_depth(common, kn_to);
depth_from = kernfs_depth(common, kn_from);
- if (buf)
- buf[0] = '\0';
+ buf[0] = '\0';
for (i = 0; i < depth_from; i++)
len += strlcpy(buf + len, parent_str,
@@ -430,7 +432,6 @@ struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
*/
void kernfs_put_active(struct kernfs_node *kn)
{
- struct kernfs_root *root = kernfs_root(kn);
int v;
if (unlikely(!kn))
@@ -442,7 +443,7 @@ void kernfs_put_active(struct kernfs_node *kn)
if (likely(v != KN_DEACTIVATED_BIAS))
return;
- wake_up_all(&root->deactivate_waitq);
+ wake_up_all(&kernfs_root(kn)->deactivate_waitq);
}
/**
diff --git a/fs/locks.c b/fs/locks.c
index 686eae21daf6..a364ebc5cec3 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1592,7 +1592,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
ctx = smp_load_acquire(&inode->i_flctx);
if (!ctx) {
WARN_ON_ONCE(1);
- return error;
+ goto free_lock;
}
percpu_down_read(&file_rwsem);
@@ -1672,6 +1672,7 @@ out:
spin_unlock(&ctx->flc_lock);
percpu_up_read(&file_rwsem);
locks_dispose_list(&dispose);
+free_lock:
locks_free_lock(new_fl);
return error;
}
@@ -2784,10 +2785,10 @@ static void lock_get_status(struct seq_file *f, struct file_lock *fl,
? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ "
: (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE ");
} else {
- seq_printf(f, "%s ",
- (lease_breaking(fl))
- ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ "
- : (fl->fl_type == F_WRLCK) ? "WRITE" : "READ ");
+ int type = IS_LEASE(fl) ? target_leasetype(fl) : fl->fl_type;
+
+ seq_printf(f, "%s ", (type == F_WRLCK) ? "WRITE" :
+ (type == F_RDLCK) ? "READ" : "UNLCK");
}
if (inode) {
/* userspace relies on this representation of dev_t */
diff --git a/fs/namei.c b/fs/namei.c
index 209c51a5226c..671c3c1a3425 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -596,14 +596,12 @@ static void terminate_walk(struct nameidata *nd)
path_put(&nd->path);
for (i = 0; i < nd->depth; i++)
path_put(&nd->stack[i].link);
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+ if (nd->flags & LOOKUP_ROOT_GRABBED) {
path_put(&nd->root);
- nd->root.mnt = NULL;
+ nd->flags &= ~LOOKUP_ROOT_GRABBED;
}
} else {
nd->flags &= ~LOOKUP_RCU;
- if (!(nd->flags & LOOKUP_ROOT))
- nd->root.mnt = NULL;
rcu_read_unlock();
}
nd->depth = 0;
@@ -641,6 +639,14 @@ static bool legitimize_links(struct nameidata *nd)
return true;
}
+static bool legitimize_root(struct nameidata *nd)
+{
+ if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT))
+ return true;
+ nd->flags |= LOOKUP_ROOT_GRABBED;
+ return legitimize_path(nd, &nd->root, nd->root_seq);
+}
+
/*
* Path walking has 2 modes, rcu-walk and ref-walk (see
* Documentation/filesystems/path-lookup.txt). In situations when we can't
@@ -671,23 +677,18 @@ static int unlazy_walk(struct nameidata *nd)
nd->flags &= ~LOOKUP_RCU;
if (unlikely(!legitimize_links(nd)))
- goto out2;
- if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
goto out1;
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
- if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq)))
- goto out;
- }
+ if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
+ goto out;
+ if (unlikely(!legitimize_root(nd)))
+ goto out;
rcu_read_unlock();
BUG_ON(nd->inode != parent->d_inode);
return 0;
-out2:
+out1:
nd->path.mnt = NULL;
nd->path.dentry = NULL;
-out1:
- if (!(nd->flags & LOOKUP_ROOT))
- nd->root.mnt = NULL;
out:
rcu_read_unlock();
return -ECHILD;
@@ -727,23 +728,14 @@ static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned se
*/
if (unlikely(!lockref_get_not_dead(&dentry->d_lockref)))
goto out;
- if (unlikely(read_seqcount_retry(&dentry->d_seq, seq))) {
- rcu_read_unlock();
- dput(dentry);
- goto drop_root_mnt;
- }
+ if (unlikely(read_seqcount_retry(&dentry->d_seq, seq)))
+ goto out_dput;
/*
* Sequence counts matched. Now make sure that the root is
* still valid and get it if required.
*/
- if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
- if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) {
- rcu_read_unlock();
- dput(dentry);
- return -ECHILD;
- }
- }
-
+ if (unlikely(!legitimize_root(nd)))
+ goto out_dput;
rcu_read_unlock();
return 0;
@@ -753,9 +745,10 @@ out1:
nd->path.dentry = NULL;
out:
rcu_read_unlock();
-drop_root_mnt:
- if (!(nd->flags & LOOKUP_ROOT))
- nd->root.mnt = NULL;
+ return -ECHILD;
+out_dput:
+ rcu_read_unlock();
+ dput(dentry);
return -ECHILD;
}
@@ -819,6 +812,7 @@ static void set_root(struct nameidata *nd)
} while (read_seqcount_retry(&fs->seq, seq));
} else {
get_fs_root(fs, &nd->root);
+ nd->flags |= LOOKUP_ROOT_GRABBED;
}
}
@@ -1735,8 +1729,6 @@ static int pick_link(struct nameidata *nd, struct path *link,
nd->flags &= ~LOOKUP_RCU;
nd->path.mnt = NULL;
nd->path.dentry = NULL;
- if (!(nd->flags & LOOKUP_ROOT))
- nd->root.mnt = NULL;
rcu_read_unlock();
} else if (likely(unlazy_walk(nd)) == 0)
error = nd_alloc_stack(nd);
@@ -2350,7 +2342,7 @@ int filename_lookup(int dfd, struct filename *name, unsigned flags,
retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
if (likely(!retval))
- audit_inode(name, path->dentry, flags & LOOKUP_PARENT);
+ audit_inode(name, path->dentry, 0);
restore_nameidata();
putname(name);
return retval;
@@ -2391,7 +2383,7 @@ static struct filename *filename_parentat(int dfd, struct filename *name,
if (likely(!retval)) {
*last = nd.last;
*type = nd.last_type;
- audit_inode(name, parent->dentry, LOOKUP_PARENT);
+ audit_inode(name, parent->dentry, AUDIT_INODE_PARENT);
} else {
putname(name);
name = ERR_PTR(retval);
@@ -2718,7 +2710,7 @@ filename_mountpoint(int dfd, struct filename *name, struct path *path,
if (unlikely(error == -ESTALE))
error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
if (likely(!error))
- audit_inode(name, path->dentry, flags & LOOKUP_NO_EVAL);
+ audit_inode(name, path->dentry, AUDIT_INODE_NOEVAL);
restore_nameidata();
putname(name);
return error;
@@ -3299,7 +3291,7 @@ static int do_last(struct nameidata *nd,
if (error)
return error;
- audit_inode(nd->name, dir, LOOKUP_PARENT);
+ audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
/* trailing slashes? */
if (unlikely(nd->last.name[nd->last.len]))
return -EISDIR;
diff --git a/fs/namespace.c b/fs/namespace.c
index d28d30b13043..227f7b343034 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1643,13 +1643,18 @@ static inline bool may_mount(void)
return ns_capable(current->nsproxy->mnt_ns->user_ns, CAP_SYS_ADMIN);
}
+#ifdef CONFIG_MANDATORY_FILE_LOCKING
static inline bool may_mandlock(void)
{
-#ifndef CONFIG_MANDATORY_FILE_LOCKING
- return false;
-#endif
return capable(CAP_SYS_ADMIN);
}
+#else
+static inline bool may_mandlock(void)
+{
+ pr_warn("VFS: \"mand\" mount option not supported");
+ return false;
+}
+#endif
/*
* Now umount can handle mount points as well as block devices.
@@ -1675,8 +1680,6 @@ int ksys_umount(char __user *name, int flags)
if (!(flags & UMOUNT_NOFOLLOW))
lookup_flags |= LOOKUP_FOLLOW;
- lookup_flags |= LOOKUP_NO_EVAL;
-
retval = user_path_mountpoint_at(AT_FDCWD, name, lookup_flags, &path);
if (retval)
goto out;
@@ -3046,7 +3049,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
return -EINVAL;
/* ... and get the mountpoint */
- retval = user_path(dir_name, &path);
+ retval = user_path_at(AT_FDCWD, dir_name, LOOKUP_FOLLOW, &path);
if (retval)
return retval;
@@ -3593,11 +3596,13 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
if (!may_mount())
return -EPERM;
- error = user_path_dir(new_root, &new);
+ error = user_path_at(AT_FDCWD, new_root,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &new);
if (error)
goto out0;
- error = user_path_dir(put_old, &old);
+ error = user_path_at(AT_FDCWD, put_old,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &old);
if (error)
goto out1;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 0ff3facf81da..071b90a45933 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -153,7 +153,7 @@ again:
/* Block nfs4_proc_unlck */
mutex_lock(&sp->so_delegreturn_mutex);
seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
- err = nfs4_open_delegation_recall(ctx, state, stateid, type);
+ err = nfs4_open_delegation_recall(ctx, state, stateid);
if (!err)
err = nfs_delegation_claim_locks(state, stateid);
if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
@@ -1046,6 +1046,22 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
nfs4_schedule_state_manager(clp);
}
+static void
+nfs_delegation_test_free_expired(struct inode *inode,
+ nfs4_stateid *stateid,
+ const struct cred *cred)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ const struct nfs4_minor_version_ops *ops = server->nfs_client->cl_mvops;
+ int status;
+
+ if (!cred)
+ return;
+ status = ops->test_and_free_expired(server, stateid, cred);
+ if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
+ nfs_remove_bad_delegation(inode, stateid);
+}
+
/**
* nfs_reap_expired_delegations - reap expired delegations
* @clp: nfs_client to process
@@ -1057,7 +1073,6 @@ void nfs_test_expired_all_delegations(struct nfs_client *clp)
*/
void nfs_reap_expired_delegations(struct nfs_client *clp)
{
- const struct nfs4_minor_version_ops *ops = clp->cl_mvops;
struct nfs_delegation *delegation;
struct nfs_server *server;
struct inode *inode;
@@ -1088,11 +1103,7 @@ restart:
nfs4_stateid_copy(&stateid, &delegation->stateid);
clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags);
rcu_read_unlock();
- if (cred != NULL &&
- ops->test_and_free_expired(server, &stateid, cred) < 0) {
- nfs_revoke_delegation(inode, &stateid);
- nfs_inode_find_state_and_recover(inode, &stateid);
- }
+ nfs_delegation_test_free_expired(inode, &stateid, cred);
put_cred(cred);
if (nfs4_server_rebooted(clp)) {
nfs_inode_mark_test_expired_delegation(server,inode);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 5799777df5ec..9eb87ae4c982 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -63,7 +63,7 @@ void nfs_reap_expired_delegations(struct nfs_client *clp);
/* NFSv4 delegation-related procedures */
int nfs4_proc_delegreturn(struct inode *inode, const struct cred *cred, const nfs4_stateid *stateid, int issync);
-int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid, fmode_t type);
+int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
int nfs4_lock_delegation_recall(struct file_lock *fl, struct nfs4_state *state, const nfs4_stateid *stateid);
bool nfs4_copy_delegation_stateid(struct inode *inode, fmode_t flags, nfs4_stateid *dst, const struct cred **cred);
bool nfs4_refresh_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 8d501093660f..0adfd8840110 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1487,7 +1487,7 @@ static int nfs_finish_open(struct nfs_open_context *ctx,
if (S_ISREG(file->f_path.dentry->d_inode->i_mode))
nfs_file_set_open_context(file, ctx);
else
- err = -ESTALE;
+ err = -EOPENSTALE;
out:
return err;
}
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 0cb442406168..222d7115db71 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -401,15 +401,21 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
unsigned long bytes = 0;
struct nfs_direct_req *dreq = hdr->dreq;
- if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
- goto out_put;
-
spin_lock(&dreq->lock);
- if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
+ if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
dreq->error = hdr->error;
- else
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ spin_unlock(&dreq->lock);
+ goto out_put;
+ }
+
+ if (hdr->good_bytes != 0)
nfs_direct_good_bytes(dreq, hdr);
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+ dreq->error = 0;
+
spin_unlock(&dreq->lock);
while (!list_empty(&hdr->pages)) {
@@ -782,16 +788,19 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
bool request_commit = false;
struct nfs_page *req = nfs_list_entry(hdr->pages.next);
- if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
- goto out_put;
-
nfs_init_cinfo_from_dreq(&cinfo, dreq);
spin_lock(&dreq->lock);
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
dreq->error = hdr->error;
- if (dreq->error == 0) {
+
+ if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
+ spin_unlock(&dreq->lock);
+ goto out_put;
+ }
+
+ if (hdr->good_bytes != 0) {
nfs_direct_good_bytes(dreq, hdr);
if (nfs_write_need_commit(hdr)) {
if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index b04e20d28162..5657b7f2611f 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -8,6 +8,7 @@
*/
#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sched/mm.h>
@@ -928,7 +929,9 @@ retry:
pgm = &pgio->pg_mirrors[0];
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize;
- pgio->pg_maxretrans = io_maxretrans;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
return;
out_nolseg:
if (pgio->pg_error < 0)
@@ -940,6 +943,7 @@ out_mds:
pgio->pg_lseg);
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
+ pgio->pg_maxretrans = 0;
nfs_pageio_reset_read_mds(pgio);
}
@@ -1000,7 +1004,9 @@ retry:
pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].wsize;
}
- pgio->pg_maxretrans = io_maxretrans;
+ if (NFS_SERVER(pgio->pg_inode)->flags &
+ (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR))
+ pgio->pg_maxretrans = io_maxretrans;
return;
out_mds:
@@ -1010,6 +1016,7 @@ out_mds:
pgio->pg_lseg);
pnfs_put_lseg(pgio->pg_lseg);
pgio->pg_lseg = NULL;
+ pgio->pg_maxretrans = 0;
nfs_pageio_reset_write_mds(pgio);
}
@@ -1148,8 +1155,6 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task,
break;
case -NFS4ERR_RETRY_UNCACHED_REP:
break;
- case -EAGAIN:
- return -NFS4ERR_RESET_TO_PNFS;
/* Invalidate Layout errors */
case -NFS4ERR_PNFS_NO_LAYOUT:
case -ESTALE: /* mapped NFS4ERR_STALE */
@@ -1210,7 +1215,6 @@ static int ff_layout_async_handle_error_v3(struct rpc_task *task,
case -EBADHANDLE:
case -ELOOP:
case -ENOSPC:
- case -EAGAIN:
break;
case -EJUKEBOX:
nfs_inc_stats(lseg->pls_layout->plh_inode, NFSIOS_DELAY);
@@ -1445,16 +1449,6 @@ static void ff_layout_read_prepare_v4(struct rpc_task *task, void *data)
ff_layout_read_prepare_common(task, hdr);
}
-static void
-ff_layout_io_prepare_transmit(struct rpc_task *task,
- void *data)
-{
- struct nfs_pgio_header *hdr = data;
-
- if (!pnfs_is_valid_lseg(hdr->lseg))
- rpc_exit(task, -EAGAIN);
-}
-
static void ff_layout_read_call_done(struct rpc_task *task, void *data)
{
struct nfs_pgio_header *hdr = data;
@@ -1740,7 +1734,6 @@ static void ff_layout_commit_release(void *data)
static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
.rpc_call_prepare = ff_layout_read_prepare_v3,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_read_call_done,
.rpc_count_stats = ff_layout_read_count_stats,
.rpc_release = ff_layout_read_release,
@@ -1748,7 +1741,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v3 = {
static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
.rpc_call_prepare = ff_layout_read_prepare_v4,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_read_call_done,
.rpc_count_stats = ff_layout_read_count_stats,
.rpc_release = ff_layout_read_release,
@@ -1756,7 +1748,6 @@ static const struct rpc_call_ops ff_layout_read_call_ops_v4 = {
static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
.rpc_call_prepare = ff_layout_write_prepare_v3,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_write_call_done,
.rpc_count_stats = ff_layout_write_count_stats,
.rpc_release = ff_layout_write_release,
@@ -1764,7 +1755,6 @@ static const struct rpc_call_ops ff_layout_write_call_ops_v3 = {
static const struct rpc_call_ops ff_layout_write_call_ops_v4 = {
.rpc_call_prepare = ff_layout_write_prepare_v4,
- .rpc_call_prepare_transmit = ff_layout_io_prepare_transmit,
.rpc_call_done = ff_layout_write_call_done,
.rpc_count_stats = ff_layout_write_count_stats,
.rpc_release = ff_layout_write_release,
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 53507aa96b0b..3800ab6f08fa 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -114,6 +114,10 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
struct rb_node **p, *parent;
int diff;
+ nfss->fscache_key = NULL;
+ nfss->fscache = NULL;
+ if (!(nfss->options & NFS_OPTION_FSCACHE))
+ return;
if (!uniq) {
uniq = "";
ulen = 1;
@@ -226,10 +230,11 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
void nfs_fscache_init_inode(struct inode *inode)
{
struct nfs_fscache_inode_auxdata auxdata;
+ struct nfs_server *nfss = NFS_SERVER(inode);
struct nfs_inode *nfsi = NFS_I(inode);
nfsi->fscache = NULL;
- if (!S_ISREG(inode->i_mode))
+ if (!(nfss->fscache && S_ISREG(inode->i_mode)))
return;
memset(&auxdata, 0, sizeof(auxdata));
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index 25a75e40d91d..ad041cfbf9ec 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -182,7 +182,7 @@ static inline void nfs_fscache_wait_on_invalidate(struct inode *inode)
*/
static inline const char *nfs_server_fscache_state(struct nfs_server *server)
{
- if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
+ if (server->fscache)
return "yes";
return "no ";
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 8a1758200b57..2a03bfeec10a 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1403,12 +1403,22 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
if (NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
return 0;
+ if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+ /* Only a mounted-on-fileid? Just exit */
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+ return 0;
/* Has the inode gone and changed behind our back? */
- if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
+ } else if (nfsi->fileid != fattr->fileid) {
+ /* Is this perhaps the mounted-on fileid? */
+ if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+ nfsi->fileid == fattr->mounted_on_fileid)
+ return 0;
return -ESTALE;
+ }
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
return -ESTALE;
+
if (!nfs_file_has_buffered_writers(nfsi)) {
/* Verify a few of the more important attributes */
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 && !inode_eq_iversion_raw(inode, fattr->change_attr))
@@ -1768,18 +1778,6 @@ int nfs_post_op_update_inode_force_wcc(struct inode *inode, struct nfs_fattr *fa
EXPORT_SYMBOL_GPL(nfs_post_op_update_inode_force_wcc);
-static inline bool nfs_fileid_valid(struct nfs_inode *nfsi,
- struct nfs_fattr *fattr)
-{
- bool ret1 = true, ret2 = true;
-
- if (fattr->valid & NFS_ATTR_FATTR_FILEID)
- ret1 = (nfsi->fileid == fattr->fileid);
- if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
- ret2 = (nfsi->fileid == fattr->mounted_on_fileid);
- return ret1 || ret2;
-}
-
/*
* Many nfs protocol calls return the new file attributes after
* an operation. Here we update the inode to reflect the state
@@ -1810,7 +1808,16 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
nfs_display_fhandle_hash(NFS_FH(inode)),
atomic_read(&inode->i_count), fattr->valid);
- if (!nfs_fileid_valid(nfsi, fattr)) {
+ if (!(fattr->valid & NFS_ATTR_FATTR_FILEID)) {
+ /* Only a mounted-on-fileid? Just exit */
+ if (fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID)
+ return 0;
+ /* Has the inode gone and changed behind our back? */
+ } else if (nfsi->fileid != fattr->fileid) {
+ /* Is this perhaps the mounted-on fileid? */
+ if ((fattr->valid & NFS_ATTR_FATTR_MOUNTED_ON_FILEID) &&
+ nfsi->fileid == fattr->mounted_on_fileid)
+ return 0;
printk(KERN_ERR "NFS: server %s error: fileid changed\n"
"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
NFS_SERVER(inode)->nfs_client->cl_hostname,
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index a2346a2f8361..e64f810223be 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -775,3 +775,13 @@ static inline bool nfs_error_is_fatal(int err)
}
}
+static inline bool nfs_error_is_fatal_on_server(int err)
+{
+ switch (err) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return false;
+ }
+ return nfs_error_is_fatal(err);
+}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index d778dad9a75e..3564da1ba8a1 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -465,7 +465,8 @@ static inline void nfs4_schedule_session_recovery(struct nfs4_session *session,
extern struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *, const struct cred *, gfp_t);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_purge_state_owners(struct nfs_server *);
+extern void nfs4_purge_state_owners(struct nfs_server *, struct list_head *);
+extern void nfs4_free_state_owners(struct list_head *head);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct nfs4_state *, fmode_t);
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 616393a01c06..da6204025a2d 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -758,9 +758,12 @@ out:
static void nfs4_destroy_server(struct nfs_server *server)
{
+ LIST_HEAD(freeme);
+
nfs_server_return_all_delegations(server);
unset_pnfs_layoutdriver(server);
- nfs4_purge_state_owners(server);
+ nfs4_purge_state_owners(server, &freeme);
+ nfs4_free_state_owners(&freeme);
}
/*
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index 96db471ca2e5..339663d04bf8 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -73,13 +73,13 @@ nfs4_file_open(struct inode *inode, struct file *filp)
if (IS_ERR(inode)) {
err = PTR_ERR(inode);
switch (err) {
- case -EPERM:
- case -EACCES:
- case -EDQUOT:
- case -ENOSPC:
- case -EROFS:
- goto out_put_ctx;
default:
+ goto out_put_ctx;
+ case -ENOENT:
+ case -ESTALE:
+ case -EISDIR:
+ case -ENOTDIR:
+ case -ELOOP:
goto out_drop;
}
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 39896afc6edf..1406858bae6c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1683,6 +1683,14 @@ static void nfs_state_set_open_stateid(struct nfs4_state *state,
write_sequnlock(&state->seqlock);
}
+static void nfs_state_clear_open_state_flags(struct nfs4_state *state)
+{
+ clear_bit(NFS_O_RDWR_STATE, &state->flags);
+ clear_bit(NFS_O_WRONLY_STATE, &state->flags);
+ clear_bit(NFS_O_RDONLY_STATE, &state->flags);
+ clear_bit(NFS_OPEN_STATE, &state->flags);
+}
+
static void nfs_state_set_delegation(struct nfs4_state *state,
const nfs4_stateid *deleg_stateid,
fmode_t fmode)
@@ -1907,8 +1915,9 @@ _nfs4_opendata_reclaim_to_nfs4_state(struct nfs4_opendata *data)
if (data->o_res.delegation_type != 0)
nfs4_opendata_check_deleg(data, state);
update:
- update_open_stateid(state, &data->o_res.stateid, NULL,
- data->o_arg.fmode);
+ if (!update_open_stateid(state, &data->o_res.stateid,
+ NULL, data->o_arg.fmode))
+ return ERR_PTR(-EAGAIN);
refcount_inc(&state->count);
return state;
@@ -1973,8 +1982,11 @@ _nfs4_opendata_to_nfs4_state(struct nfs4_opendata *data)
if (data->o_res.delegation_type != 0)
nfs4_opendata_check_deleg(data, state);
- update_open_stateid(state, &data->o_res.stateid, NULL,
- data->o_arg.fmode);
+ if (!update_open_stateid(state, &data->o_res.stateid,
+ NULL, data->o_arg.fmode)) {
+ nfs4_put_open_state(state);
+ state = ERR_PTR(-EAGAIN);
+ }
out:
nfs_release_seqid(data->o_arg.seqid);
return state;
@@ -2074,13 +2086,7 @@ static int nfs4_open_recover(struct nfs4_opendata *opendata, struct nfs4_state *
{
int ret;
- /* Don't trigger recovery in nfs_test_and_clear_all_open_stateid */
- clear_bit(NFS_O_RDWR_STATE, &state->flags);
- clear_bit(NFS_O_WRONLY_STATE, &state->flags);
- clear_bit(NFS_O_RDONLY_STATE, &state->flags);
/* memory barrier prior to reading state->n_* */
- clear_bit(NFS_DELEGATED_STATE, &state->flags);
- clear_bit(NFS_OPEN_STATE, &state->flags);
smp_rmb();
ret = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
if (ret != 0)
@@ -2156,6 +2162,8 @@ static int nfs4_open_reclaim(struct nfs4_state_owner *sp, struct nfs4_state *sta
ctx = nfs4_state_find_open_context(state);
if (IS_ERR(ctx))
return -EAGAIN;
+ clear_bit(NFS_DELEGATED_STATE, &state->flags);
+ nfs_state_clear_open_state_flags(state);
ret = nfs4_do_open_reclaim(ctx, state);
put_nfs_open_context(ctx);
return ret;
@@ -2171,18 +2179,17 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
case -ENOENT:
case -EAGAIN:
case -ESTALE:
+ case -ETIMEDOUT:
break;
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
case -NFS4ERR_DEADSESSION:
- set_bit(NFS_DELEGATED_STATE, &state->flags);
nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
return -EAGAIN;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
- set_bit(NFS_DELEGATED_STATE, &state->flags);
/* Don't recall a delegation if it was lost */
nfs4_schedule_lease_recovery(server->nfs_client);
return -EAGAIN;
@@ -2203,7 +2210,6 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
return -EAGAIN;
case -NFS4ERR_DELAY:
case -NFS4ERR_GRACE:
- set_bit(NFS_DELEGATED_STATE, &state->flags);
ssleep(1);
return -EAGAIN;
case -ENOMEM:
@@ -2219,8 +2225,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
}
int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
- struct nfs4_state *state, const nfs4_stateid *stateid,
- fmode_t type)
+ struct nfs4_state *state, const nfs4_stateid *stateid)
{
struct nfs_server *server = NFS_SERVER(state->inode);
struct nfs4_opendata *opendata;
@@ -2231,20 +2236,23 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx,
if (IS_ERR(opendata))
return PTR_ERR(opendata);
nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid);
- nfs_state_clear_delegation(state);
- switch (type & (FMODE_READ|FMODE_WRITE)) {
- case FMODE_READ|FMODE_WRITE:
- case FMODE_WRITE:
+ if (!test_bit(NFS_O_RDWR_STATE, &state->flags)) {
err = nfs4_open_recover_helper(opendata, FMODE_READ|FMODE_WRITE);
if (err)
- break;
+ goto out;
+ }
+ if (!test_bit(NFS_O_WRONLY_STATE, &state->flags)) {
err = nfs4_open_recover_helper(opendata, FMODE_WRITE);
if (err)
- break;
- /* Fall through */
- case FMODE_READ:
+ goto out;
+ }
+ if (!test_bit(NFS_O_RDONLY_STATE, &state->flags)) {
err = nfs4_open_recover_helper(opendata, FMODE_READ);
+ if (err)
+ goto out;
}
+ nfs_state_clear_delegation(state);
+out:
nfs4_opendata_put(opendata);
return nfs4_handle_delegation_recall_error(server, state, stateid, NULL, err);
}
@@ -2492,6 +2500,7 @@ static int nfs4_run_open_task(struct nfs4_opendata *data,
if (!ctx) {
nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 1);
data->is_recover = true;
+ task_setup_data.flags |= RPC_TASK_TIMEOUT;
} else {
nfs4_init_sequence(&o_arg->seq_args, &o_res->seq_res, 1, 0);
pnfs_lgopen_prepare(data, ctx);
@@ -2698,6 +2707,7 @@ static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
{
/* NFSv4.0 doesn't allow for delegation recovery on open expire */
nfs40_clear_delegation_stateid(state);
+ nfs_state_clear_open_state_flags(state);
return nfs4_open_expired(sp, state);
}
@@ -2740,13 +2750,13 @@ out_free:
return -NFS4ERR_EXPIRED;
}
-static void nfs41_check_delegation_stateid(struct nfs4_state *state)
+static int nfs41_check_delegation_stateid(struct nfs4_state *state)
{
struct nfs_server *server = NFS_SERVER(state->inode);
nfs4_stateid stateid;
struct nfs_delegation *delegation;
const struct cred *cred = NULL;
- int status;
+ int status, ret = NFS_OK;
/* Get the delegation credential for use by test/free_stateid */
rcu_read_lock();
@@ -2754,20 +2764,15 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
if (delegation == NULL) {
rcu_read_unlock();
nfs_state_clear_delegation(state);
- return;
+ return NFS_OK;
}
nfs4_stateid_copy(&stateid, &delegation->stateid);
- if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
- rcu_read_unlock();
- nfs_state_clear_delegation(state);
- return;
- }
if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
&delegation->flags)) {
rcu_read_unlock();
- return;
+ return NFS_OK;
}
if (delegation->cred)
@@ -2777,9 +2782,24 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state)
trace_nfs4_test_delegation_stateid(state, NULL, status);
if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID)
nfs_finish_clear_delegation_stateid(state, &stateid);
+ else
+ ret = status;
- if (delegation->cred)
- put_cred(cred);
+ put_cred(cred);
+ return ret;
+}
+
+static void nfs41_delegation_recover_stateid(struct nfs4_state *state)
+{
+ nfs4_stateid tmp;
+
+ if (test_bit(NFS_DELEGATED_STATE, &state->flags) &&
+ nfs4_copy_delegation_stateid(state->inode, state->state,
+ &tmp, NULL) &&
+ nfs4_stateid_match_other(&state->stateid, &tmp))
+ nfs_state_set_delegation(state, &tmp, state->state);
+ else
+ nfs_state_clear_delegation(state);
}
/**
@@ -2849,21 +2869,12 @@ static int nfs41_check_open_stateid(struct nfs4_state *state)
const struct cred *cred = state->owner->so_cred;
int status;
- if (test_bit(NFS_OPEN_STATE, &state->flags) == 0) {
- if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0) {
- if (nfs4_have_delegation(state->inode, state->state))
- return NFS_OK;
- return -NFS4ERR_OPENMODE;
- }
+ if (test_bit(NFS_OPEN_STATE, &state->flags) == 0)
return -NFS4ERR_BAD_STATEID;
- }
status = nfs41_test_and_free_expired_stateid(server, stateid, cred);
trace_nfs4_test_open_stateid(state, NULL, status);
if (status == -NFS4ERR_EXPIRED || status == -NFS4ERR_BAD_STATEID) {
- clear_bit(NFS_O_RDONLY_STATE, &state->flags);
- clear_bit(NFS_O_WRONLY_STATE, &state->flags);
- clear_bit(NFS_O_RDWR_STATE, &state->flags);
- clear_bit(NFS_OPEN_STATE, &state->flags);
+ nfs_state_clear_open_state_flags(state);
stateid->type = NFS4_INVALID_STATEID_TYPE;
return status;
}
@@ -2876,7 +2887,11 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
{
int status;
- nfs41_check_delegation_stateid(state);
+ status = nfs41_check_delegation_stateid(state);
+ if (status != NFS_OK)
+ return status;
+ nfs41_delegation_recover_stateid(state);
+
status = nfs41_check_expired_locks(state);
if (status != NFS_OK)
return status;
@@ -3201,7 +3216,7 @@ static int _nfs4_do_setattr(struct inode *inode,
if (nfs4_copy_delegation_stateid(inode, FMODE_WRITE, &arg->stateid, &delegation_cred)) {
/* Use that stateid */
- } else if (ctx != NULL) {
+ } else if (ctx != NULL && ctx->state) {
struct nfs_lock_context *l_ctx;
if (!nfs4_valid_open_stateid(ctx->state))
return -EBADF;
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 9afd051a4876..cad4e064b328 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -624,24 +624,39 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
/**
* nfs4_purge_state_owners - Release all cached state owners
* @server: nfs_server with cached state owners to release
+ * @head: resulting list of state owners
*
* Called at umount time. Remaining state owners will be on
* the LRU with ref count of zero.
+ * Note that the state owners are not freed, but are added
+ * to the list @head, which can later be used as an argument
+ * to nfs4_free_state_owners.
*/
-void nfs4_purge_state_owners(struct nfs_server *server)
+void nfs4_purge_state_owners(struct nfs_server *server, struct list_head *head)
{
struct nfs_client *clp = server->nfs_client;
struct nfs4_state_owner *sp, *tmp;
- LIST_HEAD(doomed);
spin_lock(&clp->cl_lock);
list_for_each_entry_safe(sp, tmp, &server->state_owners_lru, so_lru) {
- list_move(&sp->so_lru, &doomed);
+ list_move(&sp->so_lru, head);
nfs4_remove_state_owner_locked(sp);
}
spin_unlock(&clp->cl_lock);
+}
- list_for_each_entry_safe(sp, tmp, &doomed, so_lru) {
+/**
+ * nfs4_free_state_owners - Release all cached state owners
+ * @head: resulting list of state owners
+ *
+ * Frees a list of state owners that was generated by
+ * nfs4_purge_state_owners
+ */
+void nfs4_free_state_owners(struct list_head *head)
+{
+ struct nfs4_state_owner *sp, *tmp;
+
+ list_for_each_entry_safe(sp, tmp, head, so_lru) {
list_del(&sp->so_lru);
nfs4_free_state_owner(sp);
}
@@ -1463,7 +1478,7 @@ void nfs_inode_find_state_and_recover(struct inode *inode,
nfs4_schedule_state_manager(clp);
}
-static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
+static void nfs4_state_mark_open_context_bad(struct nfs4_state *state, int err)
{
struct inode *inode = state->inode;
struct nfs_inode *nfsi = NFS_I(inode);
@@ -1474,6 +1489,8 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
if (ctx->state != state)
continue;
set_bit(NFS_CONTEXT_BAD, &ctx->flags);
+ pr_warn("NFSv4: state recovery failed for open file %pd2, "
+ "error = %d\n", ctx->dentry, err);
}
rcu_read_unlock();
}
@@ -1481,7 +1498,7 @@ static void nfs4_state_mark_open_context_bad(struct nfs4_state *state)
static void nfs4_state_mark_recovery_failed(struct nfs4_state *state, int error)
{
set_bit(NFS_STATE_RECOVERY_FAILED, &state->flags);
- nfs4_state_mark_open_context_bad(state);
+ nfs4_state_mark_open_context_bad(state, error);
}
@@ -1512,6 +1529,7 @@ restart:
switch (status) {
case 0:
break;
+ case -ETIMEDOUT:
case -ESTALE:
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
@@ -1605,6 +1623,7 @@ static int __nfs4_reclaim_open_state(struct nfs4_state_owner *sp, struct nfs4_st
static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs4_state_recovery_ops *ops)
{
struct nfs4_state *state;
+ unsigned int loop = 0;
int status = 0;
/* Note: we rely on the sp->so_states list being ordered
@@ -1631,8 +1650,10 @@ restart:
switch (status) {
default:
- if (status >= 0)
+ if (status >= 0) {
+ loop = 0;
break;
+ }
printk(KERN_ERR "NFS: %s: unhandled error %d\n", __func__, status);
/* Fall through */
case -ENOENT:
@@ -1646,6 +1667,10 @@ restart:
break;
case -EAGAIN:
ssleep(1);
+ if (loop++ < 10) {
+ set_bit(ops->state_flag_bit, &state->flags);
+ break;
+ }
/* Fall through */
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
@@ -1658,11 +1683,13 @@ restart:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_NO_GRACE:
nfs4_state_mark_reclaim_nograce(sp->so_server->nfs_client, state);
+ /* Fall through */
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_BADSESSION:
case -NFS4ERR_BADSLOT:
case -NFS4ERR_BAD_HIGH_SLOT:
case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
+ case -ETIMEDOUT:
goto out_err;
}
nfs4_put_open_state(state);
@@ -1856,12 +1883,13 @@ static int nfs4_do_reclaim(struct nfs_client *clp, const struct nfs4_state_recov
struct nfs4_state_owner *sp;
struct nfs_server *server;
struct rb_node *pos;
+ LIST_HEAD(freeme);
int status = 0;
restart:
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
- nfs4_purge_state_owners(server);
+ nfs4_purge_state_owners(server, &freeme);
spin_lock(&clp->cl_lock);
for (pos = rb_first(&server->state_owners);
pos != NULL;
@@ -1890,6 +1918,7 @@ restart:
spin_unlock(&clp->cl_lock);
}
rcu_read_unlock();
+ nfs4_free_state_owners(&freeme);
return 0;
}
@@ -1945,7 +1974,6 @@ static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
return -EPERM;
case -EACCES:
case -NFS4ERR_DELAY:
- case -ETIMEDOUT:
case -EAGAIN:
ssleep(1);
break;
@@ -2574,7 +2602,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
}
/* Now recover expired state... */
- if (test_and_clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
+ if (test_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state)) {
section = "reclaim nograce";
status = nfs4_do_reclaim(clp,
clp->cl_mvops->nograce_recovery_ops);
@@ -2582,6 +2610,7 @@ static void nfs4_state_manager(struct nfs_client *clp)
continue;
if (status < 0)
goto out_error;
+ clear_bit(NFS4CLNT_RECLAIM_NOGRACE, &clp->cl_state);
}
nfs4_end_drain_session(clp);
diff --git a/fs/nfs/nfstrace.h b/fs/nfs/nfstrace.h
index 976d4089e267..361cc10d6f95 100644
--- a/fs/nfs/nfstrace.h
+++ b/fs/nfs/nfstrace.h
@@ -207,7 +207,6 @@ TRACE_DEFINE_ENUM(LOOKUP_PARENT);
TRACE_DEFINE_ENUM(LOOKUP_REVAL);
TRACE_DEFINE_ENUM(LOOKUP_RCU);
TRACE_DEFINE_ENUM(LOOKUP_NO_REVAL);
-TRACE_DEFINE_ENUM(LOOKUP_NO_EVAL);
TRACE_DEFINE_ENUM(LOOKUP_OPEN);
TRACE_DEFINE_ENUM(LOOKUP_CREATE);
TRACE_DEFINE_ENUM(LOOKUP_EXCL);
@@ -226,7 +225,6 @@ TRACE_DEFINE_ENUM(LOOKUP_DOWN);
{ LOOKUP_REVAL, "REVAL" }, \
{ LOOKUP_RCU, "RCU" }, \
{ LOOKUP_NO_REVAL, "NO_REVAL" }, \
- { LOOKUP_NO_EVAL, "NO_EVAL" }, \
{ LOOKUP_OPEN, "OPEN" }, \
{ LOOKUP_CREATE, "CREATE" }, \
{ LOOKUP_EXCL, "EXCL" }, \
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index ed4e1b07447b..20b3717cd7ca 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -590,7 +590,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
}
hdr->res.fattr = &hdr->fattr;
- hdr->res.count = count;
+ hdr->res.count = 0;
hdr->res.eof = 0;
hdr->res.verf = &hdr->verf;
nfs_fattr_init(&hdr->fattr);
@@ -1251,20 +1251,23 @@ static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc,
int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
struct nfs_pgio_header *hdr)
{
- LIST_HEAD(failed);
+ LIST_HEAD(pages);
desc->pg_io_completion = hdr->io_completion;
desc->pg_dreq = hdr->dreq;
- while (!list_empty(&hdr->pages)) {
- struct nfs_page *req = nfs_list_entry(hdr->pages.next);
+ list_splice_init(&hdr->pages, &pages);
+ while (!list_empty(&pages)) {
+ struct nfs_page *req = nfs_list_entry(pages.next);
if (!nfs_pageio_add_request(desc, req))
- nfs_list_move_request(req, &failed);
+ break;
}
nfs_pageio_complete(desc);
- if (!list_empty(&failed)) {
- list_move(&failed, &hdr->pages);
- return desc->pg_error < 0 ? desc->pg_error : -EIO;
+ if (!list_empty(&pages)) {
+ int err = desc->pg_error < 0 ? desc->pg_error : -EIO;
+ hdr->completion_ops->error_cleanup(&pages, err);
+ nfs_set_pgio_error(hdr, err, hdr->io_start);
+ return err;
}
return 0;
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 75bd5b552ba4..4525d5acae38 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1903,12 +1903,6 @@ lookup_again:
goto out_unlock;
}
- if (!nfs4_valid_open_stateid(ctx->state)) {
- trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
- PNFS_UPDATE_LAYOUT_INVALID_OPEN);
- goto out_unlock;
- }
-
/*
* Choose a stateid for the LAYOUTGET. If we don't have a layout
* stateid, or it has been invalidated, then we must use the open
@@ -1939,6 +1933,7 @@ lookup_again:
iomode == IOMODE_RW ? FMODE_WRITE : FMODE_READ,
NULL, &stateid, NULL);
if (status != 0) {
+ lseg = ERR_PTR(status);
trace_pnfs_update_layout(ino, pos, count,
iomode, lo, lseg,
PNFS_UPDATE_LAYOUT_INVALID_OPEN);
diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c
index c0046c348910..82af4809b869 100644
--- a/fs/nfs/pnfs_nfs.c
+++ b/fs/nfs/pnfs_nfs.c
@@ -627,11 +627,16 @@ static int _nfs4_pnfs_v3_ds_connect(struct nfs_server *mds_srv,
/* Add this address as an alias */
rpc_clnt_add_xprt(clp->cl_rpcclient, &xprt_args,
rpc_clnt_test_and_add_xprt, NULL);
- } else
- clp = get_v3_ds_connect(mds_srv,
- (struct sockaddr *)&da->da_addr,
- da->da_addrlen, IPPROTO_TCP,
- timeo, retrans);
+ continue;
+ }
+ clp = get_v3_ds_connect(mds_srv,
+ (struct sockaddr *)&da->da_addr,
+ da->da_addrlen, IPPROTO_TCP,
+ timeo, retrans);
+ if (IS_ERR(clp))
+ continue;
+ clp->cl_rpcclient->cl_softerr = 0;
+ clp->cl_rpcclient->cl_softrtry = 0;
}
if (IS_ERR(clp)) {
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 5552fa8b6e12..0f7288b94633 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -594,7 +594,8 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
/* Emulate the eof flag, which isn't normally needed in NFSv2
* as it is guaranteed to always return the file attributes
*/
- if (hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
+ if ((hdr->res.count == 0 && hdr->args.count > 0) ||
+ hdr->args.offset + hdr->res.count >= hdr->res.fattr->size)
hdr->res.eof = 1;
}
return 0;
@@ -615,8 +616,10 @@ static int nfs_proc_pgio_rpc_prepare(struct rpc_task *task,
static int nfs_write_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
{
- if (task->tk_status >= 0)
+ if (task->tk_status >= 0) {
+ hdr->res.count = hdr->args.count;
nfs_writeback_update_inode(hdr);
+ }
return 0;
}
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c19841c82b6a..cfe0b586eadd 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -91,19 +91,25 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
-static void nfs_readpage_release(struct nfs_page *req)
+static void nfs_readpage_release(struct nfs_page *req, int error)
{
struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
+ struct page *page = req->wb_page;
dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
(long long)req_offset(req));
+ if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
+ SetPageError(page);
if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
- if (PageUptodate(req->wb_page))
- nfs_readpage_to_fscache(inode, req->wb_page, 0);
+ struct address_space *mapping = page_file_mapping(page);
- unlock_page(req->wb_page);
+ if (PageUptodate(page))
+ nfs_readpage_to_fscache(inode, page, 0);
+ else if (!PageError(page) && !PagePrivate(page))
+ generic_error_remove_page(mapping, page);
+ unlock_page(page);
}
nfs_release_request(req);
}
@@ -131,7 +137,7 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
&nfs_async_read_completion_ops);
if (!nfs_pageio_add_request(&pgio, new)) {
nfs_list_remove_request(new);
- nfs_readpage_release(new);
+ nfs_readpage_release(new, pgio.pg_error);
}
nfs_pageio_complete(&pgio);
@@ -153,6 +159,7 @@ static void nfs_page_group_set_uptodate(struct nfs_page *req)
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
unsigned long bytes = 0;
+ int error;
if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
goto out;
@@ -179,14 +186,19 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
zero_user_segment(page, start, end);
}
}
+ error = 0;
bytes += req->wb_bytes;
if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
if (bytes <= hdr->good_bytes)
nfs_page_group_set_uptodate(req);
+ else {
+ error = hdr->error;
+ xchg(&nfs_req_openctx(req)->error, error);
+ }
} else
nfs_page_group_set_uptodate(req);
nfs_list_remove_request(req);
- nfs_readpage_release(req);
+ nfs_readpage_release(req, error);
}
out:
hdr->release(hdr);
@@ -213,7 +225,7 @@ nfs_async_read_error(struct list_head *head, int error)
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
nfs_list_remove_request(req);
- nfs_readpage_release(req);
+ nfs_readpage_release(req, error);
}
}
@@ -337,8 +349,13 @@ int nfs_readpage(struct file *file, struct page *page)
goto out;
}
+ xchg(&ctx->error, 0);
error = nfs_readpage_async(ctx, inode, page);
-
+ if (!error) {
+ error = wait_on_page_locked_killable(page);
+ if (!PageUptodate(page) && !error)
+ error = xchg(&ctx->error, 0);
+ }
out:
put_nfs_open_context(ctx);
return error;
@@ -372,8 +389,8 @@ readpage_async_filler(void *data, struct page *page)
zero_user_segment(page, len, PAGE_SIZE);
if (!nfs_pageio_add_request(desc->pgio, new)) {
nfs_list_remove_request(new);
- nfs_readpage_release(new);
error = desc->pgio->pg_error;
+ nfs_readpage_release(new, error);
goto out;
}
return 0;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 628631e2e34f..703f595dce90 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -2260,6 +2260,7 @@ nfs_compare_remount_data(struct nfs_server *nfss,
data->acdirmin != nfss->acdirmin / HZ ||
data->acdirmax != nfss->acdirmax / HZ ||
data->timeo != (10U * nfss->client->cl_timeout->to_initval / HZ) ||
+ (data->options & NFS_OPTION_FSCACHE) != (nfss->options & NFS_OPTION_FSCACHE) ||
data->nfs_server.port != nfss->port ||
data->nfs_server.addrlen != nfss->nfs_client->cl_addrlen ||
!rpc_cmp_addr((struct sockaddr *)&data->nfs_server.address,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 92d9cadc6102..85ca49549b39 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -57,6 +57,7 @@ static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
struct inode *inode);
@@ -591,23 +592,13 @@ release_request:
static void nfs_write_error(struct nfs_page *req, int error)
{
+ nfs_set_pageerror(page_file_mapping(req->wb_page));
nfs_mapping_set_error(req->wb_page, error);
+ nfs_inode_remove_request(req);
nfs_end_page_writeback(req);
nfs_release_request(req);
}
-static bool
-nfs_error_is_fatal_on_server(int err)
-{
- switch (err) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- return false;
- }
- return nfs_error_is_fatal(err);
-}
-
/*
* Find an associated nfs write request, and prepare to flush it out
* May return an error if the user signalled nfs_wait_on_request().
@@ -615,7 +606,6 @@ nfs_error_is_fatal_on_server(int err)
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
struct page *page)
{
- struct address_space *mapping;
struct nfs_page *req;
int ret = 0;
@@ -630,12 +620,11 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));
/* If there is a fatal error that covers this write, just exit */
- ret = 0;
- mapping = page_file_mapping(page);
- if (test_bit(AS_ENOSPC, &mapping->flags) ||
- test_bit(AS_EIO, &mapping->flags))
+ ret = pgio->pg_error;
+ if (nfs_error_is_fatal_on_server(ret))
goto out_launder;
+ ret = 0;
if (!nfs_pageio_add_request(pgio, req)) {
ret = pgio->pg_error;
/*
@@ -647,6 +636,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
} else
ret = -EAGAIN;
nfs_redirty_request(req);
+ pgio->pg_error = 0;
} else
nfs_add_stats(page_file_mapping(page)->host,
NFSIOS_WRITEPAGES, 1);
@@ -666,7 +656,7 @@ static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
ret = nfs_page_async_flush(pgio, page);
if (ret == -EAGAIN) {
redirty_page_for_writepage(wbc, page);
- ret = 0;
+ ret = AOP_WRITEPAGE_ACTIVATE;
}
return ret;
}
@@ -685,10 +675,11 @@ static int nfs_writepage_locked(struct page *page,
nfs_pageio_init_write(&pgio, inode, 0,
false, &nfs_async_write_completion_ops);
err = nfs_do_writepage(page, wbc, &pgio);
+ pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
if (err < 0)
return err;
- if (pgio.pg_error < 0)
+ if (nfs_error_is_fatal(pgio.pg_error))
return pgio.pg_error;
return 0;
}
@@ -698,7 +689,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
int ret;
ret = nfs_writepage_locked(page, wbc);
- unlock_page(page);
+ if (ret != AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
return ret;
}
@@ -707,7 +699,8 @@ static int nfs_writepages_callback(struct page *page, struct writeback_control *
int ret;
ret = nfs_do_writepage(page, wbc, data);
- unlock_page(page);
+ if (ret != AOP_WRITEPAGE_ACTIVATE)
+ unlock_page(page);
return ret;
}
@@ -733,13 +726,14 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
&nfs_async_write_completion_ops);
pgio.pg_io_completion = ioc;
err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
+ pgio.pg_error = 0;
nfs_pageio_complete(&pgio);
nfs_io_completion_put(ioc);
if (err < 0)
goto out_err;
err = pgio.pg_error;
- if (err < 0)
+ if (nfs_error_is_fatal(err))
goto out_err;
return 0;
out_err:
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 26ad75ae2be0..96352ab7bd81 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -571,7 +571,7 @@ nfsd_cache_append(struct svc_rqst *rqstp, struct kvec *data)
*/
static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
{
- struct nfsd_net *nn = v;
+ struct nfsd_net *nn = m->private;
seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
seq_printf(m, "num entries: %u\n",
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 13c548733860..2c215171c0eb 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -1171,13 +1171,17 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
return inode;
}
-static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
+static int __nfsd_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode, struct nfsdfs_client *ncl)
{
struct inode *inode;
inode = nfsd_get_inode(dir->i_sb, mode);
if (!inode)
return -ENOMEM;
+ if (ncl) {
+ inode->i_private = ncl;
+ kref_get(&ncl->cl_ref);
+ }
d_add(dentry, inode);
inc_nlink(dir);
fsnotify_mkdir(dir, dentry);
@@ -1194,17 +1198,14 @@ static struct dentry *nfsd_mkdir(struct dentry *parent, struct nfsdfs_client *nc
dentry = d_alloc_name(parent, name);
if (!dentry)
goto out_err;
- ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600);
+ ret = __nfsd_mkdir(d_inode(parent), dentry, S_IFDIR | 0600, ncl);
if (ret)
goto out_err;
- if (ncl) {
- d_inode(dentry)->i_private = ncl;
- kref_get(&ncl->cl_ref);
- }
out:
inode_unlock(dir);
return dentry;
out_err:
+ dput(dentry);
dentry = ERR_PTR(ret);
goto out;
}
@@ -1214,11 +1215,9 @@ static void clear_ncl(struct inode *inode)
struct nfsdfs_client *ncl = inode->i_private;
inode->i_private = NULL;
- synchronize_rcu();
kref_put(&ncl->cl_ref, ncl->cl_release);
}
-
static struct nfsdfs_client *__get_nfsdfs_client(struct inode *inode)
{
struct nfsdfs_client *nc = inode->i_private;
@@ -1232,9 +1231,9 @@ struct nfsdfs_client *get_nfsdfs_client(struct inode *inode)
{
struct nfsdfs_client *nc;
- rcu_read_lock();
+ inode_lock_shared(inode);
nc = __get_nfsdfs_client(inode);
- rcu_read_unlock();
+ inode_unlock_shared(inode);
return nc;
}
/* from __rpc_unlink */
@@ -1386,8 +1385,7 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
static int nfsd_fs_get_tree(struct fs_context *fc)
{
- fc->s_fs_info = get_net(fc->net_ns);
- return vfs_get_super(fc, vfs_get_keyed_super, nfsd_fill_super);
+ return get_tree_keyed(fc, nfsd_fill_super, get_net(fc->net_ns));
}
static void nfsd_fs_free_fc(struct fs_context *fc)
diff --git a/fs/orangefs/file.c b/fs/orangefs/file.c
index 960f9a3c012d..a5612abc0936 100644
--- a/fs/orangefs/file.c
+++ b/fs/orangefs/file.c
@@ -555,7 +555,7 @@ static int orangefs_fsync(struct file *file,
* Change the file pointer position for an instance of an open file.
*
* \note If .llseek is overriden, we must acquire lock as described in
- * Documentation/filesystems/Locking.
+ * Documentation/filesystems/locking.rst.
*
* Future upgrade could support SEEK_DATA and SEEK_HOLE but would
* require much changes to the FS
diff --git a/fs/orangefs/orangefs-kernel.h b/fs/orangefs/orangefs-kernel.h
index 572dd29fbd54..34a6c99fa29b 100644
--- a/fs/orangefs/orangefs-kernel.h
+++ b/fs/orangefs/orangefs-kernel.h
@@ -246,7 +246,7 @@ struct orangefs_read_options {
extern struct orangefs_stats orangefs_stats;
/*
- * NOTE: See Documentation/filesystems/porting for information
+ * NOTE: See Documentation/filesystems/porting.rst for information
* on implementing FOO_I and properly accessing fs private data
*/
static inline struct orangefs_inode_s *ORANGEFS_I(struct inode *inode)
diff --git a/fs/proc/root.c b/fs/proc/root.c
index 33f72d1b92cc..0b7c8dffc9ae 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -157,8 +157,7 @@ static int proc_get_tree(struct fs_context *fc)
{
struct proc_fs_context *ctx = fc->fs_private;
- fc->s_fs_info = ctx->pid_ns;
- return vfs_get_super(fc, vfs_get_keyed_super, proc_fill_super);
+ return get_tree_keyed(fc, proc_fill_super, ctx->pid_ns);
}
static void proc_fs_context_free(struct fs_context *fc)
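Both callers above reduce to the same one-liner. As a rough sketch, a hypothetical keyed filesystem (the examplefs names are invented; only get_tree_keyed(), added in fs/super.c further down in this diff, and the fs_context hooks are real) would now implement its ->get_tree as:

#include <linux/fs_context.h>

static int examplefs_get_tree(struct fs_context *fc)
{
        struct examplefs_fs_context *ctx = fc->fs_private; /* hypothetical private context */

        /* get_tree_keyed() stashes the key in fc->s_fs_info and hands off to
         * vfs_get_super(fc, vfs_get_keyed_super, fill_super), exactly what the
         * open-coded callers above used to do by hand. */
        return get_tree_keyed(fc, examplefs_fill_super, ctx->key);
}

static const struct fs_context_operations examplefs_context_ops = {
        .get_tree = examplefs_get_tree,
};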
diff --git a/fs/read_write.c b/fs/read_write.c
index 1f5088dec566..5bbf587f5bc1 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1811,10 +1811,7 @@ static int generic_remap_check_len(struct inode *inode_in,
return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
}
-/*
- * Read a page's worth of file data into the page cache. Return the page
- * locked.
- */
+/* Read a page's worth of file data into the page cache. */
static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
{
struct page *page;
@@ -1826,11 +1823,33 @@ static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
put_page(page);
return ERR_PTR(-EIO);
}
- lock_page(page);
return page;
}
/*
+ * Lock two pages, ensuring that we lock in offset order if the pages are from
+ * the same file.
+ */
+static void vfs_lock_two_pages(struct page *page1, struct page *page2)
+{
+ /* Always lock in order of increasing index. */
+ if (page1->index > page2->index)
+ swap(page1, page2);
+
+ lock_page(page1);
+ if (page1 != page2)
+ lock_page(page2);
+}
+
+/* Unlock two pages, being careful not to unlock the same page twice. */
+static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
+{
+ unlock_page(page1);
+ if (page1 != page2)
+ unlock_page(page2);
+}
+
+/*
* Compare extents of two files to see if they are the same.
* Caller must have locked both inodes to prevent write races.
*/
@@ -1867,10 +1886,24 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
dest_page = vfs_dedupe_get_page(dest, destoff);
if (IS_ERR(dest_page)) {
error = PTR_ERR(dest_page);
- unlock_page(src_page);
put_page(src_page);
goto out_error;
}
+
+ vfs_lock_two_pages(src_page, dest_page);
+
+ /*
+ * Now that we've locked both pages, make sure they're still
+ * mapped to the file data we're interested in. If not,
+ * someone is invalidating pages on us and we lose.
+ */
+ if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
+ src_page->mapping != src->i_mapping ||
+ dest_page->mapping != dest->i_mapping) {
+ same = false;
+ goto unlock;
+ }
+
src_addr = kmap_atomic(src_page);
dest_addr = kmap_atomic(dest_page);
@@ -1882,8 +1915,8 @@ static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
kunmap_atomic(dest_addr);
kunmap_atomic(src_addr);
- unlock_page(dest_page);
- unlock_page(src_page);
+unlock:
+ vfs_unlock_two_pages(src_page, dest_page);
put_page(dest_page);
put_page(src_page);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 04f09689cd6d..1600034a929b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -119,6 +119,7 @@ static int traverse(struct seq_file *m, loff_t offset)
}
if (seq_has_overflowed(m))
goto Eoverflow;
+ p = m->op->next(m, p, &m->index);
if (pos + m->count > offset) {
m->from = offset - pos;
m->count -= m->from;
@@ -126,7 +127,6 @@ static int traverse(struct seq_file *m, loff_t offset)
}
pos += m->count;
m->count = 0;
- p = m->op->next(m, p, &m->index);
if (pos == offset)
break;
}
diff --git a/fs/super.c b/fs/super.c
index e486a442a61f..9459ba75a32e 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -1213,7 +1213,18 @@ int get_tree_single(struct fs_context *fc,
}
EXPORT_SYMBOL(get_tree_single);
+int get_tree_keyed(struct fs_context *fc,
+ int (*fill_super)(struct super_block *sb,
+ struct fs_context *fc),
+ void *key)
+{
+ fc->s_fs_info = key;
+ return vfs_get_super(fc, vfs_get_keyed_super, fill_super);
+}
+EXPORT_SYMBOL(get_tree_keyed);
+
#ifdef CONFIG_BLOCK
+
static int set_bdev_super(struct super_block *s, void *data)
{
s->s_bdev = data;
@@ -1223,6 +1234,99 @@ static int set_bdev_super(struct super_block *s, void *data)
return 0;
}
+static int set_bdev_super_fc(struct super_block *s, struct fs_context *fc)
+{
+ return set_bdev_super(s, fc->sget_key);
+}
+
+static int test_bdev_super_fc(struct super_block *s, struct fs_context *fc)
+{
+ return s->s_bdev == fc->sget_key;
+}
+
+/**
+ * get_tree_bdev - Get a superblock based on a single block device
+ * @fc: The filesystem context holding the parameters
+ * @fill_super: Helper to initialise a new superblock
+ */
+int get_tree_bdev(struct fs_context *fc,
+ int (*fill_super)(struct super_block *,
+ struct fs_context *))
+{
+ struct block_device *bdev;
+ struct super_block *s;
+ fmode_t mode = FMODE_READ | FMODE_EXCL;
+ int error = 0;
+
+ if (!(fc->sb_flags & SB_RDONLY))
+ mode |= FMODE_WRITE;
+
+ if (!fc->source)
+ return invalf(fc, "No source specified");
+
+ bdev = blkdev_get_by_path(fc->source, mode, fc->fs_type);
+ if (IS_ERR(bdev)) {
+ errorf(fc, "%s: Can't open blockdev", fc->source);
+ return PTR_ERR(bdev);
+ }
+
+ /* Once the superblock is inserted into the list by sget_fc(), s_umount
+ * will protect the lockfs code from trying to start a snapshot while
+ * we are mounting
+ */
+ mutex_lock(&bdev->bd_fsfreeze_mutex);
+ if (bdev->bd_fsfreeze_count > 0) {
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ warnf(fc, "%pg: Can't mount, blockdev is frozen", bdev);
+ return -EBUSY;
+ }
+
+ fc->sb_flags |= SB_NOSEC;
+ fc->sget_key = bdev;
+ s = sget_fc(fc, test_bdev_super_fc, set_bdev_super_fc);
+ mutex_unlock(&bdev->bd_fsfreeze_mutex);
+ if (IS_ERR(s))
+ return PTR_ERR(s);
+
+ if (s->s_root) {
+ /* Don't summarily change the RO/RW state. */
+ if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
+ warnf(fc, "%pg: Can't mount, would change RO state", bdev);
+ deactivate_locked_super(s);
+ blkdev_put(bdev, mode);
+ return -EBUSY;
+ }
+
+ /*
+ * s_umount nests inside bd_mutex during
+ * __invalidate_device(). blkdev_put() acquires
+ * bd_mutex and can't be called under s_umount. Drop
+ * s_umount temporarily. This is safe as we're
+ * holding an active reference.
+ */
+ up_write(&s->s_umount);
+ blkdev_put(bdev, mode);
+ down_write(&s->s_umount);
+ } else {
+ s->s_mode = mode;
+ snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
+ sb_set_blocksize(s, block_size(bdev));
+ error = fill_super(s, fc);
+ if (error) {
+ deactivate_locked_super(s);
+ return error;
+ }
+
+ s->s_flags |= SB_ACTIVE;
+ bdev->bd_super = s;
+ }
+
+ BUG_ON(fc->root);
+ fc->root = dget(s->s_root);
+ return 0;
+}
+EXPORT_SYMBOL(get_tree_bdev);
+
static int test_bdev_super(struct super_block *s, void *data)
{
return (void *)s->s_bdev == data;
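As with get_tree_keyed() above, the new get_tree_bdev() helper lets a block-device-backed filesystem collapse its mount path to a thin ->get_tree hook. A hypothetical sketch (the blkfs names are invented; only get_tree_bdev() and the fs_context_operations hook come from this patch):

#include <linux/fs_context.h>

static int blkfs_get_tree(struct fs_context *fc)
{
        /* get_tree_bdev() opens fc->source with blkdev_get_by_path(), looks
         * the superblock up via sget_fc() keyed on the bdev, and calls the
         * fill_super callback only for a freshly allocated sb. */
        return get_tree_bdev(fc, blkfs_fill_super);
}

static const struct fs_context_operations blkfs_context_ops = {
        .get_tree = blkfs_get_tree,
};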
diff --git a/fs/timerfd.c b/fs/timerfd.c
index 6a6fc8aa1de7..48305ba41e3c 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -471,7 +471,11 @@ static int do_timerfd_settime(int ufd, int flags,
break;
}
spin_unlock_irq(&ctx->wqh.lock);
- cpu_relax();
+
+ if (isalarm(ctx))
+ hrtimer_cancel_wait_running(&ctx->t.alarm.timer);
+ else
+ hrtimer_cancel_wait_running(&ctx->t.tmr);
}
/*
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 80d7301ab76d..c0b84e960b20 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -51,7 +51,7 @@
static void shrink_liability(struct ubifs_info *c, int nr_to_write)
{
down_read(&c->vfs_sb->s_umount);
- writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
+ writeback_inodes_sb_nr(c->vfs_sb, nr_to_write, WB_REASON_FS_FREE_SPACE);
up_read(&c->vfs_sb->s_umount);
}
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index b52624e28fa1..3b4b4114f208 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -129,7 +129,6 @@ static void __orphan_drop(struct ubifs_info *c, struct ubifs_orphan *o)
static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
{
if (orph->del) {
- spin_unlock(&c->orphan_lock);
dbg_gen("deleted twice ino %lu", orph->inum);
return;
}
@@ -138,7 +137,6 @@ static void orphan_delete(struct ubifs_info *c, struct ubifs_orphan *orph)
orph->del = 1;
orph->dnext = c->orph_dnext;
c->orph_dnext = orph;
- spin_unlock(&c->orphan_lock);
dbg_gen("delete later ino %lu", orph->inum);
return;
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 3ad6620f14fb..5e1e8ec0589e 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -619,6 +619,10 @@ static int init_constants_early(struct ubifs_info *c)
c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
if (c->max_bu_buf_len > c->leb_size)
c->max_bu_buf_len = c->leb_size;
+
+ /* Log is ready, preserve one LEB for commits. */
+ c->min_log_bytes = c->leb_size;
+
return 0;
}
diff --git a/fs/ufs/Kconfig b/fs/ufs/Kconfig
index fcb41516ea59..6d30adb6b890 100644
--- a/fs/ufs/Kconfig
+++ b/fs/ufs/Kconfig
@@ -9,7 +9,7 @@ config UFS_FS
this file system as well. Saying Y here will allow you to read from
these partitions; if you also want to write to them, say Y to the
experimental "UFS file system write support", below. Please read the
- file <file:Documentation/filesystems/ufs.txt> for more information.
+ file <file:Documentation/admin-guide/ufs.rst> for more information.
The recently released UFS2 variant (used in FreeBSD 5.x) is
READ-ONLY supported.
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index ccbdbd62f0d8..fe6d804a38dc 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -880,6 +880,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
/* len == 0 means wake all */
struct userfaultfd_wake_range range = { .len = 0, };
unsigned long new_flags;
+ bool still_valid;
WRITE_ONCE(ctx->released, true);
@@ -895,8 +896,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
* taking the mmap_sem for writing.
*/
down_write(&mm->mmap_sem);
- if (!mmget_still_valid(mm))
- goto skip_mm;
+ still_valid = mmget_still_valid(mm);
prev = NULL;
for (vma = mm->mmap; vma; vma = vma->vm_next) {
cond_resched();
@@ -907,19 +907,20 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
continue;
}
new_flags = vma->vm_flags & ~(VM_UFFD_MISSING | VM_UFFD_WP);
- prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
- new_flags, vma->anon_vma,
- vma->vm_file, vma->vm_pgoff,
- vma_policy(vma),
- NULL_VM_UFFD_CTX);
- if (prev)
- vma = prev;
- else
- prev = vma;
+ if (still_valid) {
+ prev = vma_merge(mm, prev, vma->vm_start, vma->vm_end,
+ new_flags, vma->anon_vma,
+ vma->vm_file, vma->vm_pgoff,
+ vma_policy(vma),
+ NULL_VM_UFFD_CTX);
+ if (prev)
+ vma = prev;
+ else
+ prev = vma;
+ }
vma->vm_flags = new_flags;
vma->vm_userfaultfd_ctx = NULL_VM_UFFD_CTX;
}
-skip_mm:
up_write(&mm->mmap_sem);
mmput(mm);
wakeup:
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index baf0b72c0a37..07aad70f3931 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3835,15 +3835,28 @@ xfs_bmapi_read(
XFS_STATS_INC(mp, xs_blk_mapr);
ifp = XFS_IFORK_PTR(ip, whichfork);
+ if (!ifp) {
+ /* No CoW fork? Return a hole. */
+ if (whichfork == XFS_COW_FORK) {
+ mval->br_startoff = bno;
+ mval->br_startblock = HOLESTARTBLOCK;
+ mval->br_blockcount = len;
+ mval->br_state = XFS_EXT_NORM;
+ *nmap = 1;
+ return 0;
+ }
- /* No CoW fork? Return a hole. */
- if (whichfork == XFS_COW_FORK && !ifp) {
- mval->br_startoff = bno;
- mval->br_startblock = HOLESTARTBLOCK;
- mval->br_blockcount = len;
- mval->br_state = XFS_EXT_NORM;
- *nmap = 1;
- return 0;
+ /*
+ * A missing attr ifork implies that the inode says we're in
+ * extents or btree format but failed to pass the inode fork
+ * verifier while trying to load it. Treat that as a file
+ * corruption too.
+ */
+#ifdef DEBUG
+ xfs_alert(mp, "%s: inode %llu missing fork %d",
+ __func__, ip->i_ino, whichfork);
+#endif /* DEBUG */
+ return -EFSCORRUPTED;
}
if (!(ifp->if_flags & XFS_IFEXTENTS)) {
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index d1c77fd0815d..0bf56e94bfe9 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -487,10 +487,8 @@ xfs_da3_split(
ASSERT(state->path.active == 0);
oldblk = &state->path.blk[0];
error = xfs_da3_root_split(state, oldblk, addblk);
- if (error) {
- addblk->bp = NULL;
- return error; /* GROT: dir is inconsistent */
- }
+ if (error)
+ goto out;
/*
* Update pointers to the node which used to be block 0 and just got
@@ -505,7 +503,10 @@ xfs_da3_split(
*/
node = oldblk->bp->b_addr;
if (node->hdr.info.forw) {
- ASSERT(be32_to_cpu(node->hdr.info.forw) == addblk->blkno);
+ if (be32_to_cpu(node->hdr.info.forw) != addblk->blkno) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
node = addblk->bp->b_addr;
node->hdr.info.back = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, addblk->bp,
@@ -514,15 +515,19 @@ xfs_da3_split(
}
node = oldblk->bp->b_addr;
if (node->hdr.info.back) {
- ASSERT(be32_to_cpu(node->hdr.info.back) == addblk->blkno);
+ if (be32_to_cpu(node->hdr.info.back) != addblk->blkno) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
node = addblk->bp->b_addr;
node->hdr.info.forw = cpu_to_be32(oldblk->blkno);
xfs_trans_log_buf(state->args->trans, addblk->bp,
XFS_DA_LOGRANGE(node, &node->hdr.info,
sizeof(node->hdr.info)));
}
+out:
addblk->bp = NULL;
- return 0;
+ return error;
}
/*
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index afcc6642690a..1fc44efc344d 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -741,7 +741,8 @@ xfs_dir2_leafn_lookup_for_entry(
ents = dp->d_ops->leaf_ents_p(leaf);
xfs_dir3_leaf_check(dp, bp);
- ASSERT(leafhdr.count > 0);
+ if (leafhdr.count <= 0)
+ return -EFSCORRUPTED;
/*
* Look up the hash value in the leaf entries.
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 6f7848cd5527..affa557c2337 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -67,7 +67,7 @@ xfs_find_handle(
return -EBADF;
inode = file_inode(f.file);
} else {
- error = user_lpath((const char __user *)hreq->path, &path);
+ error = user_path_at(AT_FDCWD, hreq->path, 0, &path);
if (error)
return error;
inode = d_inode(path.dentry);
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 7fcf7569743f..7bd7534f5051 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -547,63 +547,12 @@ xfs_file_compat_ioctl(
struct inode *inode = file_inode(filp);
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- void __user *arg = (void __user *)p;
+ void __user *arg = compat_ptr(p);
int error;
trace_xfs_file_compat_ioctl(ip);
switch (cmd) {
- /* No size or alignment issues on any arch */
- case XFS_IOC_DIOINFO:
- case XFS_IOC_FSGEOMETRY_V4:
- case XFS_IOC_FSGEOMETRY:
- case XFS_IOC_AG_GEOMETRY:
- case XFS_IOC_FSGETXATTR:
- case XFS_IOC_FSSETXATTR:
- case XFS_IOC_FSGETXATTRA:
- case XFS_IOC_FSSETDM:
- case XFS_IOC_GETBMAP:
- case XFS_IOC_GETBMAPA:
- case XFS_IOC_GETBMAPX:
- case XFS_IOC_FSCOUNTS:
- case XFS_IOC_SET_RESBLKS:
- case XFS_IOC_GET_RESBLKS:
- case XFS_IOC_FSGROWFSLOG:
- case XFS_IOC_GOINGDOWN:
- case XFS_IOC_ERROR_INJECTION:
- case XFS_IOC_ERROR_CLEARALL:
- case FS_IOC_GETFSMAP:
- case XFS_IOC_SCRUB_METADATA:
- case XFS_IOC_BULKSTAT:
- case XFS_IOC_INUMBERS:
- return xfs_file_ioctl(filp, cmd, p);
-#if !defined(BROKEN_X86_ALIGNMENT) || defined(CONFIG_X86_X32)
- /*
- * These are handled fine if no alignment issues. To support x32
- * which uses native 64-bit alignment we must emit these cases in
- * addition to the ia-32 compat set below.
- */
- case XFS_IOC_ALLOCSP:
- case XFS_IOC_FREESP:
- case XFS_IOC_RESVSP:
- case XFS_IOC_UNRESVSP:
- case XFS_IOC_ALLOCSP64:
- case XFS_IOC_FREESP64:
- case XFS_IOC_RESVSP64:
- case XFS_IOC_UNRESVSP64:
- case XFS_IOC_FSGEOMETRY_V1:
- case XFS_IOC_FSGROWFSDATA:
- case XFS_IOC_FSGROWFSRT:
- case XFS_IOC_ZERO_RANGE:
-#ifdef CONFIG_X86_X32
- /*
- * x32 special: this gets a different cmd number from the ia-32 compat
- * case below; the associated data will match native 64-bit alignment.
- */
- case XFS_IOC_SWAPEXT:
-#endif
- return xfs_file_ioctl(filp, cmd, p);
-#endif
#if defined(BROKEN_X86_ALIGNMENT)
case XFS_IOC_ALLOCSP_32:
case XFS_IOC_FREESP_32:
@@ -705,6 +654,7 @@ xfs_file_compat_ioctl(
case XFS_IOC_FSSETDM_BY_HANDLE_32:
return xfs_compat_fssetdm_by_handle(filp, arg);
default:
- return -ENOIOCTLCMD;
+ /* try the native version */
+ return xfs_file_ioctl(filp, cmd, (unsigned long)arg);
}
}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ff3c1fae5357..fe285d123d69 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -793,6 +793,7 @@ xfs_setattr_nonsize(
out_cancel:
xfs_trans_cancel(tp);
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
out_dqrele:
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 00e9f5c388d3..7fc3c1ad36bc 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -429,10 +429,7 @@ xfs_log_reserve(
ASSERT(*ticp == NULL);
tic = xlog_ticket_alloc(log, unit_bytes, cnt, client, permanent,
- KM_SLEEP | KM_MAYFAIL);
- if (!tic)
- return -ENOMEM;
-
+ KM_SLEEP);
*ticp = tic;
xlog_grant_push_ail(log, tic->t_cnt ? tic->t_unit_res * tic->t_cnt
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
index 0c954cad7449..a339bd5fa260 100644
--- a/fs/xfs/xfs_pnfs.c
+++ b/fs/xfs/xfs_pnfs.c
@@ -32,7 +32,7 @@ xfs_break_leased_layouts(
struct xfs_inode *ip = XFS_I(inode);
int error;
- while ((error = break_layout(inode, false) == -EWOULDBLOCK)) {
+ while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
xfs_iunlock(ip, *iolock);
*did_unlock = true;
error = break_layout(inode, true);
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index c4ec7afd1170..edbe37b7f636 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -1190,11 +1190,11 @@ xfs_reflink_remap_blocks(
}
/*
- * Grab the exclusive iolock for a data copy from src to dest, making
- * sure to abide vfs locking order (lowest pointer value goes first) and
- * breaking the pnfs layout leases on dest before proceeding. The loop
- * is needed because we cannot call the blocking break_layout() with the
- * src iolock held, and therefore have to back out both locks.
+ * Grab the exclusive iolock for a data copy from src to dest, making sure to
+ * abide vfs locking order (lowest pointer value goes first) and breaking the
+ * layout leases before proceeding. The loop is needed because we cannot call
+ * the blocking break_layout() with the iolocks held, and therefore have to
+ * back out both locks.
*/
static int
xfs_iolock_two_inodes_and_break_layout(
@@ -1203,33 +1203,44 @@ xfs_iolock_two_inodes_and_break_layout(
{
int error;
-retry:
- if (src < dest) {
- inode_lock_shared(src);
- inode_lock_nested(dest, I_MUTEX_NONDIR2);
- } else {
- /* src >= dest */
- inode_lock(dest);
- }
+ if (src > dest)
+ swap(src, dest);
- error = break_layout(dest, false);
- if (error == -EWOULDBLOCK) {
- inode_unlock(dest);
- if (src < dest)
- inode_unlock_shared(src);
+retry:
+ /* Wait to break both inodes' layouts before we start locking. */
+ error = break_layout(src, true);
+ if (error)
+ return error;
+ if (src != dest) {
error = break_layout(dest, true);
if (error)
return error;
- goto retry;
}
+
+ /* Lock one inode and make sure nobody got in and leased it. */
+ inode_lock(src);
+ error = break_layout(src, false);
if (error) {
+ inode_unlock(src);
+ if (error == -EWOULDBLOCK)
+ goto retry;
+ return error;
+ }
+
+ if (src == dest)
+ return 0;
+
+ /* Lock the other inode and make sure nobody got in and leased it. */
+ inode_lock_nested(dest, I_MUTEX_NONDIR2);
+ error = break_layout(dest, false);
+ if (error) {
+ inode_unlock(src);
inode_unlock(dest);
- if (src < dest)
- inode_unlock_shared(src);
+ if (error == -EWOULDBLOCK)
+ goto retry;
return error;
}
- if (src > dest)
- inode_lock_shared_nested(src, I_MUTEX_NONDIR2);
+
return 0;
}
@@ -1247,10 +1258,10 @@ xfs_reflink_remap_unlock(
xfs_iunlock(dest, XFS_MMAPLOCK_EXCL);
if (!same_inode)
- xfs_iunlock(src, XFS_MMAPLOCK_SHARED);
+ xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
inode_unlock(inode_out);
if (!same_inode)
- inode_unlock_shared(inode_in);
+ inode_unlock(inode_in);
}
/*
@@ -1325,7 +1336,7 @@ xfs_reflink_remap_prep(
if (same_inode)
xfs_ilock(src, XFS_MMAPLOCK_EXCL);
else
- xfs_lock_two_inodes(src, XFS_MMAPLOCK_SHARED, dest,
+ xfs_lock_two_inodes(src, XFS_MMAPLOCK_EXCL, dest,
XFS_MMAPLOCK_EXCL);
/* Check file eligibility and prepare for block sharing. */