Diffstat (limited to 'fs')
 fs/9p/vfs_addr.c | 3
 fs/afs/file.c | 3
 fs/bcachefs/acl.c | 11
 fs/bcachefs/acl.h | 2
 fs/bcachefs/alloc_background.c | 77
 fs/bcachefs/alloc_background.h | 42
 fs/bcachefs/alloc_foreground.c | 34
 fs/bcachefs/alloc_foreground.h | 9
 fs/bcachefs/backpointers.c | 23
 fs/bcachefs/backpointers.h | 5
 fs/bcachefs/bcachefs.h | 3
 fs/bcachefs/bcachefs_format.h | 6
 fs/bcachefs/bkey.h | 7
 fs/bcachefs/bkey_methods.c | 109
 fs/bcachefs/bkey_methods.h | 21
 fs/bcachefs/btree_gc.c | 5
 fs/bcachefs/btree_io.c | 69
 fs/bcachefs/btree_iter.c | 6
 fs/bcachefs/btree_iter.h | 42
 fs/bcachefs/btree_key_cache.c | 5
 fs/bcachefs/btree_key_cache.h | 18
 fs/bcachefs/btree_node_scan.c | 2
 fs/bcachefs/btree_trans_commit.c | 82
 fs/bcachefs/btree_update_interior.c | 18
 fs/bcachefs/buckets.c | 42
 fs/bcachefs/buckets.h | 2
 fs/bcachefs/buckets_waiting_for_journal.c | 11
 fs/bcachefs/data_update.c | 6
 fs/bcachefs/debug.c | 38
 fs/bcachefs/dirent.c | 33
 fs/bcachefs/dirent.h | 5
 fs/bcachefs/disk_accounting.c | 89
 fs/bcachefs/disk_accounting.h | 60
 fs/bcachefs/disk_accounting_format.h | 23
 fs/bcachefs/ec.c | 49
 fs/bcachefs/ec.h | 5
 fs/bcachefs/errcode.h | 1
 fs/bcachefs/error.c | 22
 fs/bcachefs/error.h | 39
 fs/bcachefs/extents.c | 144
 fs/bcachefs/extents.h | 24
 fs/bcachefs/fs.c | 10
 fs/bcachefs/inode.c | 77
 fs/bcachefs/inode.h | 24
 fs/bcachefs/io_misc.c | 6
 fs/bcachefs/io_read.c | 1
 fs/bcachefs/io_write.c | 5
 fs/bcachefs/journal_io.c | 24
 fs/bcachefs/lru.c | 9
 fs/bcachefs/lru.h | 5
 fs/bcachefs/opts.h | 5
 fs/bcachefs/quota.c | 8
 fs/bcachefs/quota.h | 5
 fs/bcachefs/reflink.c | 19
 fs/bcachefs/reflink.h | 22
 fs/bcachefs/replicas.c | 1
 fs/bcachefs/sb-downgrade.c | 29
 fs/bcachefs/sb-errors_format.h | 6
 fs/bcachefs/snapshot.c | 42
 fs/bcachefs/snapshot.h | 11
 fs/bcachefs/subvolume.c | 16
 fs/bcachefs/subvolume.h | 5
 fs/bcachefs/super-io.c | 4
 fs/bcachefs/super.c | 1
 fs/bcachefs/sysfs.c | 6
 fs/bcachefs/trace.c | 1
 fs/bcachefs/trace.h | 27
 fs/bcachefs/xattr.c | 21
 fs/bcachefs/xattr.h | 5
 fs/binfmt_flat.c | 4
 fs/btrfs/block-group.c | 13
 fs/btrfs/ctree.h | 1
 fs/btrfs/delayed-ref.c | 67
 fs/btrfs/delayed-ref.h | 2
 fs/btrfs/direct-io.c | 38
 fs/btrfs/extent-tree.c | 54
 fs/btrfs/extent_io.c | 14
 fs/btrfs/extent_map.c | 24
 fs/btrfs/file.c | 22
 fs/btrfs/free-space-cache.c | 18
 fs/btrfs/inode.c | 29
 fs/btrfs/print-tree.c | 2
 fs/btrfs/scrub.c | 25
 fs/btrfs/send.c | 54
 fs/btrfs/space-info.c | 5
 fs/btrfs/space-info.h | 1
 fs/btrfs/super.c | 23
 fs/btrfs/tests/extent-map-tests.c | 99
 fs/btrfs/tree-checker.c | 123
 fs/ceph/addr.c | 28
 fs/ceph/caps.c | 35
 fs/ceph/inode.c | 2
 fs/ceph/super.h | 7
 fs/exec.c | 8
 fs/file.c | 31
 fs/fuse/dev.c | 6
 fs/inode.c | 39
 fs/libfs.c | 35
 fs/locks.c | 2
 fs/netfs/Kconfig | 2
 fs/netfs/buffered_read.c | 123
 fs/netfs/buffered_write.c | 2
 fs/netfs/fscache_cookie.c | 4
 fs/netfs/io.c | 161
 fs/netfs/objects.c | 10
 fs/netfs/write_issue.c | 4
 fs/nfs/fscache.c | 5
 fs/nfs/fscache.h | 2
 fs/nfsd/nfsctl.c | 3
 fs/smb/client/cifs_debug.c | 2
 fs/smb/client/cifsfs.h | 4
 fs/smb/client/cifsglob.h | 36
 fs/smb/client/cifsproto.h | 2
 fs/smb/client/file.c | 24
 fs/smb/client/inode.c | 17
 fs/smb/client/ioctl.c | 32
 fs/smb/client/misc.c | 65
 fs/smb/client/reparse.c | 4
 fs/smb/client/reparse.h | 19
 fs/smb/client/smb2inode.c | 8
 fs/smb/client/smb2pdu.c | 3
 fs/smb/client/smbdirect.c | 8
 fs/smb/client/trace.h | 51
 fs/smb/client/transport.c | 2
 fs/smb/common/smb2pdu.h | 2
 fs/smb/server/mgmt/share_config.c | 15
 fs/smb/server/mgmt/share_config.h | 4
 fs/smb/server/mgmt/tree_connect.c | 9
 fs/smb/server/mgmt/tree_connect.h | 4
 fs/smb/server/smb2pdu.c | 9
 fs/smb/server/smb_common.c | 9
 fs/smb/server/smb_common.h | 2
 fs/squashfs/inode.c | 7
 fs/tracefs/event_inode.c | 4
 fs/tracefs/inode.c | 12
 fs/tracefs/internal.h | 5
 fs/xfs/libxfs/xfs_quota_defs.h | 2
 fs/xfs/libxfs/xfs_trans_resv.c | 28
 fs/xfs/scrub/agheader_repair.c | 2
 fs/xfs/scrub/bmap.c | 8
 fs/xfs/scrub/parent.c | 2
 fs/xfs/scrub/trace.h | 10
 fs/xfs/xfs_attr_list.c | 2
 fs/xfs/xfs_ioctl.c | 11
 fs/xfs/xfs_trace.h | 10
 fs/xfs/xfs_trans_ail.c | 7
 fs/xfs/xfs_xattr.c | 19
 147 files changed, 2087 insertions(+), 1090 deletions(-)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index a97ceb105cd8..24fdc74caeba 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -75,7 +75,8 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
/* if we just extended the file size, any portion not in
* cache won't be on server and is zeroes */
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (subreq->rreq->origin != NETFS_DIO_READ)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
netfs_subreq_terminated(subreq, err ?: total, false);
}
diff --git a/fs/afs/file.c b/fs/afs/file.c
index c3f0c45ae9a9..ec1be0091fdb 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -242,7 +242,8 @@ static void afs_fetch_data_notify(struct afs_operation *op)
req->error = error;
if (subreq) {
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (subreq->rreq->origin != NETFS_DIO_READ)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
req->subreq = NULL;
} else if (req->done) {
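
Both the 9p hunk and the afs hunk above add the same guard. A minimal sketch of the shared pattern, with a hypothetical helper name (neither filesystem actually factors this out):

/* Only zero-fill the tail of a short read for buffered I/O; a direct-I/O
 * read must report exactly what the server returned, so CLEAR_TAIL is
 * skipped when the request origin is DIO. */
static void complete_short_read(struct netfs_io_subrequest *subreq,
				int err, size_t total)
{
	if (subreq->rreq->origin != NETFS_DIO_READ)
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
	netfs_subreq_terminated(subreq, err ?: total, false);
}
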
diff --git a/fs/bcachefs/acl.c b/fs/bcachefs/acl.c
index a7b425d3c8a0..331a17f3f113 100644
--- a/fs/bcachefs/acl.c
+++ b/fs/bcachefs/acl.c
@@ -272,16 +272,19 @@ bch2_acl_to_xattr(struct btree_trans *trans,
return xattr;
}
-struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
- struct dentry *dentry, int type)
+struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
{
- struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
+ struct bch_inode_info *inode = to_bch_ei(vinode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
- struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter = { NULL };
struct posix_acl *acl = NULL;
+
+ if (rcu)
+ return ERR_PTR(-ECHILD);
+
+ struct btree_trans *trans = bch2_trans_get(c);
retry:
bch2_trans_begin(trans);
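
The prototype change follows the VFS ->get_acl() variant that receives the inode and an RCU-walk flag directly. A sketch of the contract as this hunk uses it (function name and body are illustrative only):

static struct posix_acl *sketch_get_acl(struct inode *vinode, int type, bool rcu)
{
	/* under RCU-walk we may not sleep: bail out with -ECHILD and the
	 * VFS retries in ref-walk mode with rcu == false */
	if (rcu)
		return ERR_PTR(-ECHILD);

	/* slow path: free to start transactions, take locks, do I/O */
	return NULL;	/* placeholder */
}

Note how the hunk also moves bch2_trans_get() below the rcu check, so no transaction is allocated on the path that cannot use it.
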
diff --git a/fs/bcachefs/acl.h b/fs/bcachefs/acl.h
index 27e7eec0f278..fe730a6bf0c1 100644
--- a/fs/bcachefs/acl.h
+++ b/fs/bcachefs/acl.h
@@ -28,7 +28,7 @@ void bch2_acl_to_text(struct printbuf *, const void *, size_t);
#ifdef CONFIG_BCACHEFS_POSIX_ACL
-struct posix_acl *bch2_get_acl(struct mnt_idmap *, struct dentry *, int);
+struct posix_acl *bch2_get_acl(struct inode *, int, bool);
int bch2_set_acl_trans(struct btree_trans *, subvol_inum,
struct bch_inode_unpacked *,
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index d9c5a92fa708..fd3a2522bc3e 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -196,75 +196,71 @@ static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
return DIV_ROUND_UP(bytes, sizeof(u64));
}
-int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
int ret = 0;
/* allow for unknown fields */
- bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
- alloc_v1_val_size_bad,
+ bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v),
+ c, alloc_v1_val_size_bad,
"incorrect value size (%zu < %u)",
bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
return ret;
}
-int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_alloc_unpacked u;
int ret = 0;
- bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
- alloc_v2_unpack_error,
+ bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k),
+ c, alloc_v2_unpack_error,
"unpack error");
fsck_err:
return ret;
}
-int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_alloc_unpacked u;
int ret = 0;
- bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
- alloc_v2_unpack_error,
+ bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k),
+ c, alloc_v2_unpack_error,
"unpack error");
fsck_err:
return ret;
}
-int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags, struct printbuf *err)
+int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
int ret = 0;
- bkey_fsck_err_on(alloc_v4_u64s_noerror(a.v) > bkey_val_u64s(k.k), c, err,
- alloc_v4_val_size_bad,
+ bkey_fsck_err_on(alloc_v4_u64s_noerror(a.v) > bkey_val_u64s(k.k),
+ c, alloc_v4_val_size_bad,
"bad val size (%u > %zu)",
alloc_v4_u64s_noerror(a.v), bkey_val_u64s(k.k));
bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
- BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
- alloc_v4_backpointers_start_bad,
+ BCH_ALLOC_V4_NR_BACKPOINTERS(a.v),
+ c, alloc_v4_backpointers_start_bad,
"invalid backpointers_start");
- bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
- alloc_key_data_type_bad,
+ bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type,
+ c, alloc_key_data_type_bad,
"invalid data type (got %u should be %u)",
a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
for (unsigned i = 0; i < 2; i++)
bkey_fsck_err_on(a.v->io_time[i] > LRU_TIME_MAX,
- c, err,
- alloc_key_io_time_bad,
+ c, alloc_key_io_time_bad,
"invalid io_time[%s]: %llu, max %llu",
i == READ ? "read" : "write",
a.v->io_time[i], LRU_TIME_MAX);
@@ -282,7 +278,7 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
a.v->dirty_sectors ||
a.v->cached_sectors ||
a.v->stripe,
- c, err, alloc_key_empty_but_have_data,
+ c, alloc_key_empty_but_have_data,
"empty data type free but have data %u.%u.%u %u",
stripe_sectors,
a.v->dirty_sectors,
@@ -296,7 +292,7 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
case BCH_DATA_parity:
bkey_fsck_err_on(!a.v->dirty_sectors &&
!stripe_sectors,
- c, err, alloc_key_dirty_sectors_0,
+ c, alloc_key_dirty_sectors_0,
"data_type %s but dirty_sectors==0",
bch2_data_type_str(a.v->data_type));
break;
@@ -305,12 +301,12 @@ int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
a.v->dirty_sectors ||
stripe_sectors ||
a.v->stripe,
- c, err, alloc_key_cached_inconsistency,
+ c, alloc_key_cached_inconsistency,
"data type inconsistency");
bkey_fsck_err_on(!a.v->io_time[READ] &&
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
- c, err, alloc_key_cached_but_read_time_zero,
+ c, alloc_key_cached_but_read_time_zero,
"cached bucket with read_time == 0");
break;
case BCH_DATA_stripe:
@@ -513,14 +509,13 @@ static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
: 0;
}
-int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
int ret = 0;
- bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
- bucket_gens_val_size_bad,
+ bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens),
+ c, bucket_gens_val_size_bad,
"bad val size (%zu != %zu)",
bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
@@ -829,7 +824,19 @@ int bch2_trigger_alloc(struct btree_trans *trans,
struct bch_alloc_v4 old_a_convert;
const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
- struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
+
+ struct bch_alloc_v4 *new_a;
+ if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
+ new_a = bkey_s_to_alloc_v4(new).v;
+ } else {
+ BUG_ON(!(flags & BTREE_TRIGGER_gc));
+
+ struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
+ ret = PTR_ERR_OR_ZERO(new_ka);
+ if (unlikely(ret))
+ goto err;
+ new_a = &new_ka->v;
+ }
if (flags & BTREE_TRIGGER_transactional) {
alloc_data_type_set(new_a, new_a->data_type);
diff --git a/fs/bcachefs/alloc_background.h b/fs/bcachefs/alloc_background.h
index 8d2b62c9588e..fd790b03fbe1 100644
--- a/fs/bcachefs/alloc_background.h
+++ b/fs/bcachefs/alloc_background.h
@@ -82,6 +82,14 @@ static inline bool bucket_data_type_mismatch(enum bch_data_type bucket,
bucket_data_type(bucket) != bucket_data_type(ptr);
}
+/*
+ * It is my general preference to use unsigned types for unsigned quantities -
+ * however, these helpers are used in disk accounting calculations run by
+ * triggers where the output will be negated and added to an s64. unsigned is
+ * right out even though all these quantities will fit in 32 bits, since it
+ * won't be sign extended correctly; u64 will negate "correctly", but s64 is the
+ * simpler option here.
+ */
static inline s64 bch2_bucket_sectors_total(struct bch_alloc_v4 a)
{
return a.stripe_sectors + a.dirty_sectors + a.cached_sectors;
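
A toy program (not part of the patch) demonstrating the sign-extension hazard the comment above is talking about:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t sectors = 8;
	/* unary minus is evaluated in 32-bit unsigned arithmetic, and the
	 * result then widens without a sign bit: 4294967288, not -8 */
	int64_t bad = (int64_t) -sectors;
	/* widening to a signed type first, as the s64 helpers do, negates
	 * correctly */
	int64_t good = -(int64_t) sectors;
	printf("%lld %lld\n", (long long) bad, (long long) good);
	return 0;
}
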
@@ -142,7 +150,9 @@ static inline void alloc_data_type_set(struct bch_alloc_v4 *a, enum bch_data_typ
static inline u64 alloc_lru_idx_read(struct bch_alloc_v4 a)
{
- return a.data_type == BCH_DATA_cached ? a.io_time[READ] : 0;
+ return a.data_type == BCH_DATA_cached
+ ? a.io_time[READ] & LRU_TIME_MAX
+ : 0;
}
#define DATA_TYPES_MOVABLE \
@@ -166,8 +176,8 @@ static inline u64 alloc_lru_idx_fragmentation(struct bch_alloc_v4 a,
* avoid overflowing LRU_TIME_BITS on a corrupted fs, when
* bucket_sectors_dirty is (much) bigger than bucket_size
*/
- u64 d = min(bch2_bucket_sectors_dirty(a),
- ca->mi.bucket_size);
+ u64 d = min_t(s64, bch2_bucket_sectors_dirty(a),
+ ca->mi.bucket_size);
return div_u64(d * (1ULL << 31), ca->mi.bucket_size);
}
@@ -232,52 +242,48 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s
int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
-int bch2_alloc_v1_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
-int bch2_alloc_v2_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
-int bch2_alloc_v3_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
-int bch2_alloc_v4_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_alloc_v1_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_alloc_v2_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_alloc_v3_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int bch2_alloc_v4_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_alloc ((struct bkey_ops) { \
- .key_invalid = bch2_alloc_v1_invalid, \
+ .key_validate = bch2_alloc_v1_validate, \
.val_to_text = bch2_alloc_to_text, \
.trigger = bch2_trigger_alloc, \
.min_val_size = 8, \
})
#define bch2_bkey_ops_alloc_v2 ((struct bkey_ops) { \
- .key_invalid = bch2_alloc_v2_invalid, \
+ .key_validate = bch2_alloc_v2_validate, \
.val_to_text = bch2_alloc_to_text, \
.trigger = bch2_trigger_alloc, \
.min_val_size = 8, \
})
#define bch2_bkey_ops_alloc_v3 ((struct bkey_ops) { \
- .key_invalid = bch2_alloc_v3_invalid, \
+ .key_validate = bch2_alloc_v3_validate, \
.val_to_text = bch2_alloc_to_text, \
.trigger = bch2_trigger_alloc, \
.min_val_size = 16, \
})
#define bch2_bkey_ops_alloc_v4 ((struct bkey_ops) { \
- .key_invalid = bch2_alloc_v4_invalid, \
+ .key_validate = bch2_alloc_v4_validate, \
.val_to_text = bch2_alloc_to_text, \
.swab = bch2_alloc_v4_swab, \
.trigger = bch2_trigger_alloc, \
.min_val_size = 48, \
})
-int bch2_bucket_gens_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_bucket_gens_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_bucket_gens_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_bucket_gens ((struct bkey_ops) { \
- .key_invalid = bch2_bucket_gens_invalid, \
+ .key_validate = bch2_bucket_gens_validate, \
.val_to_text = bch2_bucket_gens_to_text, \
})
diff --git a/fs/bcachefs/alloc_foreground.c b/fs/bcachefs/alloc_foreground.c
index 618d2ff0292e..8563c2d26847 100644
--- a/fs/bcachefs/alloc_foreground.c
+++ b/fs/bcachefs/alloc_foreground.c
@@ -1603,7 +1603,8 @@ void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct ope
prt_newline(out);
}
-void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c,
+ struct bch_dev *ca)
{
struct open_bucket *ob;
@@ -1613,7 +1614,8 @@ void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
ob++) {
spin_lock(&ob->lock);
- if (ob->valid && !ob->on_partial_list)
+ if (ob->valid && !ob->on_partial_list &&
+ (!ca || ob->dev == ca->dev_idx))
bch2_open_bucket_to_text(out, c, ob);
spin_unlock(&ob->lock);
}
@@ -1738,7 +1740,7 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
printbuf_tabstop_push(out, 16);
printbuf_tabstop_push(out, 16);
- bch2_dev_usage_to_text(out, &stats);
+ bch2_dev_usage_to_text(out, ca, &stats);
prt_newline(out);
@@ -1756,11 +1758,12 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
prt_printf(out, "buckets to invalidate\t%llu\r\n", should_invalidate_buckets(ca, stats));
}
-void bch2_print_allocator_stuck(struct bch_fs *c)
+static noinline void bch2_print_allocator_stuck(struct bch_fs *c)
{
struct printbuf buf = PRINTBUF;
- prt_printf(&buf, "Allocator stuck? Waited for 10 seconds\n");
+ prt_printf(&buf, "Allocator stuck? Waited for %u seconds\n",
+ c->opts.allocator_stuck_timeout);
prt_printf(&buf, "Allocator debug:\n");
printbuf_indent_add(&buf, 2);
@@ -1790,3 +1793,24 @@ void bch2_print_allocator_stuck(struct bch_fs *c)
bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_exit(&buf);
}
+
+static inline unsigned allocator_wait_timeout(struct bch_fs *c)
+{
+ if (c->allocator_last_stuck &&
+ time_after(c->allocator_last_stuck + HZ * 60 * 2, jiffies))
+ return 0;
+
+ return c->opts.allocator_stuck_timeout * HZ;
+}
+
+void __bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
+{
+ unsigned t = allocator_wait_timeout(c);
+
+ if (t && closure_sync_timeout(cl, t)) {
+ c->allocator_last_stuck = jiffies;
+ bch2_print_allocator_stuck(c);
+ }
+
+ closure_sync(cl);
+}
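
__bch2_wait_on_allocator() wraps what used to be a bare closure_sync() at call sites (see the btree_update_interior.c hunk further down): wait with a timeout, print the allocator debug dump once if the timeout fires, then finish with an untimed wait. The backoff in allocator_wait_timeout() keeps the dump from repeating: for two minutes after a warning the timeout reads as zero and the warn path is skipped. A standalone sketch of that quiet window (assumed semantics, mirroring the code above):

static unsigned warn_timeout(unsigned long last_warn, unsigned timeout_sec)
{
	/* still inside the two-minute window since the last warning? */
	if (last_warn && time_after(last_warn + HZ * 60 * 2, jiffies))
		return 0;

	return timeout_sec * HZ;
}
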
diff --git a/fs/bcachefs/alloc_foreground.h b/fs/bcachefs/alloc_foreground.h
index 6da9e7e29026..386d231ceca3 100644
--- a/fs/bcachefs/alloc_foreground.h
+++ b/fs/bcachefs/alloc_foreground.h
@@ -223,7 +223,7 @@ static inline struct write_point_specifier writepoint_ptr(struct write_point *wp
void bch2_fs_allocator_foreground_init(struct bch_fs *);
void bch2_open_bucket_to_text(struct printbuf *, struct bch_fs *, struct open_bucket *);
-void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *);
+void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *, struct bch_dev *);
void bch2_open_buckets_partial_to_text(struct printbuf *, struct bch_fs *);
void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);
@@ -231,6 +231,11 @@ void bch2_write_points_to_text(struct printbuf *, struct bch_fs *);
void bch2_fs_alloc_debug_to_text(struct printbuf *, struct bch_fs *);
void bch2_dev_alloc_debug_to_text(struct printbuf *, struct bch_dev *);
-void bch2_print_allocator_stuck(struct bch_fs *);
+void __bch2_wait_on_allocator(struct bch_fs *, struct closure *);
+static inline void bch2_wait_on_allocator(struct bch_fs *c, struct closure *cl)
+{
+ if (cl->closure_get_happened)
+ __bch2_wait_on_allocator(c, cl);
+}
#endif /* _BCACHEFS_ALLOC_FOREGROUND_H */
diff --git a/fs/bcachefs/backpointers.c b/fs/bcachefs/backpointers.c
index 3cc02479a982..d4da6343efa9 100644
--- a/fs/bcachefs/backpointers.c
+++ b/fs/bcachefs/backpointers.c
@@ -47,9 +47,8 @@ static bool extent_matches_bp(struct bch_fs *c,
return false;
}
-int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
@@ -68,8 +67,7 @@ int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
bkey_fsck_err_on((bp.v->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT) >= ca->mi.bucket_size ||
!bpos_eq(bp.k->p, bp_pos),
- c, err,
- backpointer_bucket_offset_wrong,
+ c, backpointer_bucket_offset_wrong,
"backpointer bucket_offset wrong");
fsck_err:
return ret;
@@ -763,27 +761,22 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
btree < BTREE_ID_NR && !ret;
btree++) {
unsigned depth = (BIT_ULL(btree) & btree_leaf_mask) ? 0 : 1;
- struct btree_iter iter;
- struct btree *b;
if (!(BIT_ULL(btree) & btree_leaf_mask) &&
!(BIT_ULL(btree) & btree_interior_mask))
continue;
- bch2_trans_begin(trans);
-
- __for_each_btree_node(trans, iter, btree,
+ ret = __for_each_btree_node(trans, iter, btree,
btree == start.btree ? start.pos : POS_MIN,
- 0, depth, BTREE_ITER_prefetch, b, ret) {
+ 0, depth, BTREE_ITER_prefetch, b, ({
mem_may_pin -= btree_buf_bytes(b);
if (mem_may_pin <= 0) {
c->btree_cache.pinned_nodes_end = *end =
BBPOS(btree, b->key.k.p);
- bch2_trans_iter_exit(trans, &iter);
- return 0;
+ break;
}
- }
- bch2_trans_iter_exit(trans, &iter);
+ 0;
+ }));
}
return ret;
diff --git a/fs/bcachefs/backpointers.h b/fs/bcachefs/backpointers.h
index 6021de1c5e98..7daecadb764e 100644
--- a/fs/bcachefs/backpointers.h
+++ b/fs/bcachefs/backpointers.h
@@ -18,14 +18,13 @@ static inline u64 swab40(u64 x)
((x & 0xff00000000ULL) >> 32));
}
-int bch2_backpointer_invalid(struct bch_fs *, struct bkey_s_c k,
- enum bch_validate_flags, struct printbuf *);
+int bch2_backpointer_validate(struct bch_fs *, struct bkey_s_c k, enum bch_validate_flags);
void bch2_backpointer_to_text(struct printbuf *, const struct bch_backpointer *);
void bch2_backpointer_k_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_backpointer_swab(struct bkey_s);
#define bch2_bkey_ops_backpointer ((struct bkey_ops) { \
- .key_invalid = bch2_backpointer_invalid, \
+ .key_validate = bch2_backpointer_validate, \
.val_to_text = bch2_backpointer_k_to_text, \
.swab = bch2_backpointer_swab, \
.min_val_size = 32, \
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 91361a167dcd..0c7086e00d18 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -447,6 +447,7 @@ BCH_DEBUG_PARAMS_DEBUG()
x(blocked_journal_low_on_space) \
x(blocked_journal_low_on_pin) \
x(blocked_journal_max_in_flight) \
+ x(blocked_key_cache_flush) \
x(blocked_allocate) \
x(blocked_allocate_open_bucket) \
x(blocked_write_buffer_full) \
@@ -893,6 +894,8 @@ struct bch_fs {
struct bch_fs_usage_base __percpu *usage;
u64 __percpu *online_reserved;
+ unsigned long allocator_last_stuck;
+
struct io_clock io_clock[2];
/* JOURNAL SEQ BLACKLIST */
diff --git a/fs/bcachefs/bcachefs_format.h b/fs/bcachefs/bcachefs_format.h
index 74a60b1a4ddf..c75f2e0f32bb 100644
--- a/fs/bcachefs/bcachefs_format.h
+++ b/fs/bcachefs/bcachefs_format.h
@@ -675,7 +675,9 @@ struct bch_sb_field_ext {
x(btree_subvolume_children, BCH_VERSION(1, 6)) \
x(mi_btree_bitmap, BCH_VERSION(1, 7)) \
x(bucket_stripe_sectors, BCH_VERSION(1, 8)) \
- x(disk_accounting_v2, BCH_VERSION(1, 9))
+ x(disk_accounting_v2, BCH_VERSION(1, 9)) \
+ x(disk_accounting_v3, BCH_VERSION(1, 10)) \
+ x(disk_accounting_inum, BCH_VERSION(1, 11))
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
@@ -836,6 +838,8 @@ LE64_BITMASK(BCH_SB_BACKGROUND_COMPRESSION_TYPE_HI,
LE64_BITMASK(BCH_SB_VERSION_UPGRADE_COMPLETE,
struct bch_sb, flags[5], 0, 16);
+LE64_BITMASK(BCH_SB_ALLOCATOR_STUCK_TIMEOUT,
+ struct bch_sb, flags[5], 16, 32);
static inline __u64 BCH_SB_COMPRESSION_TYPE(const struct bch_sb *sb)
{
diff --git a/fs/bcachefs/bkey.h b/fs/bcachefs/bkey.h
index 936357149cf0..e34cb2bf329c 100644
--- a/fs/bcachefs/bkey.h
+++ b/fs/bcachefs/bkey.h
@@ -10,9 +10,10 @@
#include "vstructs.h"
enum bch_validate_flags {
- BCH_VALIDATE_write = (1U << 0),
- BCH_VALIDATE_commit = (1U << 1),
- BCH_VALIDATE_journal = (1U << 2),
+ BCH_VALIDATE_write = BIT(0),
+ BCH_VALIDATE_commit = BIT(1),
+ BCH_VALIDATE_journal = BIT(2),
+ BCH_VALIDATE_silent = BIT(3),
};
#if 0
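
The new BCH_VALIDATE_silent flag goes with the patch-wide rename of key_invalid to key_validate: validate hooks no longer take a caller-supplied printbuf, bkey_fsck_err_on() reports through the filesystem itself (unless silent is set), and callers test the returned error, e.g. -BCH_ERR_fsck_delete_bkey, to decide whether to drop a key. A hypothetical hook in the new convention, modeled on the converted functions elsewhere in this patch:

static int example_validate(struct bch_fs *c, struct bkey_s_c k,
			    enum bch_validate_flags flags)
{
	int ret = 0;

	/* logs via c and jumps to fsck_err with an error code when the
	 * condition fires; nothing is printed under BCH_VALIDATE_silent */
	bkey_fsck_err_on(bkey_val_bytes(k.k) < 8,
			 c, bkey_val_size_too_small,
			 "bad val size (%zu < 8)", bkey_val_bytes(k.k));
fsck_err:
	return ret;
}
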
diff --git a/fs/bcachefs/bkey_methods.c b/fs/bcachefs/bkey_methods.c
index 5f07cf853d0c..88d8958281e8 100644
--- a/fs/bcachefs/bkey_methods.c
+++ b/fs/bcachefs/bkey_methods.c
@@ -27,27 +27,27 @@ const char * const bch2_bkey_types[] = {
NULL
};
-static int deleted_key_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags, struct printbuf *err)
+static int deleted_key_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
return 0;
}
#define bch2_bkey_ops_deleted ((struct bkey_ops) { \
- .key_invalid = deleted_key_invalid, \
+ .key_validate = deleted_key_validate, \
})
#define bch2_bkey_ops_whiteout ((struct bkey_ops) { \
- .key_invalid = deleted_key_invalid, \
+ .key_validate = deleted_key_validate, \
})
-static int empty_val_key_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags, struct printbuf *err)
+static int empty_val_key_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
int ret = 0;
- bkey_fsck_err_on(bkey_val_bytes(k.k), c, err,
- bkey_val_size_nonzero,
+ bkey_fsck_err_on(bkey_val_bytes(k.k),
+ c, bkey_val_size_nonzero,
"incorrect value size (%zu != 0)",
bkey_val_bytes(k.k));
fsck_err:
@@ -55,11 +55,11 @@ fsck_err:
}
#define bch2_bkey_ops_error ((struct bkey_ops) { \
- .key_invalid = empty_val_key_invalid, \
+ .key_validate = empty_val_key_validate, \
})
-static int key_type_cookie_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags, struct printbuf *err)
+static int key_type_cookie_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
return 0;
}
@@ -73,17 +73,17 @@ static void key_type_cookie_to_text(struct printbuf *out, struct bch_fs *c,
}
#define bch2_bkey_ops_cookie ((struct bkey_ops) { \
- .key_invalid = key_type_cookie_invalid, \
+ .key_validate = key_type_cookie_validate, \
.val_to_text = key_type_cookie_to_text, \
.min_val_size = 8, \
})
#define bch2_bkey_ops_hash_whiteout ((struct bkey_ops) {\
- .key_invalid = empty_val_key_invalid, \
+ .key_validate = empty_val_key_validate, \
})
-static int key_type_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags, struct printbuf *err)
+static int key_type_inline_data_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
return 0;
}
@@ -98,9 +98,9 @@ static void key_type_inline_data_to_text(struct printbuf *out, struct bch_fs *c,
datalen, min(datalen, 32U), d.v->data);
}
-#define bch2_bkey_ops_inline_data ((struct bkey_ops) { \
- .key_invalid = key_type_inline_data_invalid, \
- .val_to_text = key_type_inline_data_to_text, \
+#define bch2_bkey_ops_inline_data ((struct bkey_ops) { \
+ .key_validate = key_type_inline_data_validate, \
+ .val_to_text = key_type_inline_data_to_text, \
})
static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
@@ -110,7 +110,7 @@ static bool key_type_set_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_
}
#define bch2_bkey_ops_set ((struct bkey_ops) { \
- .key_invalid = empty_val_key_invalid, \
+ .key_validate = empty_val_key_validate, \
.key_merge = key_type_set_merge, \
})
@@ -123,9 +123,8 @@ const struct bkey_ops bch2_bkey_ops[] = {
const struct bkey_ops bch2_bkey_null_ops = {
};
-int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_bkey_val_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
if (test_bit(BCH_FS_no_invalid_checks, &c->flags))
return 0;
@@ -133,15 +132,15 @@ int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
int ret = 0;
- bkey_fsck_err_on(bkey_val_bytes(k.k) < ops->min_val_size, c, err,
- bkey_val_size_too_small,
+ bkey_fsck_err_on(bkey_val_bytes(k.k) < ops->min_val_size,
+ c, bkey_val_size_too_small,
"bad val size (%zu < %u)",
bkey_val_bytes(k.k), ops->min_val_size);
- if (!ops->key_invalid)
+ if (!ops->key_validate)
return 0;
- ret = ops->key_invalid(c, k, flags, err);
+ ret = ops->key_validate(c, k, flags);
fsck_err:
return ret;
}
@@ -161,18 +160,17 @@ const char *bch2_btree_node_type_str(enum btree_node_type type)
return type == BKEY_TYPE_btree ? "internal btree node" : bch2_btree_id_str(type - 1);
}
-int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum btree_node_type type,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int __bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum btree_node_type type,
+ enum bch_validate_flags flags)
{
if (test_bit(BCH_FS_no_invalid_checks, &c->flags))
return 0;
int ret = 0;
- bkey_fsck_err_on(k.k->u64s < BKEY_U64s, c, err,
- bkey_u64s_too_small,
+ bkey_fsck_err_on(k.k->u64s < BKEY_U64s,
+ c, bkey_u64s_too_small,
"u64s too small (%u < %zu)", k.k->u64s, BKEY_U64s);
if (type >= BKEY_TYPE_NR)
@@ -180,8 +178,8 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
bkey_fsck_err_on(k.k->type < KEY_TYPE_MAX &&
(type == BKEY_TYPE_btree || (flags & BCH_VALIDATE_commit)) &&
- !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)), c, err,
- bkey_invalid_type_for_btree,
+ !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type)),
+ c, bkey_invalid_type_for_btree,
"invalid key type for btree %s (%s)",
bch2_btree_node_type_str(type),
k.k->type < KEY_TYPE_MAX
@@ -189,17 +187,17 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
: "(unknown)");
if (btree_node_type_is_extents(type) && !bkey_whiteout(k.k)) {
- bkey_fsck_err_on(k.k->size == 0, c, err,
- bkey_extent_size_zero,
+ bkey_fsck_err_on(k.k->size == 0,
+ c, bkey_extent_size_zero,
"size == 0");
- bkey_fsck_err_on(k.k->size > k.k->p.offset, c, err,
- bkey_extent_size_greater_than_offset,
+ bkey_fsck_err_on(k.k->size > k.k->p.offset,
+ c, bkey_extent_size_greater_than_offset,
"size greater than offset (%u > %llu)",
k.k->size, k.k->p.offset);
} else {
- bkey_fsck_err_on(k.k->size, c, err,
- bkey_size_nonzero,
+ bkey_fsck_err_on(k.k->size,
+ c, bkey_size_nonzero,
"size != 0");
}
@@ -207,12 +205,12 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
enum btree_id btree = type - 1;
if (btree_type_has_snapshots(btree)) {
- bkey_fsck_err_on(!k.k->p.snapshot, c, err,
- bkey_snapshot_zero,
+ bkey_fsck_err_on(!k.k->p.snapshot,
+ c, bkey_snapshot_zero,
"snapshot == 0");
} else if (!btree_type_has_snapshot_field(btree)) {
- bkey_fsck_err_on(k.k->p.snapshot, c, err,
- bkey_snapshot_nonzero,
+ bkey_fsck_err_on(k.k->p.snapshot,
+ c, bkey_snapshot_nonzero,
"nonzero snapshot");
} else {
/*
@@ -221,34 +219,33 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
*/
}
- bkey_fsck_err_on(bkey_eq(k.k->p, POS_MAX), c, err,
- bkey_at_pos_max,
+ bkey_fsck_err_on(bkey_eq(k.k->p, POS_MAX),
+ c, bkey_at_pos_max,
"key at POS_MAX");
}
fsck_err:
return ret;
}
-int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
+int bch2_bkey_validate(struct bch_fs *c, struct bkey_s_c k,
enum btree_node_type type,
- enum bch_validate_flags flags,
- struct printbuf *err)
+ enum bch_validate_flags flags)
{
- return __bch2_bkey_invalid(c, k, type, flags, err) ?:
- bch2_bkey_val_invalid(c, k, flags, err);
+ return __bch2_bkey_validate(c, k, type, flags) ?:
+ bch2_bkey_val_validate(c, k, flags);
}
int bch2_bkey_in_btree_node(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k, struct printbuf *err)
+ struct bkey_s_c k, enum bch_validate_flags flags)
{
int ret = 0;
- bkey_fsck_err_on(bpos_lt(k.k->p, b->data->min_key), c, err,
- bkey_before_start_of_btree_node,
+ bkey_fsck_err_on(bpos_lt(k.k->p, b->data->min_key),
+ c, bkey_before_start_of_btree_node,
"key before start of btree node");
- bkey_fsck_err_on(bpos_gt(k.k->p, b->data->max_key), c, err,
- bkey_after_end_of_btree_node,
+ bkey_fsck_err_on(bpos_gt(k.k->p, b->data->max_key),
+ c, bkey_after_end_of_btree_node,
"key past end of btree node");
fsck_err:
return ret;
diff --git a/fs/bcachefs/bkey_methods.h b/fs/bcachefs/bkey_methods.h
index baef0722f5fb..3df3dd2723a1 100644
--- a/fs/bcachefs/bkey_methods.h
+++ b/fs/bcachefs/bkey_methods.h
@@ -14,15 +14,15 @@ extern const char * const bch2_bkey_types[];
extern const struct bkey_ops bch2_bkey_null_ops;
/*
- * key_invalid: checks validity of @k, returns 0 if good or -EINVAL if bad. If
+ * key_validate: checks validity of @k, returns 0 if good or -EINVAL if bad. If
* invalid, entire key will be deleted.
*
* When invalid, error string is returned via @err. @rw indicates whether key is
* being read or written; more aggressive checks can be enabled when rw == WRITE.
*/
struct bkey_ops {
- int (*key_invalid)(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags, struct printbuf *err);
+ int (*key_validate)(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags);
void (*val_to_text)(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
void (*swab)(struct bkey_s);
@@ -48,14 +48,13 @@ static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type)
: &bch2_bkey_null_ops;
}
-int bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
-int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
- enum bch_validate_flags, struct printbuf *);
-int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
- enum bch_validate_flags, struct printbuf *);
-int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *,
- struct bkey_s_c, struct printbuf *);
+int bch2_bkey_val_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+int __bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
+ enum bch_validate_flags);
+int bch2_bkey_validate(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
+ enum bch_validate_flags);
+int bch2_bkey_in_btree_node(struct bch_fs *, struct btree *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_bpos_to_text(struct printbuf *, struct bpos);
void bch2_bkey_to_text(struct printbuf *, const struct bkey *);
diff --git a/fs/bcachefs/btree_gc.c b/fs/bcachefs/btree_gc.c
index 6cbf2aa6a947..eb3002c4eae7 100644
--- a/fs/bcachefs/btree_gc.c
+++ b/fs/bcachefs/btree_gc.c
@@ -741,12 +741,9 @@ fsck_err:
static int bch2_mark_superblocks(struct bch_fs *c)
{
- mutex_lock(&c->sb_lock);
gc_pos_set(c, gc_phase(GC_PHASE_sb));
- int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
- mutex_unlock(&c->sb_lock);
- return ret;
+ return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
}
static void bch2_gc_free(struct bch_fs *c)
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 2c424435ca4a..56ea9a77cd4a 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -836,14 +836,13 @@ fsck_err:
return ret;
}
-static int bset_key_invalid(struct bch_fs *c, struct btree *b,
- struct bkey_s_c k,
- bool updated_range, int rw,
- struct printbuf *err)
+static int bset_key_validate(struct bch_fs *c, struct btree *b,
+ struct bkey_s_c k,
+ bool updated_range, int rw)
{
- return __bch2_bkey_invalid(c, k, btree_node_type(b), READ, err) ?:
- (!updated_range ? bch2_bkey_in_btree_node(c, b, k, err) : 0) ?:
- (rw == WRITE ? bch2_bkey_val_invalid(c, k, READ, err) : 0);
+ return __bch2_bkey_validate(c, k, btree_node_type(b), 0) ?:
+ (!updated_range ? bch2_bkey_in_btree_node(c, b, k, 0) : 0) ?:
+ (rw == WRITE ? bch2_bkey_val_validate(c, k, 0) : 0);
}
static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
@@ -858,12 +857,9 @@ static bool bkey_packed_valid(struct bch_fs *c, struct btree *b,
if (!bkeyp_u64s_valid(&b->format, k))
return false;
- struct printbuf buf = PRINTBUF;
struct bkey tmp;
struct bkey_s u = __bkey_disassemble(b, k, &tmp);
- bool ret = __bch2_bkey_invalid(c, u.s_c, btree_node_type(b), READ, &buf);
- printbuf_exit(&buf);
- return ret;
+ return !__bch2_bkey_validate(c, u.s_c, btree_node_type(b), BCH_VALIDATE_silent);
}
static int validate_bset_keys(struct bch_fs *c, struct btree *b,
@@ -915,19 +911,11 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
u = __bkey_disassemble(b, k, &tmp);
- printbuf_reset(&buf);
- if (bset_key_invalid(c, b, u.s_c, updated_range, write, &buf)) {
- printbuf_reset(&buf);
- bset_key_invalid(c, b, u.s_c, updated_range, write, &buf);
- prt_printf(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, u.s_c);
-
- btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i, k,
- btree_node_bad_bkey,
- "invalid bkey: %s", buf.buf);
+ ret = bset_key_validate(c, b, u.s_c, updated_range, write);
+ if (ret == -BCH_ERR_fsck_delete_bkey)
goto drop_this_key;
- }
+ if (ret)
+ goto fsck_err;
if (write)
bch2_bkey_compat(b->c.level, b->c.btree_id, version,
@@ -1228,23 +1216,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
struct bkey tmp;
struct bkey_s u = __bkey_disassemble(b, k, &tmp);
- printbuf_reset(&buf);
-
- if (bch2_bkey_val_invalid(c, u.s_c, READ, &buf) ||
+ ret = bch2_bkey_val_validate(c, u.s_c, READ);
+ if (ret == -BCH_ERR_fsck_delete_bkey ||
(bch2_inject_invalid_keys &&
!bversion_cmp(u.k->version, MAX_VERSION))) {
- printbuf_reset(&buf);
-
- prt_printf(&buf, "invalid bkey: ");
- bch2_bkey_val_invalid(c, u.s_c, READ, &buf);
- prt_printf(&buf, "\n ");
- bch2_bkey_val_to_text(&buf, c, u.s_c);
-
- btree_err(-BCH_ERR_btree_node_read_err_fixable,
- c, NULL, b, i, k,
- btree_node_bad_bkey,
- "%s", buf.buf);
-
btree_keys_account_key_drop(&b->nr, 0, k);
i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
@@ -1253,6 +1228,8 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
set_btree_bset_end(b, b->set);
continue;
}
+ if (ret)
+ goto fsck_err;
if (u.k->type == KEY_TYPE_btree_ptr_v2) {
struct bkey_s_btree_ptr_v2 bp = bkey_s_to_btree_ptr_v2(u);
@@ -1767,6 +1744,8 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
set_btree_node_read_in_flight(b);
+ /* we can't pass the trans to read_done() for fsck errors, so it must be unlocked */
+ bch2_trans_unlock(trans);
bch2_btree_node_read(trans, b, true);
if (btree_node_read_error(b)) {
@@ -1952,18 +1931,14 @@ static void btree_node_write_endio(struct bio *bio)
static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
struct bset *i, unsigned sectors)
{
- struct printbuf buf = PRINTBUF;
bool saw_error;
- int ret;
-
- ret = bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key),
- BKEY_TYPE_btree, WRITE, &buf);
- if (ret)
- bch2_fs_inconsistent(c, "invalid btree node key before write: %s", buf.buf);
- printbuf_exit(&buf);
- if (ret)
+ int ret = bch2_bkey_validate(c, bkey_i_to_s_c(&b->key),
+ BKEY_TYPE_btree, WRITE);
+ if (ret) {
+ bch2_fs_inconsistent(c, "invalid btree node key before write");
return ret;
+ }
ret = validate_bset_keys(c, b, i, WRITE, false, &saw_error) ?:
validate_bset(c, NULL, b, i, b->written, sectors, WRITE, false, &saw_error);
diff --git a/fs/bcachefs/btree_iter.c b/fs/bcachefs/btree_iter.c
index 36872207f09b..2e84d22e17bd 100644
--- a/fs/bcachefs/btree_iter.c
+++ b/fs/bcachefs/btree_iter.c
@@ -1900,6 +1900,7 @@ err:
goto out;
}
+/* Only kept for -tools */
struct btree *bch2_btree_iter_peek_node_and_restart(struct btree_iter *iter)
{
struct btree *b;
@@ -1921,6 +1922,11 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
bch2_trans_verify_not_in_restart(trans);
bch2_btree_iter_verify(iter);
+ ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
+ if (ret)
+ goto err;
+
+
struct btree_path *path = btree_iter_path(trans, iter);
/* already at end? */
diff --git a/fs/bcachefs/btree_iter.h b/fs/bcachefs/btree_iter.h
index c7725865309c..dca62375d7d3 100644
--- a/fs/bcachefs/btree_iter.h
+++ b/fs/bcachefs/btree_iter.h
@@ -600,23 +600,35 @@ void bch2_trans_srcu_unlock(struct btree_trans *);
u32 bch2_trans_begin(struct btree_trans *);
-/*
- * XXX
- * this does not handle transaction restarts from bch2_btree_iter_next_node()
- * correctly
- */
-#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
- _locks_want, _depth, _flags, _b, _ret) \
- for (bch2_trans_node_iter_init((_trans), &(_iter), (_btree_id), \
- _start, _locks_want, _depth, _flags); \
- (_b) = bch2_btree_iter_peek_node_and_restart(&(_iter)), \
- !((_ret) = PTR_ERR_OR_ZERO(_b)) && (_b); \
- (_b) = bch2_btree_iter_next_node(&(_iter)))
+#define __for_each_btree_node(_trans, _iter, _btree_id, _start, \
+ _locks_want, _depth, _flags, _b, _do) \
+({ \
+ bch2_trans_begin((_trans)); \
+ \
+ struct btree_iter _iter; \
+ bch2_trans_node_iter_init((_trans), &_iter, (_btree_id), \
+ _start, _locks_want, _depth, _flags); \
+ int _ret3 = 0; \
+ do { \
+ _ret3 = lockrestart_do((_trans), ({ \
+ struct btree *_b = bch2_btree_iter_peek_node(&_iter); \
+ if (!_b) \
+ break; \
+ \
+ PTR_ERR_OR_ZERO(_b) ?: (_do); \
+ })) ?: \
+ lockrestart_do((_trans), \
+ PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \
+ } while (!_ret3); \
+ \
+ bch2_trans_iter_exit((_trans), &(_iter)); \
+ _ret3; \
+})
#define for_each_btree_node(_trans, _iter, _btree_id, _start, \
- _flags, _b, _ret) \
- __for_each_btree_node(_trans, _iter, _btree_id, _start, \
- 0, 0, _flags, _b, _ret)
+ _flags, _b, _do) \
+ __for_each_btree_node(_trans, _iter, _btree_id, _start, \
+ 0, 0, _flags, _b, _do)
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
unsigned flags)
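
Call sites change shape to match: the caller no longer declares the iterator or threads an error variable through, and the per-node body is passed as the _do statement expression, whose value (0 to continue) becomes the loop's result while lockrestart_do() absorbs transaction restarts. A hypothetical caller in the new style (compare the backpointers.c and debug.c conversions in this patch):

static int count_btree_nodes(struct btree_trans *trans, enum btree_id btree,
			     u64 *nr)
{
	/* iter and b are declared by the macro itself */
	return for_each_btree_node(trans, iter, btree, POS_MIN,
				   BTREE_ITER_prefetch, b, ({
		(*nr)++;
		0;
	}));
}
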
diff --git a/fs/bcachefs/btree_key_cache.c b/fs/bcachefs/btree_key_cache.c
index f2f2e525460b..79954490627c 100644
--- a/fs/bcachefs/btree_key_cache.c
+++ b/fs/bcachefs/btree_key_cache.c
@@ -497,11 +497,6 @@ int bch2_btree_path_traverse_cached(struct btree_trans *trans, struct btree_path
path->l[1].b = NULL;
- if (bch2_btree_node_relock_notrace(trans, path, 0)) {
- path->uptodate = BTREE_ITER_UPTODATE;
- return 0;
- }
-
int ret;
do {
ret = btree_path_traverse_cached_fast(trans, path);
diff --git a/fs/bcachefs/btree_key_cache.h b/fs/bcachefs/btree_key_cache.h
index e6b2cd0dd2c1..51d6289b8dee 100644
--- a/fs/bcachefs/btree_key_cache.h
+++ b/fs/bcachefs/btree_key_cache.h
@@ -11,13 +11,27 @@ static inline size_t bch2_nr_btree_keys_need_flush(struct bch_fs *c)
return max_t(ssize_t, 0, nr_dirty - max_dirty);
}
-static inline bool bch2_btree_key_cache_must_wait(struct bch_fs *c)
+static inline ssize_t __bch2_btree_key_cache_must_wait(struct bch_fs *c)
{
size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
size_t max_dirty = 4096 + (nr_keys * 3) / 4;
- return nr_dirty > max_dirty;
+ return nr_dirty - max_dirty;
+}
+
+static inline bool bch2_btree_key_cache_must_wait(struct bch_fs *c)
+{
+ return __bch2_btree_key_cache_must_wait(c) > 0;
+}
+
+static inline bool bch2_btree_key_cache_wait_done(struct bch_fs *c)
+{
+ size_t nr_dirty = atomic_long_read(&c->btree_key_cache.nr_dirty);
+ size_t nr_keys = atomic_long_read(&c->btree_key_cache.nr_keys);
+ size_t max_dirty = 2048 + (nr_keys * 5) / 8;
+
+ return nr_dirty <= max_dirty;
}
int bch2_btree_key_cache_journal_flush(struct journal *,
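
Splitting must_wait from wait_done adds hysteresis to key cache flushing. Worked numbers, reading straight off the two thresholds: with nr_keys = 1,000,000, committers start blocking once nr_dirty exceeds 4096 + 750,000 = 754,096, and (per the journal_reclaim_wait_done() change in btree_trans_commit.c below) are not released until reclaim brings nr_dirty down to 2048 + 625,000 = 627,048, a window of roughly 127,000 keys instead of waking and immediately re-blocking waiters around a single threshold.
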
diff --git a/fs/bcachefs/btree_node_scan.c b/fs/bcachefs/btree_node_scan.c
index 001107226377..b28c649c6838 100644
--- a/fs/bcachefs/btree_node_scan.c
+++ b/fs/bcachefs/btree_node_scan.c
@@ -530,7 +530,7 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
bch_verbose(c, "%s(): recovering %s", __func__, buf.buf);
printbuf_exit(&buf);
- BUG_ON(bch2_bkey_invalid(c, bkey_i_to_s_c(&tmp.k), BKEY_TYPE_btree, 0, NULL));
+ BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k), BKEY_TYPE_btree, 0));
ret = bch2_journal_key_insert(c, btree, level + 1, &tmp.k);
if (ret)
diff --git a/fs/bcachefs/btree_trans_commit.c b/fs/bcachefs/btree_trans_commit.c
index cca336fe46e9..a0101d9c5d83 100644
--- a/fs/bcachefs/btree_trans_commit.c
+++ b/fs/bcachefs/btree_trans_commit.c
@@ -712,7 +712,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
a->k.version = journal_pos_to_bversion(&trans->journal_res,
(u64 *) entry - (u64 *) trans->journal_entries);
BUG_ON(bversion_zero(a->k.version));
- ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), false);
+ ret = bch2_accounting_mem_mod_locked(trans, accounting_i_to_s_c(a), false, false);
if (ret)
goto revert_fs_usage;
}
@@ -798,7 +798,7 @@ revert_fs_usage:
struct bkey_s_accounting a = bkey_i_to_s_accounting(entry2->start);
bch2_accounting_neg(a);
- bch2_accounting_mem_mod_locked(trans, a.c, false);
+ bch2_accounting_mem_mod_locked(trans, a.c, false, false);
bch2_accounting_neg(a);
}
percpu_up_read(&c->mark_lock);
@@ -818,50 +818,6 @@ static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans
bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
}
-static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
- enum bch_validate_flags flags,
- struct btree_insert_entry *i,
- struct printbuf *err)
-{
- struct bch_fs *c = trans->c;
-
- printbuf_reset(err);
- prt_printf(err, "invalid bkey on insert from %s -> %ps\n",
- trans->fn, (void *) i->ip_allocated);
- printbuf_indent_add(err, 2);
-
- bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
- prt_newline(err);
-
- bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->bkey_type, flags, err);
- bch2_print_string_as_lines(KERN_ERR, err->buf);
-
- bch2_inconsistent_error(c);
- bch2_dump_trans_updates(trans);
-
- return -EINVAL;
-}
-
-static noinline int bch2_trans_commit_journal_entry_invalid(struct btree_trans *trans,
- struct jset_entry *i)
-{
- struct bch_fs *c = trans->c;
- struct printbuf buf = PRINTBUF;
-
- prt_printf(&buf, "invalid bkey on insert from %s\n", trans->fn);
- printbuf_indent_add(&buf, 2);
-
- bch2_journal_entry_to_text(&buf, c, i);
- prt_newline(&buf);
-
- bch2_print_string_as_lines(KERN_ERR, buf.buf);
-
- bch2_inconsistent_error(c);
- bch2_dump_trans_updates(trans);
-
- return -EINVAL;
-}
-
static int bch2_trans_commit_journal_pin_flush(struct journal *j,
struct journal_entry_pin *_pin, u64 seq)
{
@@ -927,7 +883,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
static int journal_reclaim_wait_done(struct bch_fs *c)
{
int ret = bch2_journal_error(&c->journal) ?:
- !bch2_btree_key_cache_must_wait(c);
+ bch2_btree_key_cache_wait_done(c);
if (!ret)
journal_reclaim_kick(&c->journal);
@@ -973,9 +929,13 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
bch2_trans_unlock(trans);
trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
+ track_event_change(&c->times[BCH_TIME_blocked_key_cache_flush], true);
wait_event_freezable(c->journal.reclaim_wait,
(ret = journal_reclaim_wait_done(c)));
+
+ track_event_change(&c->times[BCH_TIME_blocked_key_cache_flush], false);
+
if (ret < 0)
break;
@@ -1060,20 +1020,19 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
goto out_reset;
trans_for_each_update(trans, i) {
- struct printbuf buf = PRINTBUF;
enum bch_validate_flags invalid_flags = 0;
if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
- if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
- i->bkey_type, invalid_flags, &buf)))
- ret = bch2_trans_commit_bkey_invalid(trans, invalid_flags, i, &buf);
- btree_insert_entry_checks(trans, i);
- printbuf_exit(&buf);
-
- if (ret)
+ ret = bch2_bkey_validate(c, bkey_i_to_s_c(i->k),
+ i->bkey_type, invalid_flags);
+ if (unlikely(ret)) {
+ bch2_trans_inconsistent(trans, "invalid bkey on insert from %s -> %ps\n",
+ trans->fn, (void *) i->ip_allocated);
return ret;
+ }
+ btree_insert_entry_checks(trans, i);
}
for (struct jset_entry *i = trans->journal_entries;
@@ -1084,13 +1043,14 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
if (!(flags & BCH_TRANS_COMMIT_no_journal_res))
invalid_flags |= BCH_VALIDATE_write|BCH_VALIDATE_commit;
- if (unlikely(bch2_journal_entry_validate(c, NULL, i,
- bcachefs_metadata_version_current,
- CPU_BIG_ENDIAN, invalid_flags)))
- ret = bch2_trans_commit_journal_entry_invalid(trans, i);
-
- if (ret)
+ ret = bch2_journal_entry_validate(c, NULL, i,
+ bcachefs_metadata_version_current,
+ CPU_BIG_ENDIAN, invalid_flags);
+ if (unlikely(ret)) {
+ bch2_trans_inconsistent(trans, "invalid journal entry on insert from %s\n",
+ trans->fn);
return ret;
+ }
}
if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags))) {
diff --git a/fs/bcachefs/btree_update_interior.c b/fs/bcachefs/btree_update_interior.c
index 31ee50184be2..b3454d4619e8 100644
--- a/fs/bcachefs/btree_update_interior.c
+++ b/fs/bcachefs/btree_update_interior.c
@@ -1264,7 +1264,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
ret = bch2_btree_reserve_get(trans, as, nr_nodes, flags, &cl);
bch2_trans_unlock(trans);
- closure_sync(&cl);
+ bch2_wait_on_allocator(c, &cl);
} while (bch2_err_matches(ret, BCH_ERR_operation_blocked));
}
@@ -1364,18 +1364,10 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)))
bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);
- if (bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
- btree_node_type(b), WRITE, &buf) ?:
- bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf)) {
- printbuf_reset(&buf);
- prt_printf(&buf, "inserting invalid bkey\n ");
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
- prt_printf(&buf, "\n ");
- bch2_bkey_invalid(c, bkey_i_to_s_c(insert),
- btree_node_type(b), WRITE, &buf);
- bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), &buf);
-
- bch2_fs_inconsistent(c, "%s", buf.buf);
+ if (bch2_bkey_validate(c, bkey_i_to_s_c(insert),
+ btree_node_type(b), BCH_VALIDATE_write) ?:
+ bch2_bkey_in_btree_node(c, b, bkey_i_to_s_c(insert), BCH_VALIDATE_write)) {
+ bch2_fs_inconsistent(c, "%s: inserting invalid bkey", __func__);
dump_stack();
}
diff --git a/fs/bcachefs/buckets.c b/fs/bcachefs/buckets.c
index 2650a0d24663..be2bbd248631 100644
--- a/fs/bcachefs/buckets.c
+++ b/fs/bcachefs/buckets.c
@@ -71,17 +71,21 @@ bch2_fs_usage_read_short(struct bch_fs *c)
return ret;
}
-void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
+void bch2_dev_usage_to_text(struct printbuf *out,
+ struct bch_dev *ca,
+ struct bch_dev_usage *usage)
{
prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");
for (unsigned i = 0; i < BCH_DATA_NR; i++) {
bch2_prt_data_type(out, i);
prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
- usage->d[i].buckets,
- usage->d[i].sectors,
- usage->d[i].fragmented);
+ usage->d[i].buckets,
+ usage->d[i].sectors,
+ usage->d[i].fragmented);
}
+
+ prt_printf(out, "capacity\t%llu\r\n", ca->mi.nbuckets);
}
static int bch2_check_fix_ptr(struct btree_trans *trans,
@@ -806,6 +810,20 @@ static int __trigger_extent(struct btree_trans *trans,
ret = bch2_disk_accounting_mod(trans, &acc_btree_key, &replicas_sectors, 1, gc);
if (ret)
return ret;
+ } else {
+ bool insert = !(flags & BTREE_TRIGGER_overwrite);
+ struct disk_accounting_pos acc_inum_key = {
+ .type = BCH_DISK_ACCOUNTING_inum,
+ .inum.inum = k.k->p.inode,
+ };
+ s64 v[3] = {
+ insert ? 1 : -1,
+ insert ? k.k->size : -((s64) k.k->size),
+ replicas_sectors,
+ };
+ ret = bch2_disk_accounting_mod(trans, &acc_inum_key, v, ARRAY_SIZE(v), gc);
+ if (ret)
+ return ret;
}
if (bch2_bkey_rebalance_opts(k)) {
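
For non-btree extents this adds a per-inode-number accounting entry, matching the disk_accounting_inum version added in bcachefs_format.h above. The three-element payload appears to carry (1) an extent count delta, (2) a logical size delta in sectors (k.k->size), and (3) replicated on-disk sectors; the first two are negated explicitly on overwrite, while replicas_sectors is taken as-is, presumably already carrying the right sign from the surrounding trigger.
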
@@ -897,7 +915,6 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
enum bch_data_type type,
unsigned sectors)
{
- struct bch_fs *c = trans->c;
struct btree_iter iter;
int ret = 0;
@@ -907,7 +924,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
return PTR_ERR(a);
if (a->v.data_type && type && a->v.data_type != type) {
- bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
+ bch2_fsck_err(trans, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
bucket_metadata_type_mismatch,
"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
@@ -1028,13 +1045,18 @@ static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
enum btree_iter_update_trigger_flags flags)
{
- struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
+ struct bch_fs *c = trans->c;
+
+ mutex_lock(&c->sb_lock);
+ struct bch_sb_layout layout = ca->disk_sb.sb->layout;
+ mutex_unlock(&c->sb_lock);
+
u64 bucket = 0;
unsigned i, bucket_sectors = 0;
int ret;
- for (i = 0; i < layout->nr_superblocks; i++) {
- u64 offset = le64_to_cpu(layout->sb_offset[i]);
+ for (i = 0; i < layout.nr_superblocks; i++) {
+ u64 offset = le64_to_cpu(layout.sb_offset[i]);
if (offset == BCH_SB_SECTOR) {
ret = bch2_trans_mark_metadata_sectors(trans, ca,
@@ -1045,7 +1067,7 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *c
}
ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
- offset + (1 << layout->sb_max_size_bits),
+ offset + (1 << layout.sb_max_size_bits),
BCH_DATA_sb, &bucket, &bucket_sectors, flags);
if (ret)
return ret;
diff --git a/fs/bcachefs/buckets.h b/fs/bcachefs/buckets.h
index 2d35eeb24a2d..edbdffd508fc 100644
--- a/fs/bcachefs/buckets.h
+++ b/fs/bcachefs/buckets.h
@@ -212,7 +212,7 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)
return ret;
}
-void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev_usage *);
+void bch2_dev_usage_to_text(struct printbuf *, struct bch_dev *, struct bch_dev_usage *);
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
diff --git a/fs/bcachefs/buckets_waiting_for_journal.c b/fs/bcachefs/buckets_waiting_for_journal.c
index ec1b636ef78d..f70eb2127d32 100644
--- a/fs/bcachefs/buckets_waiting_for_journal.c
+++ b/fs/bcachefs/buckets_waiting_for_journal.c
@@ -93,7 +93,7 @@ int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
.dev_bucket = (u64) dev << 56 | bucket,
.journal_seq = journal_seq,
};
- size_t i, size, new_bits, nr_elements = 1, nr_rehashes = 0;
+ size_t i, size, new_bits, nr_elements = 1, nr_rehashes = 0, nr_rehashes_this_size = 0;
int ret = 0;
mutex_lock(&b->lock);
@@ -106,7 +106,7 @@ int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
for (i = 0; i < size; i++)
nr_elements += t->d[i].journal_seq > flushed_seq;
- new_bits = t->bits + (nr_elements * 3 > size);
+ new_bits = ilog2(roundup_pow_of_two(nr_elements * 3));
n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
if (!n) {
@@ -115,7 +115,14 @@ int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
}
retry_rehash:
+ if (nr_rehashes_this_size == 3) {
+ new_bits++;
+ nr_rehashes_this_size = 0;
+ }
+
nr_rehashes++;
+ nr_rehashes_this_size++;
+
bucket_table_init(n, new_bits);
tmp = new;
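
The resizing heuristic also changes: the old code grew by at most one bit whenever load exceeded a third (t->bits + (nr_elements * 3 > size)), while the new code sizes the table directly from the live-element count. For example, with nr_elements = 100, roundup_pow_of_two(300) = 512 and ilog2() yields new_bits = 9, a 512-slot table loaded at no more than one third (about 20% here); if hashing still fails to settle after three rehash attempts at a given size, new_bits is bumped and the per-size counter resets.
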
diff --git a/fs/bcachefs/data_update.c b/fs/bcachefs/data_update.c
index 0087b8555ead..6a854c918496 100644
--- a/fs/bcachefs/data_update.c
+++ b/fs/bcachefs/data_update.c
@@ -250,10 +250,8 @@ restart_drop_extra_replicas:
* it's been hard to reproduce, so this should give us some more
* information when it does occur:
*/
- struct printbuf err = PRINTBUF;
- int invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id), 0, &err);
- printbuf_exit(&err);
-
+ int invalid = bch2_bkey_validate(c, bkey_i_to_s_c(insert), __btree_node_type(0, m->btree_id),
+ BCH_VALIDATE_commit);
if (invalid) {
struct printbuf buf = PRINTBUF;
diff --git a/fs/bcachefs/debug.c b/fs/bcachefs/debug.c
index ebabab171fe5..45aec1afdb0e 100644
--- a/fs/bcachefs/debug.c
+++ b/fs/bcachefs/debug.c
@@ -397,47 +397,27 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct dump_iter *i = file->private_data;
- struct btree_trans *trans;
- struct btree_iter iter;
- struct btree *b;
- ssize_t ret;
i->ubuf = buf;
i->size = size;
i->ret = 0;
- ret = flush_buf(i);
+ ssize_t ret = flush_buf(i);
if (ret)
return ret;
if (bpos_eq(SPOS_MAX, i->from))
return i->ret;
- trans = bch2_trans_get(i->c);
-retry:
- bch2_trans_begin(trans);
-
- for_each_btree_node(trans, iter, i->id, i->from, 0, b, ret) {
- bch2_btree_node_to_text(&i->buf, i->c, b);
- i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
- ? bpos_successor(b->key.k.p)
- : b->key.k.p;
-
- ret = drop_locks_do(trans, flush_buf(i));
- if (ret)
- break;
- }
- bch2_trans_iter_exit(trans, &iter);
-
- if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
- goto retry;
-
- bch2_trans_put(trans);
-
- if (!ret)
- ret = flush_buf(i);
+ return bch2_trans_run(i->c,
+ for_each_btree_node(trans, iter, i->id, i->from, 0, b, ({
+ bch2_btree_node_to_text(&i->buf, i->c, b);
+ i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
+ ? bpos_successor(b->key.k.p)
+ : b->key.k.p;
- return ret ?: i->ret;
+ drop_locks_do(trans, flush_buf(i));
+ }))) ?: i->ret;
}
static const struct file_operations btree_format_debug_ops = {
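
The rewrite folds the open-coded begin/retry/put boilerplate into bch2_trans_run() plus a loop-body expression that is re-executed on transaction restart. A toy model of that shape, where the runner macro owns the restart loop and the caller supplies only the body (trans_run() and ERR_RESTART are invented stand-ins, not the bcachefs helpers):

#include <stdio.h>

#define ERR_RESTART	(-1000)

static int attempts;

static int do_work(void)
{
	/* Pretend the first attempt races and must be retried. */
	return ++attempts < 2 ? ERR_RESTART : 0;
}

/* GNU statement expression, as is idiomatic in kernel code */
#define trans_run(_do)					\
({							\
	int _ret;					\
	do {						\
		_ret = (_do);				\
	} while (_ret == ERR_RESTART);			\
	_ret;						\
})

int main(void)
{
	int ret = trans_run(do_work());
	printf("ret %d after %d attempts\n", ret, attempts);
	return 0;
}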
diff --git a/fs/bcachefs/dirent.c b/fs/bcachefs/dirent.c
index d743da89308e..32bfdf19289a 100644
--- a/fs/bcachefs/dirent.c
+++ b/fs/bcachefs/dirent.c
@@ -100,20 +100,19 @@ const struct bch_hash_desc bch2_dirent_hash_desc = {
.is_visible = dirent_is_visible,
};
-int bch2_dirent_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_dirent_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
struct qstr d_name = bch2_dirent_get_name(d);
int ret = 0;
- bkey_fsck_err_on(!d_name.len, c, err,
- dirent_empty_name,
+ bkey_fsck_err_on(!d_name.len,
+ c, dirent_empty_name,
"empty name");
- bkey_fsck_err_on(bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len), c, err,
- dirent_val_too_big,
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > dirent_val_u64s(d_name.len),
+ c, dirent_val_too_big,
"value too big (%zu > %u)",
bkey_val_u64s(k.k), dirent_val_u64s(d_name.len));
@@ -121,27 +120,27 @@ int bch2_dirent_invalid(struct bch_fs *c, struct bkey_s_c k,
* Check new keys don't exceed the max length
* (older keys may be larger.)
*/
- bkey_fsck_err_on((flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX, c, err,
- dirent_name_too_long,
+ bkey_fsck_err_on((flags & BCH_VALIDATE_commit) && d_name.len > BCH_NAME_MAX,
+ c, dirent_name_too_long,
"dirent name too big (%u > %u)",
d_name.len, BCH_NAME_MAX);
- bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len), c, err,
- dirent_name_embedded_nul,
+ bkey_fsck_err_on(d_name.len != strnlen(d_name.name, d_name.len),
+ c, dirent_name_embedded_nul,
"dirent has stray data after name's NUL");
bkey_fsck_err_on((d_name.len == 1 && !memcmp(d_name.name, ".", 1)) ||
- (d_name.len == 2 && !memcmp(d_name.name, "..", 2)), c, err,
- dirent_name_dot_or_dotdot,
+ (d_name.len == 2 && !memcmp(d_name.name, "..", 2)),
+ c, dirent_name_dot_or_dotdot,
"invalid name");
- bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len), c, err,
- dirent_name_has_slash,
+ bkey_fsck_err_on(memchr(d_name.name, '/', d_name.len),
+ c, dirent_name_has_slash,
"name with /");
bkey_fsck_err_on(d.v->d_type != DT_SUBVOL &&
- le64_to_cpu(d.v->d_inum) == d.k->p.inode, c, err,
- dirent_to_itself,
+ le64_to_cpu(d.v->d_inum) == d.k->p.inode,
+ c, dirent_to_itself,
"dirent points to own directory");
fsck_err:
return ret;
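
Past the mechanical rename, the checks here double as a checklist for what a valid directory entry name looks like: non-empty, no stray bytes after a NUL, not "." or "..", and no path separator. A userspace sketch of the same checks (dirent_name_ok() is illustrative, not the kernel function):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool dirent_name_ok(const char *name, size_t len)
{
	if (!len)				/* empty name */
		return false;
	if (strnlen(name, len) != len)		/* stray data after a NUL */
		return false;
	if ((len == 1 && name[0] == '.') ||
	    (len == 2 && !memcmp(name, "..", 2)))	/* reserved names */
		return false;
	if (memchr(name, '/', len))		/* path separator in a name */
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       dirent_name_ok("file", 4),	/* 1 */
	       dirent_name_ok("", 0),		/* 0 */
	       dirent_name_ok("..", 2),		/* 0 */
	       dirent_name_ok("a/b", 3));	/* 0 */
	return 0;
}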
diff --git a/fs/bcachefs/dirent.h b/fs/bcachefs/dirent.h
index 24037e6e0a09..8945145865c5 100644
--- a/fs/bcachefs/dirent.h
+++ b/fs/bcachefs/dirent.h
@@ -7,12 +7,11 @@
enum bch_validate_flags;
extern const struct bch_hash_desc bch2_dirent_hash_desc;
-int bch2_dirent_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_dirent_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_dirent_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_dirent ((struct bkey_ops) { \
- .key_invalid = bch2_dirent_invalid, \
+ .key_validate = bch2_dirent_validate, \
.val_to_text = bch2_dirent_to_text, \
.min_val_size = 16, \
})
diff --git a/fs/bcachefs/disk_accounting.c b/fs/bcachefs/disk_accounting.c
index dcdd59249c23..e972e2bca546 100644
--- a/fs/bcachefs/disk_accounting.c
+++ b/fs/bcachefs/disk_accounting.c
@@ -114,11 +114,73 @@ int bch2_mod_dev_cached_sectors(struct btree_trans *trans,
return bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
}
-int bch2_accounting_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+static inline bool is_zero(char *start, char *end)
{
- return 0;
+ BUG_ON(start > end);
+
+ for (; start < end; start++)
+ if (*start)
+ return false;
+ return true;
+}
+
+#define field_end(p, member) (((void *) (&p.member)) + sizeof(p.member))
+
+int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
+{
+ struct disk_accounting_pos acc_k;
+ bpos_to_disk_accounting_pos(&acc_k, k.k->p);
+ void *end = &acc_k + 1;
+ int ret = 0;
+
+ switch (acc_k.type) {
+ case BCH_DISK_ACCOUNTING_nr_inodes:
+ end = field_end(acc_k, nr_inodes);
+ break;
+ case BCH_DISK_ACCOUNTING_persistent_reserved:
+ end = field_end(acc_k, persistent_reserved);
+ break;
+ case BCH_DISK_ACCOUNTING_replicas:
+ bkey_fsck_err_on(!acc_k.replicas.nr_devs,
+ c, accounting_key_replicas_nr_devs_0,
+ "accounting key replicas entry with nr_devs=0");
+
+ bkey_fsck_err_on(acc_k.replicas.nr_required > acc_k.replicas.nr_devs ||
+ (acc_k.replicas.nr_required > 1 &&
+ acc_k.replicas.nr_required == acc_k.replicas.nr_devs),
+ c, accounting_key_replicas_nr_required_bad,
+ "accounting key replicas entry with bad nr_required");
+
+ for (unsigned i = 0; i + 1 < acc_k.replicas.nr_devs; i++)
+ bkey_fsck_err_on(acc_k.replicas.devs[i] >= acc_k.replicas.devs[i + 1],
+ c, accounting_key_replicas_devs_unsorted,
+ "accounting key replicas entry with unsorted devs");
+
+ end = (void *) &acc_k.replicas + replicas_entry_bytes(&acc_k.replicas);
+ break;
+ case BCH_DISK_ACCOUNTING_dev_data_type:
+ end = field_end(acc_k, dev_data_type);
+ break;
+ case BCH_DISK_ACCOUNTING_compression:
+ end = field_end(acc_k, compression);
+ break;
+ case BCH_DISK_ACCOUNTING_snapshot:
+ end = field_end(acc_k, snapshot);
+ break;
+ case BCH_DISK_ACCOUNTING_btree:
+ end = field_end(acc_k, btree);
+ break;
+ case BCH_DISK_ACCOUNTING_rebalance_work:
+ end = field_end(acc_k, rebalance_work);
+ break;
+ }
+
+ bkey_fsck_err_on(!is_zero(end, (void *) (&acc_k + 1)),
+ c, accounting_key_junk_at_end,
+ "junk at end of accounting key");
+fsck_err:
+ return ret;
}
void bch2_accounting_key_to_text(struct printbuf *out, struct disk_accounting_pos *k)
@@ -465,6 +527,9 @@ int bch2_gc_accounting_done(struct bch_fs *c)
struct disk_accounting_pos acc_k;
bpos_to_disk_accounting_pos(&acc_k, e->pos);
+ if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR)
+ continue;
+
u64 src_v[BCH_ACCOUNTING_MAX_COUNTERS];
u64 dst_v[BCH_ACCOUNTING_MAX_COUNTERS];
@@ -501,7 +566,7 @@ int bch2_gc_accounting_done(struct bch_fs *c)
struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
accounting_key_init(&k_i.k, &acc_k, src_v, nr);
- bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), false);
+ bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), false, false);
preempt_disable();
struct bch_fs_usage_base *dst = this_cpu_ptr(c->usage);
@@ -530,7 +595,7 @@ static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k)
return 0;
percpu_down_read(&c->mark_lock);
- int ret = __bch2_accounting_mem_mod(c, bkey_s_c_to_accounting(k), false);
+ int ret = bch2_accounting_mem_mod_locked(trans, bkey_s_c_to_accounting(k), false, true);
percpu_up_read(&c->mark_lock);
if (bch2_accounting_key_is_zero(bkey_s_c_to_accounting(k)) &&
@@ -697,6 +762,15 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
struct bkey_s_c_accounting a = bkey_s_c_to_accounting(k);
unsigned nr = bch2_accounting_counters(k.k);
+ struct disk_accounting_pos acc_k;
+ bpos_to_disk_accounting_pos(&acc_k, k.k->p);
+
+ if (acc_k.type >= BCH_DISK_ACCOUNTING_TYPE_NR)
+ continue;
+
+ if (acc_k.type == BCH_DISK_ACCOUNTING_inum)
+ continue;
+
bch2_accounting_mem_read(c, k.k->p, v, nr);
if (memcmp(a.v->d, v, nr * sizeof(u64))) {
@@ -712,9 +786,6 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
mismatch = true;
}
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, a.k->p);
-
switch (acc_k.type) {
case BCH_DISK_ACCOUNTING_persistent_reserved:
base.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
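
The validation relies on a simple layout trick: for each accounting type, compute where its last meaningful field ends, then require every byte from there to the end of the key to be zero. The same idea in standalone form, using a cut-down stand-in for disk_accounting_pos:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct acct_pos {
	unsigned char type;
	union {
		unsigned long long nr_inodes;
		unsigned char      raw[23];
	};
};

#define field_end(p, member) (((char *) &(p)->member) + sizeof((p)->member))

static bool is_zero(const char *start, const char *end)
{
	for (; start < end; start++)
		if (*start)
			return false;
	return true;
}

int main(void)
{
	struct acct_pos p;

	memset(&p, 0, sizeof(p));
	p.type = 1;
	p.nr_inodes = 42;

	printf("clean: %d\n",
	       is_zero(field_end(&p, nr_inodes), (char *) (&p + 1)));	/* 1 */

	p.raw[20] = 0xff;	/* inject junk past the live field */
	printf("clean: %d\n",
	       is_zero(field_end(&p, nr_inodes), (char *) (&p + 1)));	/* 0 */
	return 0;
}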
diff --git a/fs/bcachefs/disk_accounting.h b/fs/bcachefs/disk_accounting.h
index 3d3f25e08b69..f29fd0dd9581 100644
--- a/fs/bcachefs/disk_accounting.h
+++ b/fs/bcachefs/disk_accounting.h
@@ -82,14 +82,13 @@ int bch2_disk_accounting_mod(struct btree_trans *, struct disk_accounting_pos *,
s64 *, unsigned, bool);
int bch2_mod_dev_cached_sectors(struct btree_trans *, unsigned, s64, bool);
-int bch2_accounting_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_accounting_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_accounting_key_to_text(struct printbuf *, struct disk_accounting_pos *);
void bch2_accounting_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_accounting_swab(struct bkey_s);
#define bch2_bkey_ops_accounting ((struct bkey_ops) { \
- .key_invalid = bch2_accounting_invalid, \
+ .key_validate = bch2_accounting_validate, \
.val_to_text = bch2_accounting_to_text, \
.swab = bch2_accounting_swab, \
.min_val_size = 8, \
@@ -107,41 +106,20 @@ static inline int accounting_pos_cmp(const void *_l, const void *_r)
int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, bool);
void bch2_accounting_mem_gc(struct bch_fs *);
-static inline int __bch2_accounting_mem_mod(struct bch_fs *c, struct bkey_s_c_accounting a, bool gc)
-{
- struct bch_accounting_mem *acc = &c->accounting;
- unsigned idx;
-
- EBUG_ON(gc && !acc->gc_running);
-
- while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
- accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
- int ret = bch2_accounting_mem_insert(c, a, gc);
- if (ret)
- return ret;
- }
-
- struct accounting_mem_entry *e = &acc->k.data[idx];
-
- EBUG_ON(bch2_accounting_counters(a.k) != e->nr_counters);
-
- for (unsigned i = 0; i < bch2_accounting_counters(a.k); i++)
- this_cpu_add(e->v[gc][i], a.v->d[i]);
- return 0;
-}
-
/*
* Update in memory counters so they match the btree update we're doing; called
* from transaction commit path
*/
-static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
+static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc, bool read)
{
struct bch_fs *c = trans->c;
+ struct disk_accounting_pos acc_k;
+ bpos_to_disk_accounting_pos(&acc_k, a.k->p);
- if (!gc) {
- struct disk_accounting_pos acc_k;
- bpos_to_disk_accounting_pos(&acc_k, a.k->p);
+ if (acc_k.type == BCH_DISK_ACCOUNTING_inum)
+ return 0;
+ if (!gc && !read) {
switch (acc_k.type) {
case BCH_DISK_ACCOUNTING_persistent_reserved:
trans->fs_usage_delta.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
@@ -162,13 +140,31 @@ static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, stru
}
}
- return __bch2_accounting_mem_mod(c, a, gc);
+ struct bch_accounting_mem *acc = &c->accounting;
+ unsigned idx;
+
+ EBUG_ON(gc && !acc->gc_running);
+
+ while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
+ accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
+ int ret = bch2_accounting_mem_insert(c, a, gc);
+ if (ret)
+ return ret;
+ }
+
+ struct accounting_mem_entry *e = &acc->k.data[idx];
+
+ EBUG_ON(bch2_accounting_counters(a.k) != e->nr_counters);
+
+ for (unsigned i = 0; i < bch2_accounting_counters(a.k); i++)
+ this_cpu_add(e->v[gc][i], a.v->d[i]);
+ return 0;
}
static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
{
percpu_down_read(&trans->c->mark_lock);
- int ret = bch2_accounting_mem_mod_locked(trans, a, gc);
+ int ret = bch2_accounting_mem_mod_locked(trans, a, gc, false);
percpu_up_read(&trans->c->mark_lock);
return ret;
}
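
bch2_accounting_mem_mod_locked() now absorbs the former __bch2_accounting_mem_mod(): search the in-memory table, insert the entry on a miss, then search again, since the insert may reallocate or reorder the table. The idiom, minus the eytzinger layout and percpu counters, reduces to something like this (linear array purely for illustration; bounds checking omitted):

#include <stdio.h>

#define MAX_KEYS 16

static int keys[MAX_KEYS];
static long vals[MAX_KEYS];
static unsigned nr;

static unsigned find(int key)
{
	for (unsigned i = 0; i < nr; i++)
		if (keys[i] == key)
			return i;
	return nr;			/* miss: index past the end */
}

static void add(int key, long delta)
{
	unsigned idx;

	while ((idx = find(key)) >= nr) {
		keys[nr] = key;		/* insert on miss, then re-search */
		vals[nr] = 0;
		nr++;
	}
	vals[idx] += delta;
}

int main(void)
{
	add(7, 10);
	add(7, -3);
	printf("key 7 -> %ld\n", vals[find(7)]);	/* 7 */
	return 0;
}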
diff --git a/fs/bcachefs/disk_accounting_format.h b/fs/bcachefs/disk_accounting_format.h
index cba417060b33..7b6e6c97e6aa 100644
--- a/fs/bcachefs/disk_accounting_format.h
+++ b/fs/bcachefs/disk_accounting_format.h
@@ -103,7 +103,8 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
x(compression, 4) \
x(snapshot, 5) \
x(btree, 6) \
- x(rebalance_work, 7)
+ x(rebalance_work, 7) \
+ x(inum, 8)
enum disk_accounting_type {
#define x(f, nr) BCH_DISK_ACCOUNTING_##f = nr,
@@ -124,20 +125,23 @@ struct bch_dev_data_type {
__u8 data_type;
};
-struct bch_dev_stripe_buckets {
- __u8 dev;
-};
-
struct bch_acct_compression {
__u8 type;
};
struct bch_acct_snapshot {
__u32 id;
-};
+} __packed;
struct bch_acct_btree {
__u32 id;
+} __packed;
+
+struct bch_acct_inum {
+ __u64 inum;
+} __packed;
+
+struct bch_acct_rebalance_work {
};
struct disk_accounting_pos {
@@ -149,12 +153,13 @@ struct disk_accounting_pos {
struct bch_persistent_reserved persistent_reserved;
struct bch_replicas_entry_v1 replicas;
struct bch_dev_data_type dev_data_type;
- struct bch_dev_stripe_buckets dev_stripe_buckets;
struct bch_acct_compression compression;
struct bch_acct_snapshot snapshot;
struct bch_acct_btree btree;
- };
- };
+ struct bch_acct_rebalance_work rebalance_work;
+ struct bch_acct_inum inum;
+ } __packed;
+ } __packed;
struct bpos _pad;
};
};
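
The __packed annotations matter because disk_accounting_pos is overlaid on a fixed-size struct bpos (note the _pad member): alignment padding inside the union would shift the on-disk encoding. A small demonstration of what packing changes (stand-in types, not the real structs):

#include <stdio.h>

struct snap_plain  { unsigned int id; };
struct snap_packed { unsigned int id; } __attribute__((packed));

struct pos_plain  { unsigned char type; struct snap_plain  s; };
struct pos_packed { unsigned char type; struct snap_packed s; } __attribute__((packed));

int main(void)
{
	/* plain: 3 bytes of padding after `type`; packed: none */
	printf("plain %zu packed %zu\n",
	       sizeof(struct pos_plain), sizeof(struct pos_packed));
	return 0;
}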
diff --git a/fs/bcachefs/ec.c b/fs/bcachefs/ec.c
index 9b5b5c9a6c63..141a4c63142f 100644
--- a/fs/bcachefs/ec.c
+++ b/fs/bcachefs/ec.c
@@ -107,24 +107,23 @@ struct ec_bio {
/* Stripes btree keys: */
-int bch2_stripe_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
int ret = 0;
bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
- bpos_gt(k.k->p, POS(0, U32_MAX)), c, err,
- stripe_pos_bad,
+ bpos_gt(k.k->p, POS(0, U32_MAX)),
+ c, stripe_pos_bad,
"stripe at bad pos");
- bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s), c, err,
- stripe_val_size_bad,
+ bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s),
+ c, stripe_val_size_bad,
"incorrect value size (%zu < %u)",
bkey_val_u64s(k.k), stripe_val_u64s(s));
- ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+ ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
return ret;
}
@@ -1809,6 +1808,9 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity);
BUG_ON(v->nr_redundant != h->s->nr_parity);
+ /* We bypass the sector allocator which normally does this: */
+ bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);
+
for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
__clear_bit(v->ptrs[i].dev, devs.d);
if (i < h->s->nr_data)
@@ -2235,6 +2237,23 @@ void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
mutex_unlock(&c->ec_stripes_heap_lock);
}
+static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
+ struct ec_stripe_new *s)
+{
+ prt_printf(out, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs",
+ s->idx, s->nr_data, s->nr_parity,
+ bitmap_weight(s->blocks_allocated, s->nr_data),
+ atomic_read(&s->ref[STRIPE_REF_io]),
+ atomic_read(&s->ref[STRIPE_REF_stripe]),
+ bch2_watermarks[s->h->watermark]);
+
+ struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
+ unsigned i;
+ for_each_set_bit(i, s->blocks_gotten, v->nr_blocks)
+ prt_printf(out, " %u", s->blocks[i]);
+ prt_newline(out);
+}
+
void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
{
struct ec_stripe_head *h;
@@ -2247,23 +2266,15 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
bch2_watermarks[h->watermark]);
if (h->s)
- prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
- h->s->idx, h->s->nr_data, h->s->nr_parity,
- bitmap_weight(h->s->blocks_allocated,
- h->s->nr_data));
+ bch2_new_stripe_to_text(out, c, h->s);
}
mutex_unlock(&c->ec_stripe_head_lock);
prt_printf(out, "in flight:\n");
mutex_lock(&c->ec_stripe_new_lock);
- list_for_each_entry(s, &c->ec_stripe_new_list, list) {
- prt_printf(out, "\tidx %llu blocks %u+%u ref %u %u %s\n",
- s->idx, s->nr_data, s->nr_parity,
- atomic_read(&s->ref[STRIPE_REF_io]),
- atomic_read(&s->ref[STRIPE_REF_stripe]),
- bch2_watermarks[s->h->watermark]);
- }
+ list_for_each_entry(s, &c->ec_stripe_new_list, list)
+ bch2_new_stripe_to_text(out, c, s);
mutex_unlock(&c->ec_stripe_new_lock);
}
diff --git a/fs/bcachefs/ec.h b/fs/bcachefs/ec.h
index 84a23eeb6249..90962b3c0130 100644
--- a/fs/bcachefs/ec.h
+++ b/fs/bcachefs/ec.h
@@ -8,8 +8,7 @@
enum bch_validate_flags;
-int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
@@ -17,7 +16,7 @@ int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_stripe ((struct bkey_ops) { \
- .key_invalid = bch2_stripe_invalid, \
+ .key_validate = bch2_stripe_validate, \
.val_to_text = bch2_stripe_to_text, \
.swab = bch2_ptr_swab, \
.trigger = bch2_trigger_stripe, \
diff --git a/fs/bcachefs/errcode.h b/fs/bcachefs/errcode.h
index a268af3e52bf..ab5a7adece10 100644
--- a/fs/bcachefs/errcode.h
+++ b/fs/bcachefs/errcode.h
@@ -166,6 +166,7 @@
x(0, journal_reclaim_would_deadlock) \
x(EINVAL, fsck) \
x(BCH_ERR_fsck, fsck_fix) \
+ x(BCH_ERR_fsck, fsck_delete_bkey) \
x(BCH_ERR_fsck, fsck_ignore) \
x(BCH_ERR_fsck, fsck_errors_not_fixed) \
x(BCH_ERR_fsck, fsck_repair_unimplemented) \
diff --git a/fs/bcachefs/error.c b/fs/bcachefs/error.c
index a62b63108820..95afa7bf2020 100644
--- a/fs/bcachefs/error.c
+++ b/fs/bcachefs/error.c
@@ -416,6 +416,28 @@ err:
return ret;
}
+int __bch2_bkey_fsck_err(struct bch_fs *c,
+ struct bkey_s_c k,
+ enum bch_fsck_flags flags,
+ enum bch_sb_error_id err,
+ const char *fmt, ...)
+{
+ struct printbuf buf = PRINTBUF;
+ va_list args;
+
+ prt_str(&buf, "invalid bkey ");
+ bch2_bkey_val_to_text(&buf, c, k);
+ prt_str(&buf, "\n ");
+ va_start(args, fmt);
+ prt_vprintf(&buf, fmt, args);
+ va_end(args);
+ prt_str(&buf, ": delete?");
+
+ int ret = __bch2_fsck_err(c, NULL, flags, err, "%s", buf.buf);
+ printbuf_exit(&buf);
+ return ret;
+}
+
void bch2_flush_fsck_errs(struct bch_fs *c)
{
struct fsck_err_state *s, *n;
diff --git a/fs/bcachefs/error.h b/fs/bcachefs/error.h
index 995e6bba9bad..2f1b86978f36 100644
--- a/fs/bcachefs/error.h
+++ b/fs/bcachefs/error.h
@@ -4,6 +4,7 @@
#include <linux/list.h>
#include <linux/printk.h>
+#include "bkey_types.h"
#include "sb-errors.h"
struct bch_dev;
@@ -166,24 +167,30 @@ void bch2_flush_fsck_errs(struct bch_fs *);
#define fsck_err_on(cond, c, _err_type, ...) \
__fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)
-__printf(4, 0)
-static inline void bch2_bkey_fsck_err(struct bch_fs *c,
- struct printbuf *err_msg,
- enum bch_sb_error_id err_type,
- const char *fmt, ...)
-{
- va_list args;
+__printf(5, 6)
+int __bch2_bkey_fsck_err(struct bch_fs *,
+ struct bkey_s_c,
+ enum bch_fsck_flags,
+ enum bch_sb_error_id,
+ const char *, ...);
- va_start(args, fmt);
- prt_vprintf(err_msg, fmt, args);
- va_end(args);
-}
-
-#define bkey_fsck_err(c, _err_msg, _err_type, ...) \
+/*
+ * for now, bkey fsck errors are always handled by deleting the entire key -
+ * this will change at some point
+ */
+#define bkey_fsck_err(c, _err_type, _err_msg, ...) \
do { \
- prt_printf(_err_msg, __VA_ARGS__); \
- bch2_sb_error_count(c, BCH_FSCK_ERR_##_err_type); \
- ret = -BCH_ERR_invalid_bkey; \
+ if ((flags & BCH_VALIDATE_silent)) { \
+ ret = -BCH_ERR_fsck_delete_bkey; \
+ goto fsck_err; \
+ } \
+ int _ret = __bch2_bkey_fsck_err(c, k, FSCK_CAN_FIX, \
+ BCH_FSCK_ERR_##_err_type, \
+ _err_msg, ##__VA_ARGS__); \
+ if (_ret != -BCH_ERR_fsck_fix && \
+ _ret != -BCH_ERR_fsck_ignore) \
+ ret = _ret; \
+ ret = -BCH_ERR_fsck_delete_bkey; \
goto fsck_err; \
} while (0)
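
The reworked bkey_fsck_err() is a classic non-local-exit macro: it assumes the enclosing function declares ret and flags, names the key k, and provides an fsck_err: label to jump to. A stripped-down sketch of that control flow (check_or_bail() and ERR_DELETE_KEY are invented names):

#include <stdio.h>

#define ERR_DELETE_KEY	(-2000)

#define check_or_bail(cond, msg)			\
do {							\
	if (cond) {					\
		fprintf(stderr, "fsck: %s\n", msg);	\
		ret = ERR_DELETE_KEY;			\
		goto fsck_err;				\
	}						\
} while (0)

static int validate(unsigned nr_ptrs)
{
	int ret = 0;			/* required by check_or_bail() */

	check_or_bail(!nr_ptrs, "no ptrs");
	check_or_bail(nr_ptrs > 16, "too many ptrs");
fsck_err:
	return ret;
}

int main(void)
{
	printf("%d %d\n", validate(4), validate(0));	/* 0 -2000 */
	return 0;
}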
diff --git a/fs/bcachefs/extents.c b/fs/bcachefs/extents.c
index 07973198e35f..4419ad3e454e 100644
--- a/fs/bcachefs/extents.c
+++ b/fs/bcachefs/extents.c
@@ -171,17 +171,16 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
/* KEY_TYPE_btree_ptr: */
-int bch2_btree_ptr_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_btree_ptr_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
int ret = 0;
- bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX, c, err,
- btree_ptr_val_too_big,
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > BCH_REPLICAS_MAX,
+ c, btree_ptr_val_too_big,
"value too big (%zu > %u)", bkey_val_u64s(k.k), BCH_REPLICAS_MAX);
- ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+ ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
return ret;
}
@@ -192,28 +191,27 @@ void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
bch2_bkey_ptrs_to_text(out, c, k);
}
-int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_btree_ptr_v2_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
int ret = 0;
bkey_fsck_err_on(bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX,
- c, err, btree_ptr_v2_val_too_big,
+ c, btree_ptr_v2_val_too_big,
"value too big (%zu > %zu)",
bkey_val_u64s(k.k), BKEY_BTREE_PTR_VAL_U64s_MAX);
bkey_fsck_err_on(bpos_ge(bp.v->min_key, bp.k->p),
- c, err, btree_ptr_v2_min_key_bad,
+ c, btree_ptr_v2_min_key_bad,
"min_key > key");
if (flags & BCH_VALIDATE_write)
bkey_fsck_err_on(!bp.v->sectors_written,
- c, err, btree_ptr_v2_written_0,
+ c, btree_ptr_v2_written_0,
"sectors_written == 0");
- ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
+ ret = bch2_bkey_ptrs_validate(c, k, flags);
fsck_err:
return ret;
}
@@ -399,15 +397,14 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
/* KEY_TYPE_reservation: */
-int bch2_reservation_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_reservation_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
int ret = 0;
- bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX, c, err,
- reservation_key_nr_replicas_invalid,
+ bkey_fsck_err_on(!r.v->nr_replicas || r.v->nr_replicas > BCH_REPLICAS_MAX,
+ c, reservation_key_nr_replicas_invalid,
"invalid nr_replicas (%u)", r.v->nr_replicas);
fsck_err:
return ret;
@@ -1102,14 +1099,12 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
}
}
-
-static int extent_ptr_invalid(struct bch_fs *c,
- struct bkey_s_c k,
- enum bch_validate_flags flags,
- const struct bch_extent_ptr *ptr,
- unsigned size_ondisk,
- bool metadata,
- struct printbuf *err)
+static int extent_ptr_validate(struct bch_fs *c,
+ struct bkey_s_c k,
+ enum bch_validate_flags flags,
+ const struct bch_extent_ptr *ptr,
+ unsigned size_ondisk,
+ bool metadata)
{
int ret = 0;
@@ -1128,28 +1123,27 @@ static int extent_ptr_invalid(struct bch_fs *c,
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr(ptrs, ptr2)
- bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
- ptr_to_duplicate_device,
+ bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev,
+ c, ptr_to_duplicate_device,
"multiple pointers to same device (%u)", ptr->dev);
- bkey_fsck_err_on(bucket >= nbuckets, c, err,
- ptr_after_last_bucket,
+ bkey_fsck_err_on(bucket >= nbuckets,
+ c, ptr_after_last_bucket,
"pointer past last bucket (%llu > %llu)", bucket, nbuckets);
- bkey_fsck_err_on(bucket < first_bucket, c, err,
- ptr_before_first_bucket,
+ bkey_fsck_err_on(bucket < first_bucket,
+ c, ptr_before_first_bucket,
"pointer before first bucket (%llu < %u)", bucket, first_bucket);
- bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size, c, err,
- ptr_spans_multiple_buckets,
+ bkey_fsck_err_on(bucket_offset + size_ondisk > bucket_size,
+ c, ptr_spans_multiple_buckets,
"pointer spans multiple buckets (%u + %u > %u)",
bucket_offset, size_ondisk, bucket_size);
fsck_err:
return ret;
}
-int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
@@ -1164,25 +1158,24 @@ int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
size_ondisk = btree_sectors(c);
bkey_extent_entry_for_each(ptrs, entry) {
- bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX, c, err,
- extent_ptrs_invalid_entry,
- "invalid extent entry type (got %u, max %u)",
- __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
+ bkey_fsck_err_on(__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX,
+ c, extent_ptrs_invalid_entry,
+ "invalid extent entry type (got %u, max %u)",
+ __extent_entry_type(entry), BCH_EXTENT_ENTRY_MAX);
bkey_fsck_err_on(bkey_is_btree_ptr(k.k) &&
- !extent_entry_is_ptr(entry), c, err,
- btree_ptr_has_non_ptr,
+ !extent_entry_is_ptr(entry),
+ c, btree_ptr_has_non_ptr,
"has non ptr field");
switch (extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
- ret = extent_ptr_invalid(c, k, flags, &entry->ptr,
- size_ondisk, false, err);
+ ret = extent_ptr_validate(c, k, flags, &entry->ptr, size_ondisk, false);
if (ret)
return ret;
- bkey_fsck_err_on(entry->ptr.cached && have_ec, c, err,
- ptr_cached_and_erasure_coded,
+ bkey_fsck_err_on(entry->ptr.cached && have_ec,
+ c, ptr_cached_and_erasure_coded,
"cached, erasure coded ptr");
if (!entry->ptr.unwritten)
@@ -1199,44 +1192,50 @@ int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
case BCH_EXTENT_ENTRY_crc128:
crc = bch2_extent_crc_unpack(k.k, entry_to_crc(entry));
- bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size, c, err,
- ptr_crc_uncompressed_size_too_small,
+ bkey_fsck_err_on(crc.offset + crc.live_size > crc.uncompressed_size,
+ c, ptr_crc_uncompressed_size_too_small,
"checksum offset + key size > uncompressed size");
- bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type), c, err,
- ptr_crc_csum_type_unknown,
+ bkey_fsck_err_on(!bch2_checksum_type_valid(c, crc.csum_type),
+ c, ptr_crc_csum_type_unknown,
"invalid checksum type");
- bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR, c, err,
- ptr_crc_compression_type_unknown,
+ bkey_fsck_err_on(crc.compression_type >= BCH_COMPRESSION_TYPE_NR,
+ c, ptr_crc_compression_type_unknown,
"invalid compression type");
if (bch2_csum_type_is_encryption(crc.csum_type)) {
if (nonce == UINT_MAX)
nonce = crc.offset + crc.nonce;
else if (nonce != crc.offset + crc.nonce)
- bkey_fsck_err(c, err, ptr_crc_nonce_mismatch,
+ bkey_fsck_err(c, ptr_crc_nonce_mismatch,
"incorrect nonce");
}
- bkey_fsck_err_on(crc_since_last_ptr, c, err,
- ptr_crc_redundant,
+ bkey_fsck_err_on(crc_since_last_ptr,
+ c, ptr_crc_redundant,
"redundant crc entry");
crc_since_last_ptr = true;
bkey_fsck_err_on(crc_is_encoded(crc) &&
(crc.uncompressed_size > c->opts.encoded_extent_max >> 9) &&
- (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)), c, err,
- ptr_crc_uncompressed_size_too_big,
+ (flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit)),
+ c, ptr_crc_uncompressed_size_too_big,
"too large encoded extent");
size_ondisk = crc.compressed_size;
break;
case BCH_EXTENT_ENTRY_stripe_ptr:
- bkey_fsck_err_on(have_ec, c, err,
- ptr_stripe_redundant,
+ bkey_fsck_err_on(have_ec,
+ c, ptr_stripe_redundant,
"redundant stripe entry");
have_ec = true;
break;
case BCH_EXTENT_ENTRY_rebalance: {
+ /*
+ * this shouldn't be a fsck error, for forward
+ * compatibility; the rebalance code should just refetch
+ * the compression opt if it's unknown
+ */
+#if 0
const struct bch_extent_rebalance *r = &entry->rebalance;
if (!bch2_compression_opt_valid(r->compression)) {
@@ -1245,28 +1244,29 @@ int bch2_bkey_ptrs_invalid(struct bch_fs *c, struct bkey_s_c k,
opt.type, opt.level);
return -BCH_ERR_invalid_bkey;
}
+#endif
break;
}
}
}
- bkey_fsck_err_on(!nr_ptrs, c, err,
- extent_ptrs_no_ptrs,
+ bkey_fsck_err_on(!nr_ptrs,
+ c, extent_ptrs_no_ptrs,
"no ptrs");
- bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX, c, err,
- extent_ptrs_too_many_ptrs,
+ bkey_fsck_err_on(nr_ptrs > BCH_BKEY_PTRS_MAX,
+ c, extent_ptrs_too_many_ptrs,
"too many ptrs: %u > %u", nr_ptrs, BCH_BKEY_PTRS_MAX);
- bkey_fsck_err_on(have_written && have_unwritten, c, err,
- extent_ptrs_written_and_unwritten,
+ bkey_fsck_err_on(have_written && have_unwritten,
+ c, extent_ptrs_written_and_unwritten,
"extent with unwritten and written ptrs");
- bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten, c, err,
- extent_ptrs_unwritten,
+ bkey_fsck_err_on(k.k->type != KEY_TYPE_extent && have_unwritten,
+ c, extent_ptrs_unwritten,
"has unwritten ptrs");
- bkey_fsck_err_on(crc_since_last_ptr, c, err,
- extent_ptrs_redundant_crc,
+ bkey_fsck_err_on(crc_since_last_ptr,
+ c, extent_ptrs_redundant_crc,
"redundant crc entry");
- bkey_fsck_err_on(have_ec, c, err,
- extent_ptrs_redundant_stripe,
+ bkey_fsck_err_on(have_ec,
+ c, extent_ptrs_redundant_stripe,
"redundant stripe entry");
fsck_err:
return ret;
diff --git a/fs/bcachefs/extents.h b/fs/bcachefs/extents.h
index facdb8a86eec..1a6ddee48041 100644
--- a/fs/bcachefs/extents.h
+++ b/fs/bcachefs/extents.h
@@ -409,26 +409,26 @@ int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
/* KEY_TYPE_btree_ptr: */
-int bch2_btree_ptr_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_btree_ptr_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-int bch2_btree_ptr_v2_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_btree_ptr_v2_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
int, struct bkey_s);
#define bch2_bkey_ops_btree_ptr ((struct bkey_ops) { \
- .key_invalid = bch2_btree_ptr_invalid, \
+ .key_validate = bch2_btree_ptr_validate, \
.val_to_text = bch2_btree_ptr_to_text, \
.swab = bch2_ptr_swab, \
.trigger = bch2_trigger_extent, \
})
#define bch2_bkey_ops_btree_ptr_v2 ((struct bkey_ops) { \
- .key_invalid = bch2_btree_ptr_v2_invalid, \
+ .key_validate = bch2_btree_ptr_v2_validate, \
.val_to_text = bch2_btree_ptr_v2_to_text, \
.swab = bch2_ptr_swab, \
.compat = bch2_btree_ptr_v2_compat, \
@@ -441,7 +441,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
#define bch2_bkey_ops_extent ((struct bkey_ops) { \
- .key_invalid = bch2_bkey_ptrs_invalid, \
+ .key_validate = bch2_bkey_ptrs_validate, \
.val_to_text = bch2_bkey_ptrs_to_text, \
.swab = bch2_ptr_swab, \
.key_normalize = bch2_extent_normalize, \
@@ -451,13 +451,13 @@ bool bch2_extent_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
/* KEY_TYPE_reservation: */
-int bch2_reservation_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_reservation_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_reservation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reservation_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
#define bch2_bkey_ops_reservation ((struct bkey_ops) { \
- .key_invalid = bch2_reservation_invalid, \
+ .key_validate = bch2_reservation_validate, \
.val_to_text = bch2_reservation_to_text, \
.key_merge = bch2_reservation_merge, \
.trigger = bch2_trigger_reservation, \
@@ -683,8 +683,8 @@ bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *, const struct bch_extent_ptr *);
void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
-int bch2_bkey_ptrs_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_bkey_ptrs_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_ptr_swab(struct bkey_s);
diff --git a/fs/bcachefs/fs.c b/fs/bcachefs/fs.c
index 3a5f49affa0a..94c392abef65 100644
--- a/fs/bcachefs/fs.c
+++ b/fs/bcachefs/fs.c
@@ -193,7 +193,7 @@ static struct bch_inode_info *bch2_inode_insert(struct bch_fs *c, struct bch_ino
* only insert fully created inodes in the inode hash table. But
* discard_new_inode() expects it to be set...
*/
- inode->v.i_flags |= I_NEW;
+ inode->v.i_state |= I_NEW;
/*
* We don't want bch2_evict_inode() to delete the inode on disk,
* we just raced and had another inode in cache. Normally new
@@ -1199,7 +1199,7 @@ static const struct inode_operations bch_file_inode_operations = {
.fiemap = bch2_fiemap,
.listxattr = bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
- .get_acl = bch2_get_acl,
+ .get_inode_acl = bch2_get_acl,
.set_acl = bch2_set_acl,
#endif
};
@@ -1219,7 +1219,7 @@ static const struct inode_operations bch_dir_inode_operations = {
.tmpfile = bch2_tmpfile,
.listxattr = bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
- .get_acl = bch2_get_acl,
+ .get_inode_acl = bch2_get_acl,
.set_acl = bch2_set_acl,
#endif
};
@@ -1241,7 +1241,7 @@ static const struct inode_operations bch_symlink_inode_operations = {
.setattr = bch2_setattr,
.listxattr = bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
- .get_acl = bch2_get_acl,
+ .get_inode_acl = bch2_get_acl,
.set_acl = bch2_set_acl,
#endif
};
@@ -1251,7 +1251,7 @@ static const struct inode_operations bch_special_inode_operations = {
.setattr = bch2_setattr,
.listxattr = bch2_xattr_list,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
- .get_acl = bch2_get_acl,
+ .get_inode_acl = bch2_get_acl,
.set_acl = bch2_set_acl,
#endif
};
diff --git a/fs/bcachefs/inode.c b/fs/bcachefs/inode.c
index 1e20020eadd1..2be6be33afa3 100644
--- a/fs/bcachefs/inode.c
+++ b/fs/bcachefs/inode.c
@@ -434,100 +434,98 @@ struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
return &inode_p->inode.k_i;
}
-static int __bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k, struct printbuf *err)
+static int __bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bch_inode_unpacked unpacked;
int ret = 0;
- bkey_fsck_err_on(k.k->p.inode, c, err,
- inode_pos_inode_nonzero,
+ bkey_fsck_err_on(k.k->p.inode,
+ c, inode_pos_inode_nonzero,
"nonzero k.p.inode");
- bkey_fsck_err_on(k.k->p.offset < BLOCKDEV_INODE_MAX, c, err,
- inode_pos_blockdev_range,
+ bkey_fsck_err_on(k.k->p.offset < BLOCKDEV_INODE_MAX,
+ c, inode_pos_blockdev_range,
"fs inode in blockdev range");
- bkey_fsck_err_on(bch2_inode_unpack(k, &unpacked), c, err,
- inode_unpack_error,
+ bkey_fsck_err_on(bch2_inode_unpack(k, &unpacked),
+ c, inode_unpack_error,
"invalid variable length fields");
- bkey_fsck_err_on(unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1, c, err,
- inode_checksum_type_invalid,
+ bkey_fsck_err_on(unpacked.bi_data_checksum >= BCH_CSUM_OPT_NR + 1,
+ c, inode_checksum_type_invalid,
"invalid data checksum type (%u >= %u",
unpacked.bi_data_checksum, BCH_CSUM_OPT_NR + 1);
bkey_fsck_err_on(unpacked.bi_compression &&
- !bch2_compression_opt_valid(unpacked.bi_compression - 1), c, err,
- inode_compression_type_invalid,
+ !bch2_compression_opt_valid(unpacked.bi_compression - 1),
+ c, inode_compression_type_invalid,
"invalid compression opt %u", unpacked.bi_compression - 1);
bkey_fsck_err_on((unpacked.bi_flags & BCH_INODE_unlinked) &&
- unpacked.bi_nlink != 0, c, err,
- inode_unlinked_but_nlink_nonzero,
+ unpacked.bi_nlink != 0,
+ c, inode_unlinked_but_nlink_nonzero,
"flagged as unlinked but bi_nlink != 0");
- bkey_fsck_err_on(unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode), c, err,
- inode_subvol_root_but_not_dir,
+ bkey_fsck_err_on(unpacked.bi_subvol && !S_ISDIR(unpacked.bi_mode),
+ c, inode_subvol_root_but_not_dir,
"subvolume root but not a directory");
fsck_err:
return ret;
}
-int bch2_inode_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_inode_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
int ret = 0;
- bkey_fsck_err_on(INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
- inode_str_hash_invalid,
+ bkey_fsck_err_on(INODE_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
+ c, inode_str_hash_invalid,
"invalid str hash type (%llu >= %u)",
INODE_STR_HASH(inode.v), BCH_STR_HASH_NR);
- ret = __bch2_inode_invalid(c, k, err);
+ ret = __bch2_inode_validate(c, k, flags);
fsck_err:
return ret;
}
-int bch2_inode_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_inode_v2_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_inode_v2 inode = bkey_s_c_to_inode_v2(k);
int ret = 0;
- bkey_fsck_err_on(INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
- inode_str_hash_invalid,
+ bkey_fsck_err_on(INODEv2_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
+ c, inode_str_hash_invalid,
"invalid str hash type (%llu >= %u)",
INODEv2_STR_HASH(inode.v), BCH_STR_HASH_NR);
- ret = __bch2_inode_invalid(c, k, err);
+ ret = __bch2_inode_validate(c, k, flags);
fsck_err:
return ret;
}
-int bch2_inode_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_inode_v3_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_inode_v3 inode = bkey_s_c_to_inode_v3(k);
int ret = 0;
bkey_fsck_err_on(INODEv3_FIELDS_START(inode.v) < INODEv3_FIELDS_START_INITIAL ||
- INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k), c, err,
- inode_v3_fields_start_bad,
+ INODEv3_FIELDS_START(inode.v) > bkey_val_u64s(inode.k),
+ c, inode_v3_fields_start_bad,
"invalid fields_start (got %llu, min %u max %zu)",
INODEv3_FIELDS_START(inode.v),
INODEv3_FIELDS_START_INITIAL,
bkey_val_u64s(inode.k));
- bkey_fsck_err_on(INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR, c, err,
- inode_str_hash_invalid,
+ bkey_fsck_err_on(INODEv3_STR_HASH(inode.v) >= BCH_STR_HASH_NR,
+ c, inode_str_hash_invalid,
"invalid str hash type (%llu >= %u)",
INODEv3_STR_HASH(inode.v), BCH_STR_HASH_NR);
- ret = __bch2_inode_invalid(c, k, err);
+ ret = __bch2_inode_validate(c, k, flags);
fsck_err:
return ret;
}
@@ -625,14 +623,13 @@ int bch2_trigger_inode(struct btree_trans *trans,
return 0;
}
-int bch2_inode_generation_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_inode_generation_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
int ret = 0;
- bkey_fsck_err_on(k.k->p.inode, c, err,
- inode_pos_inode_nonzero,
+ bkey_fsck_err_on(k.k->p.inode,
+ c, inode_pos_inode_nonzero,
"nonzero k.p.inode");
fsck_err:
return ret;
diff --git a/fs/bcachefs/inode.h b/fs/bcachefs/inode.h
index da0e4a745099..f1fcb4c58039 100644
--- a/fs/bcachefs/inode.h
+++ b/fs/bcachefs/inode.h
@@ -9,12 +9,12 @@
enum bch_validate_flags;
extern const char * const bch2_inode_opts[];
-int bch2_inode_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
-int bch2_inode_v2_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
-int bch2_inode_v3_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_inode_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
+int bch2_inode_v2_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
+int bch2_inode_v3_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_trigger_inode(struct btree_trans *, enum btree_id, unsigned,
@@ -22,21 +22,21 @@ int bch2_trigger_inode(struct btree_trans *, enum btree_id, unsigned,
enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_inode ((struct bkey_ops) { \
- .key_invalid = bch2_inode_invalid, \
+ .key_validate = bch2_inode_validate, \
.val_to_text = bch2_inode_to_text, \
.trigger = bch2_trigger_inode, \
.min_val_size = 16, \
})
#define bch2_bkey_ops_inode_v2 ((struct bkey_ops) { \
- .key_invalid = bch2_inode_v2_invalid, \
+ .key_validate = bch2_inode_v2_validate, \
.val_to_text = bch2_inode_to_text, \
.trigger = bch2_trigger_inode, \
.min_val_size = 32, \
})
#define bch2_bkey_ops_inode_v3 ((struct bkey_ops) { \
- .key_invalid = bch2_inode_v3_invalid, \
+ .key_validate = bch2_inode_v3_validate, \
.val_to_text = bch2_inode_to_text, \
.trigger = bch2_trigger_inode, \
.min_val_size = 48, \
@@ -49,12 +49,12 @@ static inline bool bkey_is_inode(const struct bkey *k)
k->type == KEY_TYPE_inode_v3;
}
-int bch2_inode_generation_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_inode_generation_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_inode_generation_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_inode_generation ((struct bkey_ops) { \
- .key_invalid = bch2_inode_generation_invalid, \
+ .key_validate = bch2_inode_generation_validate, \
.val_to_text = bch2_inode_generation_to_text, \
.min_val_size = 8, \
})
diff --git a/fs/bcachefs/io_misc.c b/fs/bcachefs/io_misc.c
index 2cf6297756f8..177ed331c00b 100644
--- a/fs/bcachefs/io_misc.c
+++ b/fs/bcachefs/io_misc.c
@@ -126,11 +126,7 @@ err_noprint:
if (closure_nr_remaining(&cl) != 1) {
bch2_trans_unlock_long(trans);
-
- if (closure_sync_timeout(&cl, HZ * 10)) {
- bch2_print_allocator_stuck(c);
- closure_sync(&cl);
- }
+ bch2_wait_on_allocator(c, &cl);
}
return ret;
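
bch2_wait_on_allocator(), introduced elsewhere in this series, replaces the open-coded closure_sync_timeout()/warn/closure_sync() sequence here and in io_write.c, presumably tied to the new allocator_stuck_timeout option added in opts.h below. A rough sketch of the wait, warn once, keep waiting shape (plain sleep() standing in for closure waits):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool done_after_seconds(unsigned waited, unsigned finish_at)
{
	return waited >= finish_at;
}

static void wait_on_allocator(unsigned timeout, unsigned finish_at)
{
	unsigned waited = 0;

	while (!done_after_seconds(waited, finish_at)) {
		sleep(1);
		if (++waited == timeout)	/* warn once, keep waiting */
			fprintf(stderr, "allocator appears stuck after %us\n",
				timeout);
	}
}

int main(void)
{
	wait_on_allocator(2, 3);
	printf("allocation completed\n");
	return 0;
}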
diff --git a/fs/bcachefs/io_read.c b/fs/bcachefs/io_read.c
index 4531c9ab3e12..7ee3b75480df 100644
--- a/fs/bcachefs/io_read.c
+++ b/fs/bcachefs/io_read.c
@@ -406,6 +406,7 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
bch2_trans_iter_init(trans, &iter, rbio->data_btree,
rbio->read_pos, BTREE_ITER_slots);
retry:
+ bch2_trans_begin(trans);
rbio->bio.bi_status = 0;
k = bch2_btree_iter_peek_slot(&iter);
diff --git a/fs/bcachefs/io_write.c b/fs/bcachefs/io_write.c
index d31c8d006d97..1d4761d15002 100644
--- a/fs/bcachefs/io_write.c
+++ b/fs/bcachefs/io_write.c
@@ -1503,10 +1503,7 @@ err:
if ((op->flags & BCH_WRITE_SYNC) ||
(!(op->flags & BCH_WRITE_SUBMITTED) &&
!(op->flags & BCH_WRITE_IN_WORKER))) {
- if (closure_sync_timeout(&op->cl, HZ * 10)) {
- bch2_print_allocator_stuck(c);
- closure_sync(&op->cl);
- }
+ bch2_wait_on_allocator(c, &op->cl);
__bch2_write_index(op);
diff --git a/fs/bcachefs/journal_io.c b/fs/bcachefs/journal_io.c
index 7a833a3f1c63..7664b68e6a15 100644
--- a/fs/bcachefs/journal_io.c
+++ b/fs/bcachefs/journal_io.c
@@ -332,7 +332,6 @@ static int journal_validate_key(struct bch_fs *c,
{
int write = flags & BCH_VALIDATE_write;
void *next = vstruct_next(entry);
- struct printbuf buf = PRINTBUF;
int ret = 0;
if (journal_entry_err_on(!k->k.u64s,
@@ -368,34 +367,21 @@ static int journal_validate_key(struct bch_fs *c,
bch2_bkey_compat(level, btree_id, version, big_endian,
write, NULL, bkey_to_packed(k));
- if (bch2_bkey_invalid(c, bkey_i_to_s_c(k),
- __btree_node_type(level, btree_id), write, &buf)) {
- printbuf_reset(&buf);
- journal_entry_err_msg(&buf, version, jset, entry);
- prt_newline(&buf);
- printbuf_indent_add(&buf, 2);
-
- bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(k));
- prt_newline(&buf);
- bch2_bkey_invalid(c, bkey_i_to_s_c(k),
- __btree_node_type(level, btree_id), write, &buf);
-
- mustfix_fsck_err(c, journal_entry_bkey_invalid,
- "%s", buf.buf);
-
+ ret = bch2_bkey_validate(c, bkey_i_to_s_c(k),
+ __btree_node_type(level, btree_id), write);
+ if (ret == -BCH_ERR_fsck_delete_bkey) {
le16_add_cpu(&entry->u64s, -((u16) k->k.u64s));
memmove(k, bkey_next(k), next - (void *) bkey_next(k));
journal_entry_null_range(vstruct_next(entry), next);
-
- printbuf_exit(&buf);
return FSCK_DELETED_KEY;
}
+ if (ret)
+ goto fsck_err;
if (write)
bch2_bkey_compat(level, btree_id, version, big_endian,
write, NULL, bkey_to_packed(k));
fsck_err:
- printbuf_exit(&buf);
return ret;
}
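
When validation now reports -BCH_ERR_fsck_delete_bkey, the key is excised from the journal entry in place: the entry's u64s count shrinks, the following keys slide down, and the vacated tail is zeroed. The same mechanics on a plain array (illustrative only):

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned long long entry[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned nr = 8, bad = 2, bad_len = 3;	/* delete entry[2..4] */

	memmove(&entry[bad], &entry[bad + bad_len],
		(nr - bad - bad_len) * sizeof(entry[0]));
	nr -= bad_len;
	memset(&entry[nr], 0, bad_len * sizeof(entry[0]));	/* null range */

	for (unsigned i = 0; i < 8; i++)
		printf("%llu ", entry[i]);	/* 1 2 6 7 8 0 0 0 */
	printf("(nr=%u)\n", nr);
	return 0;
}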
diff --git a/fs/bcachefs/lru.c b/fs/bcachefs/lru.c
index 83b1586cb371..96f2f4f8c397 100644
--- a/fs/bcachefs/lru.c
+++ b/fs/bcachefs/lru.c
@@ -10,14 +10,13 @@
#include "recovery.h"
/* KEY_TYPE_lru is obsolete: */
-int bch2_lru_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_lru_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
int ret = 0;
- bkey_fsck_err_on(!lru_pos_time(k.k->p), c, err,
- lru_entry_at_time_0,
+ bkey_fsck_err_on(!lru_pos_time(k.k->p),
+ c, lru_entry_at_time_0,
"lru entry at time=0");
fsck_err:
return ret;
diff --git a/fs/bcachefs/lru.h b/fs/bcachefs/lru.h
index 5bd8974a7f11..e6a7d8241bb8 100644
--- a/fs/bcachefs/lru.h
+++ b/fs/bcachefs/lru.h
@@ -33,14 +33,13 @@ static inline enum bch_lru_type lru_type(struct bkey_s_c l)
return BCH_LRU_read;
}
-int bch2_lru_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_lru_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_lru_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_lru_pos_to_text(struct printbuf *, struct bpos);
#define bch2_bkey_ops_lru ((struct bkey_ops) { \
- .key_invalid = bch2_lru_invalid, \
+ .key_validate = bch2_lru_validate, \
.val_to_text = bch2_lru_to_text, \
.min_val_size = 8, \
})
diff --git a/fs/bcachefs/opts.h b/fs/bcachefs/opts.h
index 60b93018501f..cda1725702ea 100644
--- a/fs/bcachefs/opts.h
+++ b/fs/bcachefs/opts.h
@@ -391,6 +391,11 @@ enum fsck_err_opts {
OPT_BOOL(), \
BCH_SB_JOURNAL_TRANSACTION_NAMES, true, \
NULL, "Log transaction function names in journal") \
+ x(allocator_stuck_timeout, u16, \
+ OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
+ OPT_UINT(0, U16_MAX), \
+ BCH_SB_ALLOCATOR_STUCK_TIMEOUT, 30, \
+ NULL, "Default timeout in seconds for stuck allocator messages")\
x(noexcl, u8, \
OPT_FS|OPT_MOUNT, \
OPT_BOOL(), \
diff --git a/fs/bcachefs/quota.c b/fs/bcachefs/quota.c
index a0cca8b70e0a..c32a05e252e2 100644
--- a/fs/bcachefs/quota.c
+++ b/fs/bcachefs/quota.c
@@ -59,13 +59,13 @@ const struct bch_sb_field_ops bch_sb_field_ops_quota = {
.to_text = bch2_sb_quota_to_text,
};
-int bch2_quota_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags, struct printbuf *err)
+int bch2_quota_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
int ret = 0;
- bkey_fsck_err_on(k.k->p.inode >= QTYP_NR, c, err,
- quota_type_invalid,
+ bkey_fsck_err_on(k.k->p.inode >= QTYP_NR,
+ c, quota_type_invalid,
"invalid quota type (%llu >= %u)",
k.k->p.inode, QTYP_NR);
fsck_err:
diff --git a/fs/bcachefs/quota.h b/fs/bcachefs/quota.h
index 02d37a332218..a62abcc5332a 100644
--- a/fs/bcachefs/quota.h
+++ b/fs/bcachefs/quota.h
@@ -8,12 +8,11 @@
enum bch_validate_flags;
extern const struct bch_sb_field_ops bch_sb_field_ops_quota;
-int bch2_quota_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_quota_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_quota_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_quota ((struct bkey_ops) { \
- .key_invalid = bch2_quota_invalid, \
+ .key_validate = bch2_quota_validate, \
.val_to_text = bch2_quota_to_text, \
.min_val_size = 32, \
})
diff --git a/fs/bcachefs/reflink.c b/fs/bcachefs/reflink.c
index 5f92715e1525..e59c0abb4772 100644
--- a/fs/bcachefs/reflink.c
+++ b/fs/bcachefs/reflink.c
@@ -29,15 +29,14 @@ static inline unsigned bkey_type_to_indirect(const struct bkey *k)
/* reflink pointers */
-int bch2_reflink_p_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_reflink_p_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
int ret = 0;
bkey_fsck_err_on(le64_to_cpu(p.v->idx) < le32_to_cpu(p.v->front_pad),
- c, err, reflink_p_front_pad_bad,
+ c, reflink_p_front_pad_bad,
"idx < front_pad (%llu < %u)",
le64_to_cpu(p.v->idx), le32_to_cpu(p.v->front_pad));
fsck_err:
@@ -256,11 +255,10 @@ int bch2_trigger_reflink_p(struct btree_trans *trans,
/* indirect extents */
-int bch2_reflink_v_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_reflink_v_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
- return bch2_bkey_ptrs_invalid(c, k, flags, err);
+ return bch2_bkey_ptrs_validate(c, k, flags);
}
void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
@@ -311,9 +309,8 @@ int bch2_trigger_reflink_v(struct btree_trans *trans,
/* indirect inline data */
-int bch2_indirect_inline_data_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_indirect_inline_data_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
return 0;
}
diff --git a/fs/bcachefs/reflink.h b/fs/bcachefs/reflink.h
index e894f3a2c67a..51afe11d8ed6 100644
--- a/fs/bcachefs/reflink.h
+++ b/fs/bcachefs/reflink.h
@@ -4,41 +4,37 @@
enum bch_validate_flags;
-int bch2_reflink_p_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
-void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
+int bch2_reflink_p_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s,
enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_reflink_p ((struct bkey_ops) { \
- .key_invalid = bch2_reflink_p_invalid, \
+ .key_validate = bch2_reflink_p_validate, \
.val_to_text = bch2_reflink_p_to_text, \
.key_merge = bch2_reflink_p_merge, \
.trigger = bch2_trigger_reflink_p, \
.min_val_size = 16, \
})
-int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
-void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
- struct bkey_s_c);
+int bch2_reflink_v_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
+void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s,
enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
- .key_invalid = bch2_reflink_v_invalid, \
+ .key_validate = bch2_reflink_v_validate, \
.val_to_text = bch2_reflink_v_to_text, \
.swab = bch2_ptr_swab, \
.trigger = bch2_trigger_reflink_v, \
.min_val_size = 8, \
})
-int bch2_indirect_inline_data_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_indirect_inline_data_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
void bch2_indirect_inline_data_to_text(struct printbuf *,
struct bch_fs *, struct bkey_s_c);
int bch2_trigger_indirect_inline_data(struct btree_trans *,
@@ -47,7 +43,7 @@ int bch2_trigger_indirect_inline_data(struct btree_trans *,
enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_indirect_inline_data ((struct bkey_ops) { \
- .key_invalid = bch2_indirect_inline_data_invalid, \
+ .key_validate = bch2_indirect_inline_data_validate, \
.val_to_text = bch2_indirect_inline_data_to_text, \
.trigger = bch2_trigger_indirect_inline_data, \
.min_val_size = 8, \
diff --git a/fs/bcachefs/replicas.c b/fs/bcachefs/replicas.c
index 10c96cb2047a..1223b710755d 100644
--- a/fs/bcachefs/replicas.c
+++ b/fs/bcachefs/replicas.c
@@ -24,7 +24,6 @@ static int bch2_memcmp(const void *l, const void *r, const void *priv)
static void verify_replicas_entry(struct bch_replicas_entry_v1 *e)
{
#ifdef CONFIG_BCACHEFS_DEBUG
- BUG_ON(e->data_type >= BCH_DATA_NR);
BUG_ON(!e->nr_devs);
BUG_ON(e->nr_required > 1 &&
e->nr_required >= e->nr_devs);
diff --git a/fs/bcachefs/sb-downgrade.c b/fs/bcachefs/sb-downgrade.c
index dfbbd33c8731..650a1f77ca40 100644
--- a/fs/bcachefs/sb-downgrade.c
+++ b/fs/bcachefs/sb-downgrade.c
@@ -61,6 +61,20 @@
BCH_FSCK_ERR_dev_usage_buckets_wrong, \
BCH_FSCK_ERR_dev_usage_sectors_wrong, \
BCH_FSCK_ERR_dev_usage_fragmented_wrong, \
+ BCH_FSCK_ERR_accounting_mismatch) \
+ x(disk_accounting_v3, \
+ BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
+ BCH_FSCK_ERR_bkey_version_in_future, \
+ BCH_FSCK_ERR_dev_usage_buckets_wrong, \
+ BCH_FSCK_ERR_dev_usage_sectors_wrong, \
+ BCH_FSCK_ERR_dev_usage_fragmented_wrong, \
+ BCH_FSCK_ERR_accounting_mismatch, \
+ BCH_FSCK_ERR_accounting_key_replicas_nr_devs_0, \
+ BCH_FSCK_ERR_accounting_key_replicas_nr_required_bad, \
+ BCH_FSCK_ERR_accounting_key_replicas_devs_unsorted, \
+ BCH_FSCK_ERR_accounting_key_junk_at_end) \
+ x(disk_accounting_inum, \
+ BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
BCH_FSCK_ERR_accounting_mismatch)
#define DOWNGRADE_TABLE() \
@@ -79,6 +93,21 @@
BCH_FSCK_ERR_fs_usage_nr_inodes_wrong, \
BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, \
BCH_FSCK_ERR_fs_usage_replicas_wrong, \
+ BCH_FSCK_ERR_bkey_version_in_future) \
+ x(disk_accounting_v3, \
+ BIT_ULL(BCH_RECOVERY_PASS_check_allocations), \
+ BCH_FSCK_ERR_dev_usage_buckets_wrong, \
+ BCH_FSCK_ERR_dev_usage_sectors_wrong, \
+ BCH_FSCK_ERR_dev_usage_fragmented_wrong, \
+ BCH_FSCK_ERR_fs_usage_hidden_wrong, \
+ BCH_FSCK_ERR_fs_usage_btree_wrong, \
+ BCH_FSCK_ERR_fs_usage_data_wrong, \
+ BCH_FSCK_ERR_fs_usage_cached_wrong, \
+ BCH_FSCK_ERR_fs_usage_reserved_wrong, \
+ BCH_FSCK_ERR_fs_usage_nr_inodes_wrong, \
+ BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, \
+ BCH_FSCK_ERR_fs_usage_replicas_wrong, \
+ BCH_FSCK_ERR_accounting_replicas_not_marked, \
BCH_FSCK_ERR_bkey_version_in_future)
struct upgrade_downgrade_entry {
diff --git a/fs/bcachefs/sb-errors_format.h b/fs/bcachefs/sb-errors_format.h
index d1b2f2aa397a..d3a498617303 100644
--- a/fs/bcachefs/sb-errors_format.h
+++ b/fs/bcachefs/sb-errors_format.h
@@ -287,7 +287,11 @@ enum bch_fsck_flags {
x(accounting_replicas_not_marked, 273, 0) \
x(invalid_btree_id, 274, 0) \
x(alloc_key_io_time_bad, 275, 0) \
- x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX)
+ x(alloc_key_fragmentation_lru_wrong, 276, FSCK_AUTOFIX) \
+ x(accounting_key_junk_at_end, 277, 0) \
+ x(accounting_key_replicas_nr_devs_0, 278, 0) \
+ x(accounting_key_replicas_nr_required_bad, 279, 0) \
+ x(accounting_key_replicas_devs_unsorted, 280, 0)
enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,
diff --git a/fs/bcachefs/snapshot.c b/fs/bcachefs/snapshot.c
index 96744b1a76f5..8b18a9b483a4 100644
--- a/fs/bcachefs/snapshot.c
+++ b/fs/bcachefs/snapshot.c
@@ -31,15 +31,14 @@ void bch2_snapshot_tree_to_text(struct printbuf *out, struct bch_fs *c,
le32_to_cpu(t.v->root_snapshot));
}
-int bch2_snapshot_tree_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_snapshot_tree_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
int ret = 0;
bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
- bkey_lt(k.k->p, POS(0, 1)), c, err,
- snapshot_tree_pos_bad,
+ bkey_lt(k.k->p, POS(0, 1)),
+ c, snapshot_tree_pos_bad,
"bad pos");
fsck_err:
return ret;
@@ -225,55 +224,54 @@ void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
le32_to_cpu(s.v->skip[2]));
}
-int bch2_snapshot_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_snapshot_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_snapshot s;
u32 i, id;
int ret = 0;
bkey_fsck_err_on(bkey_gt(k.k->p, POS(0, U32_MAX)) ||
- bkey_lt(k.k->p, POS(0, 1)), c, err,
- snapshot_pos_bad,
+ bkey_lt(k.k->p, POS(0, 1)),
+ c, snapshot_pos_bad,
"bad pos");
s = bkey_s_c_to_snapshot(k);
id = le32_to_cpu(s.v->parent);
- bkey_fsck_err_on(id && id <= k.k->p.offset, c, err,
- snapshot_parent_bad,
+ bkey_fsck_err_on(id && id <= k.k->p.offset,
+ c, snapshot_parent_bad,
"bad parent node (%u <= %llu)",
id, k.k->p.offset);
- bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]), c, err,
- snapshot_children_not_normalized,
+ bkey_fsck_err_on(le32_to_cpu(s.v->children[0]) < le32_to_cpu(s.v->children[1]),
+ c, snapshot_children_not_normalized,
"children not normalized");
- bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1], c, err,
- snapshot_child_duplicate,
+ bkey_fsck_err_on(s.v->children[0] && s.v->children[0] == s.v->children[1],
+ c, snapshot_child_duplicate,
"duplicate child nodes");
for (i = 0; i < 2; i++) {
id = le32_to_cpu(s.v->children[i]);
- bkey_fsck_err_on(id >= k.k->p.offset, c, err,
- snapshot_child_bad,
+ bkey_fsck_err_on(id >= k.k->p.offset,
+ c, snapshot_child_bad,
"bad child node (%u >= %llu)",
id, k.k->p.offset);
}
if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, skip)) {
bkey_fsck_err_on(le32_to_cpu(s.v->skip[0]) > le32_to_cpu(s.v->skip[1]) ||
- le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]), c, err,
- snapshot_skiplist_not_normalized,
+ le32_to_cpu(s.v->skip[1]) > le32_to_cpu(s.v->skip[2]),
+ c, snapshot_skiplist_not_normalized,
"skiplist not normalized");
for (i = 0; i < ARRAY_SIZE(s.v->skip); i++) {
id = le32_to_cpu(s.v->skip[i]);
- bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent), c, err,
- snapshot_skiplist_bad,
+ bkey_fsck_err_on(id && id < le32_to_cpu(s.v->parent),
+ c, snapshot_skiplist_bad,
"bad skiplist node %u", id);
}
}
diff --git a/fs/bcachefs/snapshot.h b/fs/bcachefs/snapshot.h
index 31b0ee03e962..eb5ef64221d6 100644
--- a/fs/bcachefs/snapshot.h
+++ b/fs/bcachefs/snapshot.h
@@ -5,11 +5,11 @@
enum bch_validate_flags;
void bch2_snapshot_tree_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_tree_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_snapshot_tree_validate(struct bch_fs *, struct bkey_s_c,
+ enum bch_validate_flags);
#define bch2_bkey_ops_snapshot_tree ((struct bkey_ops) { \
- .key_invalid = bch2_snapshot_tree_invalid, \
+ .key_validate = bch2_snapshot_tree_validate, \
.val_to_text = bch2_snapshot_tree_to_text, \
.min_val_size = 8, \
})
@@ -19,14 +19,13 @@ struct bkey_i_snapshot_tree *__bch2_snapshot_tree_create(struct btree_trans *);
int bch2_snapshot_tree_lookup(struct btree_trans *, u32, struct bch_snapshot_tree *);
void bch2_snapshot_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
-int bch2_snapshot_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_snapshot_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s,
enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_snapshot ((struct bkey_ops) { \
- .key_invalid = bch2_snapshot_invalid, \
+ .key_validate = bch2_snapshot_validate, \
.val_to_text = bch2_snapshot_to_text, \
.trigger = bch2_mark_snapshot, \
.min_val_size = 24, \
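Note on the pattern above, repeated below for subvolumes and xattrs: the old .key_invalid hook took a struct printbuf *err that every call site had to thread through, while the new .key_validate hook drops it and lets bkey_fsck_err_on() select the superblock error-table entry directly from (c, error-id, format). A minimal sketch of the new shape for a hypothetical key type (the names bch2_foo_validate, struct bch_foo and foo_val_size_too_small are illustrative, not from this patch):

	int bch2_foo_validate(struct bch_fs *c, struct bkey_s_c k,
			      enum bch_validate_flags flags)
	{
		int ret = 0;

		/* condition first, then (fs, sb-error id, message): */
		bkey_fsck_err_on(bkey_val_bytes(k.k) < sizeof(struct bch_foo),
				 c, foo_val_size_too_small,
				 "value too small (%zu < %zu)",
				 bkey_val_bytes(k.k), sizeof(struct bch_foo));
	fsck_err:
		return ret;
	}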
diff --git a/fs/bcachefs/subvolume.c b/fs/bcachefs/subvolume.c
index f56720b55862..dbe834cb349f 100644
--- a/fs/bcachefs/subvolume.c
+++ b/fs/bcachefs/subvolume.c
@@ -207,23 +207,23 @@ int bch2_check_subvol_children(struct bch_fs *c)
/* Subvolumes: */
-int bch2_subvolume_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags, struct printbuf *err)
+int bch2_subvolume_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_subvolume subvol = bkey_s_c_to_subvolume(k);
int ret = 0;
bkey_fsck_err_on(bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
- bkey_gt(k.k->p, SUBVOL_POS_MAX), c, err,
- subvol_pos_bad,
+ bkey_gt(k.k->p, SUBVOL_POS_MAX),
+ c, subvol_pos_bad,
"invalid pos");
- bkey_fsck_err_on(!subvol.v->snapshot, c, err,
- subvol_snapshot_bad,
+ bkey_fsck_err_on(!subvol.v->snapshot,
+ c, subvol_snapshot_bad,
"invalid snapshot");
- bkey_fsck_err_on(!subvol.v->inode, c, err,
- subvol_inode_bad,
+ bkey_fsck_err_on(!subvol.v->inode,
+ c, subvol_inode_bad,
"invalid inode");
fsck_err:
return ret;
diff --git a/fs/bcachefs/subvolume.h b/fs/bcachefs/subvolume.h
index afa5e871efb2..a8299ba2cab2 100644
--- a/fs/bcachefs/subvolume.h
+++ b/fs/bcachefs/subvolume.h
@@ -10,15 +10,14 @@ enum bch_validate_flags;
int bch2_check_subvols(struct bch_fs *);
int bch2_check_subvol_children(struct bch_fs *);
-int bch2_subvolume_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_subvolume_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
int bch2_subvolume_trigger(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s,
enum btree_iter_update_trigger_flags);
#define bch2_bkey_ops_subvolume ((struct bkey_ops) { \
- .key_invalid = bch2_subvolume_invalid, \
+ .key_validate = bch2_subvolume_validate, \
.val_to_text = bch2_subvolume_to_text, \
.trigger = bch2_subvolume_trigger, \
.min_val_size = 16, \
diff --git a/fs/bcachefs/super-io.c b/fs/bcachefs/super-io.c
index 8bc819832790..c8c2ccbdfbb5 100644
--- a/fs/bcachefs/super-io.c
+++ b/fs/bcachefs/super-io.c
@@ -414,6 +414,10 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb,
if (!BCH_SB_VERSION_UPGRADE_COMPLETE(sb))
SET_BCH_SB_VERSION_UPGRADE_COMPLETE(sb, le16_to_cpu(sb->version));
+
+ if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_disk_accounting_v2 &&
+ !BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb))
+ SET_BCH_SB_ALLOCATOR_STUCK_TIMEOUT(sb, 30);
}
for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 0455a1001fec..e7fa2de35014 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -1193,7 +1193,6 @@ static void bch2_dev_free(struct bch_dev *ca)
if (ca->kobj.state_in_sysfs)
kobject_del(&ca->kobj);
- kfree(ca->buckets_nouse);
bch2_free_super(&ca->disk_sb);
bch2_dev_allocator_background_exit(ca);
bch2_dev_journal_exit(ca);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 1c0d1fb20276..f393023a3ae2 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -367,7 +367,7 @@ SHOW(bch2_fs)
bch2_stripes_heap_to_text(out, c);
if (attr == &sysfs_open_buckets)
- bch2_open_buckets_to_text(out, c);
+ bch2_open_buckets_to_text(out, c, NULL);
if (attr == &sysfs_open_buckets_partial)
bch2_open_buckets_partial_to_text(out, c);
@@ -811,6 +811,9 @@ SHOW(bch2_dev)
if (attr == &sysfs_alloc_debug)
bch2_dev_alloc_debug_to_text(out, ca);
+ if (attr == &sysfs_open_buckets)
+ bch2_open_buckets_to_text(out, c, ca);
+
return 0;
}
@@ -892,6 +895,7 @@ struct attribute *bch2_dev_files[] = {
/* debug: */
&sysfs_alloc_debug,
+ &sysfs_open_buckets,
NULL
};
diff --git a/fs/bcachefs/trace.c b/fs/bcachefs/trace.c
index dc48b52b01b4..dfad1d06633d 100644
--- a/fs/bcachefs/trace.c
+++ b/fs/bcachefs/trace.c
@@ -4,6 +4,7 @@
#include "buckets.h"
#include "btree_cache.h"
#include "btree_iter.h"
+#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update_interior.h"
#include "keylist.h"
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index d0e6b9deb6cb..c62f00322d1e 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -988,10 +988,33 @@ TRACE_EVENT(trans_restart_split_race,
__entry->u64s_remaining)
);
-DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
+TRACE_EVENT(trans_blocked_journal_reclaim,
TP_PROTO(struct btree_trans *trans,
unsigned long caller_ip),
- TP_ARGS(trans, caller_ip)
+ TP_ARGS(trans, caller_ip),
+
+ TP_STRUCT__entry(
+ __array(char, trans_fn, 32 )
+ __field(unsigned long, caller_ip )
+
+ __field(unsigned long, key_cache_nr_keys )
+ __field(unsigned long, key_cache_nr_dirty )
+ __field(long, must_wait )
+ ),
+
+ TP_fast_assign(
+ strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+ __entry->caller_ip = caller_ip;
+ __entry->key_cache_nr_keys = atomic_long_read(&trans->c->btree_key_cache.nr_keys);
+ __entry->key_cache_nr_dirty = atomic_long_read(&trans->c->btree_key_cache.nr_dirty);
+ __entry->must_wait = __bch2_btree_key_cache_must_wait(trans->c);
+ ),
+
+ TP_printk("%s %pS key cache keys %lu dirty %lu must_wait %li",
+ __entry->trans_fn, (void *) __entry->caller_ip,
+ __entry->key_cache_nr_keys,
+ __entry->key_cache_nr_dirty,
+ __entry->must_wait)
);
TRACE_EVENT(trans_restart_journal_preres_get,
diff --git a/fs/bcachefs/xattr.c b/fs/bcachefs/xattr.c
index c11bf6dacc2c..f2b4c17a0307 100644
--- a/fs/bcachefs/xattr.c
+++ b/fs/bcachefs/xattr.c
@@ -70,17 +70,16 @@ const struct bch_hash_desc bch2_xattr_hash_desc = {
.cmp_bkey = xattr_cmp_bkey,
};
-int bch2_xattr_invalid(struct bch_fs *c, struct bkey_s_c k,
- enum bch_validate_flags flags,
- struct printbuf *err)
+int bch2_xattr_validate(struct bch_fs *c, struct bkey_s_c k,
+ enum bch_validate_flags flags)
{
struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
unsigned val_u64s = xattr_val_u64s(xattr.v->x_name_len,
le16_to_cpu(xattr.v->x_val_len));
int ret = 0;
- bkey_fsck_err_on(bkey_val_u64s(k.k) < val_u64s, c, err,
- xattr_val_size_too_small,
+ bkey_fsck_err_on(bkey_val_u64s(k.k) < val_u64s,
+ c, xattr_val_size_too_small,
"value too small (%zu < %u)",
bkey_val_u64s(k.k), val_u64s);
@@ -88,17 +87,17 @@ int bch2_xattr_invalid(struct bch_fs *c, struct bkey_s_c k,
val_u64s = xattr_val_u64s(xattr.v->x_name_len,
le16_to_cpu(xattr.v->x_val_len) + 4);
- bkey_fsck_err_on(bkey_val_u64s(k.k) > val_u64s, c, err,
- xattr_val_size_too_big,
+ bkey_fsck_err_on(bkey_val_u64s(k.k) > val_u64s,
+ c, xattr_val_size_too_big,
"value too big (%zu > %u)",
bkey_val_u64s(k.k), val_u64s);
- bkey_fsck_err_on(!bch2_xattr_type_to_handler(xattr.v->x_type), c, err,
- xattr_invalid_type,
+ bkey_fsck_err_on(!bch2_xattr_type_to_handler(xattr.v->x_type),
+ c, xattr_invalid_type,
"invalid type (%u)", xattr.v->x_type);
- bkey_fsck_err_on(memchr(xattr.v->x_name, '\0', xattr.v->x_name_len), c, err,
- xattr_name_invalid_chars,
+ bkey_fsck_err_on(memchr(xattr.v->x_name, '\0', xattr.v->x_name_len),
+ c, xattr_name_invalid_chars,
"xattr name has invalid characters");
fsck_err:
return ret;
diff --git a/fs/bcachefs/xattr.h b/fs/bcachefs/xattr.h
index 1574b9eb4c85..c188a5ad64ce 100644
--- a/fs/bcachefs/xattr.h
+++ b/fs/bcachefs/xattr.h
@@ -6,12 +6,11 @@
extern const struct bch_hash_desc bch2_xattr_hash_desc;
-int bch2_xattr_invalid(struct bch_fs *, struct bkey_s_c,
- enum bch_validate_flags, struct printbuf *);
+int bch2_xattr_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_xattr_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_xattr ((struct bkey_ops) { \
- .key_invalid = bch2_xattr_invalid, \
+ .key_validate = bch2_xattr_validate, \
.val_to_text = bch2_xattr_to_text, \
.min_val_size = 8, \
})
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index c26545d71d39..cd6d5bbb4b9d 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -72,8 +72,10 @@
#ifdef CONFIG_BINFMT_FLAT_NO_DATA_START_OFFSET
#define DATA_START_OFFSET_WORDS (0)
+#define MAX_SHARED_LIBS_UPDATE (0)
#else
#define DATA_START_OFFSET_WORDS (MAX_SHARED_LIBS)
+#define MAX_SHARED_LIBS_UPDATE (MAX_SHARED_LIBS)
#endif
struct lib_info {
@@ -880,7 +882,7 @@ static int load_flat_binary(struct linux_binprm *bprm)
return res;
/* Update data segment pointers for all libraries */
- for (i = 0; i < MAX_SHARED_LIBS; i++) {
+ for (i = 0; i < MAX_SHARED_LIBS_UPDATE; i++) {
if (!libinfo.lib_list[i].loaded)
continue;
for (j = 0; j < MAX_SHARED_LIBS; j++) {
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
index 498442d0c216..2e49d978f504 100644
--- a/fs/btrfs/block-group.c
+++ b/fs/btrfs/block-group.c
@@ -1223,8 +1223,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
block_group->space_info->total_bytes -= block_group->length;
block_group->space_info->bytes_readonly -=
(block_group->length - block_group->zone_unusable);
- block_group->space_info->bytes_zone_unusable -=
- block_group->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(fs_info, block_group->space_info,
+ -block_group->zone_unusable);
block_group->space_info->disk_total -= block_group->length * factor;
spin_unlock(&block_group->space_info->lock);
@@ -1396,7 +1396,8 @@ static int inc_block_group_ro(struct btrfs_block_group *cache, int force)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes to readonly */
sinfo->bytes_readonly += cache->zone_unusable;
- sinfo->bytes_zone_unusable -= cache->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
+ -cache->zone_unusable);
cache->zone_unusable = 0;
}
cache->ro++;
@@ -3056,9 +3057,11 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group *cache)
if (btrfs_is_zoned(cache->fs_info)) {
/* Migrate zone_unusable bytes back */
cache->zone_unusable =
- (cache->alloc_offset - cache->used) +
+ (cache->alloc_offset - cache->used - cache->pinned -
+ cache->reserved) +
(cache->length - cache->zone_capacity);
- sinfo->bytes_zone_unusable += cache->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(cache->fs_info, sinfo,
+ cache->zone_unusable);
sinfo->bytes_readonly -= cache->zone_unusable;
}
num_bytes = cache->length - cache->reserved -
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index c8568b1a61c4..75fa563e4cac 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -459,6 +459,7 @@ struct btrfs_file_private {
void *filldir_buf;
u64 last_index;
struct extent_state *llseek_cached_state;
+ bool fsync_skip_inode_lock;
};
static inline u32 BTRFS_LEAF_DATA_SIZE(const struct btrfs_fs_info *info)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 2ac9296edccb..06a9e0542d70 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -1134,6 +1134,73 @@ btrfs_find_delayed_ref_head(struct btrfs_delayed_ref_root *delayed_refs, u64 byt
return find_ref_head(delayed_refs, bytenr, false);
}
+static int find_comp(struct btrfs_delayed_ref_node *entry, u64 root, u64 parent)
+{
+ int type = parent ? BTRFS_SHARED_BLOCK_REF_KEY : BTRFS_TREE_BLOCK_REF_KEY;
+
+ if (type < entry->type)
+ return -1;
+ if (type > entry->type)
+ return 1;
+
+ if (type == BTRFS_TREE_BLOCK_REF_KEY) {
+ if (root < entry->ref_root)
+ return -1;
+ if (root > entry->ref_root)
+ return 1;
+ } else {
+ if (parent < entry->parent)
+ return -1;
+ if (parent > entry->parent)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Check to see if a given root/parent reference is attached to the head. This
+ * only checks for BTRFS_ADD_DELAYED_REF references that match, as that
+ * indicates the reference exists for the given root or parent. This is for
+ * tree blocks only.
+ *
+ * @head: the head of the bytenr we're searching.
+ * @root: the root objectid of the reference if it is a normal reference.
+ * @parent: the parent if this is a shared backref.
+ */
+bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
+ u64 root, u64 parent)
+{
+ struct rb_node *node;
+ bool found = false;
+
+ lockdep_assert_held(&head->mutex);
+
+ spin_lock(&head->lock);
+ node = head->ref_tree.rb_root.rb_node;
+ while (node) {
+ struct btrfs_delayed_ref_node *entry;
+ int ret;
+
+ entry = rb_entry(node, struct btrfs_delayed_ref_node, ref_node);
+ ret = find_comp(entry, root, parent);
+ if (ret < 0) {
+ node = node->rb_left;
+ } else if (ret > 0) {
+ node = node->rb_right;
+ } else {
+ /*
+ * We only want to count ADD actions, as drops mean the
+ * ref doesn't exist.
+ */
+ if (entry->action == BTRFS_ADD_DELAYED_REF)
+ found = true;
+ break;
+ }
+ }
+ spin_unlock(&head->lock);
+ return found;
+}
+
void __cold btrfs_delayed_ref_exit(void)
{
kmem_cache_destroy(btrfs_delayed_ref_head_cachep);
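The lockdep assertion in btrfs_find_delayed_tree_ref() encodes its calling contract: the ref head's mutex must already be held so the rb-tree of refs cannot change during the binary search. A hedged sketch of the intended call site (this is exactly what the check_ref_exists() change further down in this diff does):

	/* 'head' was returned by btrfs_find_delayed_ref_head() under
	 * delayed_refs->lock and its mutex acquired via mutex_trylock(). */
	bool exists = btrfs_find_delayed_tree_ref(head, btrfs_root_id(root),
						  parent);

	mutex_unlock(&head->mutex);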
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index ef15e998be03..05f634eb472d 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -389,6 +389,8 @@ void btrfs_dec_delayed_refs_rsv_bg_updates(struct btrfs_fs_info *fs_info);
int btrfs_delayed_refs_rsv_refill(struct btrfs_fs_info *fs_info,
enum btrfs_reserve_flush_enum flush);
bool btrfs_check_space_for_delayed_refs(struct btrfs_fs_info *fs_info);
+bool btrfs_find_delayed_tree_ref(struct btrfs_delayed_ref_head *head,
+ u64 root, u64 parent);
static inline u64 btrfs_delayed_ref_owner(struct btrfs_delayed_ref_node *node)
{
diff --git a/fs/btrfs/direct-io.c b/fs/btrfs/direct-io.c
index f9fb2db6a1e4..67adbe9d294a 100644
--- a/fs/btrfs/direct-io.c
+++ b/fs/btrfs/direct-io.c
@@ -856,21 +856,37 @@ relock:
* So here we disable page faults in the iov_iter and then retry if we
* got -EFAULT, faulting in the pages before the retry.
*/
+again:
from->nofault = true;
dio = btrfs_dio_write(iocb, from, written);
from->nofault = false;
- /*
- * iomap_dio_complete() will call btrfs_sync_file() if we have a dsync
- * iocb, and that needs to lock the inode. So unlock it before calling
- * iomap_dio_complete() to avoid a deadlock.
- */
- btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
-
- if (IS_ERR_OR_NULL(dio))
+ if (IS_ERR_OR_NULL(dio)) {
ret = PTR_ERR_OR_ZERO(dio);
- else
+ } else {
+ struct btrfs_file_private stack_private = { 0 };
+ struct btrfs_file_private *private;
+ const bool have_private = (file->private_data != NULL);
+
+ if (!have_private)
+ file->private_data = &stack_private;
+
+ /*
+ * If we have a synchronous write, we must make sure the fsync
+ * triggered by the iomap_dio_complete() call below doesn't
+ * deadlock on the inode lock - we are already holding it and we
+ * can't call it after unlocking because we may need to complete
+ * partial writes due to the input buffer (or parts of it) not
+ * being already faulted in.
+ */
+ private = file->private_data;
+ private->fsync_skip_inode_lock = true;
ret = iomap_dio_complete(dio);
+ private->fsync_skip_inode_lock = false;
+
+ if (!have_private)
+ file->private_data = NULL;
+ }
/* No increment (+=) because iomap returns a cumulative value. */
if (ret > 0)
@@ -897,10 +913,12 @@ relock:
} else {
fault_in_iov_iter_readable(from, left);
prev_left = left;
- goto relock;
+ goto again;
}
}
+ btrfs_inode_unlock(BTRFS_I(inode), ilock_flags);
+
/*
* If 'ret' is -ENOTBLK or we have not written all data, then it means
* we must fallback to buffered IO.
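The comment above is the heart of this change: iomap_dio_complete() may call btrfs_sync_file() for a dsync iocb, and we can no longer drop the inode lock first because partial writes (input buffer not fully faulted in) must be completed under it. Condensed, the handshake with the fsync path (shown in the fs/btrfs/file.c hunks below) is:

	/* dio write side, inode lock held: */
	private->fsync_skip_inode_lock = true;
	ret = iomap_dio_complete(dio);
	private->fsync_skip_inode_lock = false;

	/* btrfs_sync_file() side: */
	if (skip_ilock)
		down_write(&inode->i_mmap_lock); /* inode lock already held */
	else
		btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);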
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d77498e7671c..feec49e6f9c8 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2793,7 +2793,8 @@ static int unpin_extent_range(struct btrfs_fs_info *fs_info,
readonly = true;
} else if (btrfs_is_zoned(fs_info)) {
/* Need reset before reusing in a zoned block group */
- space_info->bytes_zone_unusable += len;
+ btrfs_space_info_update_bytes_zone_unusable(fs_info, space_info,
+ len);
readonly = true;
}
spin_unlock(&cache->lock);
@@ -5471,23 +5472,62 @@ static int check_ref_exists(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 parent,
int level)
{
+ struct btrfs_delayed_ref_root *delayed_refs;
+ struct btrfs_delayed_ref_head *head;
struct btrfs_path *path;
struct btrfs_extent_inline_ref *iref;
int ret;
+ bool exists = false;
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
-
+again:
ret = lookup_extent_backref(trans, path, &iref, bytenr,
root->fs_info->nodesize, parent,
btrfs_root_id(root), level, 0);
+ if (ret != -ENOENT) {
+ /*
+ * If we got 0 then we found our reference, so return 1. Otherwise
+ * return the error, as long as it's not -ENOENT.
+ */
+ btrfs_free_path(path);
+ return (ret < 0) ? ret : 1;
+ }
+
+ /*
+ * We could have a delayed ref with this reference, so look it up while
+ * we're holding the path open to make sure we don't race with the
+ * delayed ref running.
+ */
+ delayed_refs = &trans->transaction->delayed_refs;
+ spin_lock(&delayed_refs->lock);
+ head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
+ if (!head)
+ goto out;
+ if (!mutex_trylock(&head->mutex)) {
+ /*
+ * We're contended, means that the delayed ref is running, get a
+ * reference and wait for the ref head to be complete and then
+ * try again.
+ */
+ refcount_inc(&head->refs);
+ spin_unlock(&delayed_refs->lock);
+
+ btrfs_release_path(path);
+
+ mutex_lock(&head->mutex);
+ mutex_unlock(&head->mutex);
+ btrfs_put_delayed_ref_head(head);
+ goto again;
+ }
+
+ exists = btrfs_find_delayed_tree_ref(head, root->root_key.objectid, parent);
+ mutex_unlock(&head->mutex);
+out:
+ spin_unlock(&delayed_refs->lock);
btrfs_free_path(path);
- if (ret == -ENOENT)
- return 0;
- if (ret < 0)
- return ret;
- return 1;
+ return exists ? 1 : 0;
}
/*
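A sketch of the race that keeping the path open (and its extent tree leaf locked) prevents in check_ref_exists() above (my reading of the comment in the hunk, not text from the patch):

	/* T0: lookup_extent_backref() -> -ENOENT (no backref in the btree yet)
	 * T1: the delayed ref for (bytenr, root/parent) runs, inserting the
	 *     backref into the extent tree and leaving the delayed ref tree
	 * T2: btrfs_find_delayed_ref_head() -> NULL  => "no ref" (wrong!)
	 *
	 * Running the delayed ref at T1 must modify the leaf we still hold
	 * locked via 'path', so either the btree lookup or the delayed ref
	 * lookup is guaranteed to observe the reference.
	 */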
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index aa7f8148cd0d..c73cd4f89015 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1496,6 +1496,13 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
free_extent_map(em);
em = NULL;
+ /*
+ * Although the PageDirty bit might be cleared before entering
+ * this function, subpage dirty bit is not cleared.
+ * So clear subpage dirty bit here so next time we won't submit
+ * page for range already written to disk.
+ */
+ btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
btrfs_set_range_writeback(inode, cur, cur + iosize - 1);
if (!PageWriteback(page)) {
btrfs_err(inode->root->fs_info,
@@ -1503,13 +1510,6 @@ static noinline_for_stack int __extent_writepage_io(struct btrfs_inode *inode,
page->index, cur, end);
}
- /*
- * Although the PageDirty bit is cleared before entering this
- * function, subpage dirty bit is not cleared.
- * So clear subpage dirty bit here so next time we won't submit
- * page for range already written to disk.
- */
- btrfs_folio_clear_dirty(fs_info, page_folio(page), cur, iosize);
submit_extent_page(bio_ctrl, disk_bytenr, page, iosize,
cur - page_offset(page));
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 81558f90ee80..10ac5f657e38 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -664,7 +664,7 @@ static noinline int merge_extent_mapping(struct btrfs_inode *inode,
start_diff = start - em->start;
em->start = start;
em->len = end - start;
- if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE && !extent_map_is_compressed(em))
+ if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
em->offset += start_diff;
return add_extent_mapping(inode, em, 0);
}
@@ -1147,8 +1147,7 @@ static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_c
return 0;
/*
- * We want to be fast because we can be called from any path trying to
- * allocate memory, so if the lock is busy we don't want to spend time
+ * We want to be fast so if the lock is busy we don't want to spend time
* waiting for it - either some task is about to do IO for the inode or
* we may have another task shrinking extent maps, here in this code, so
* skip this inode.
@@ -1191,9 +1190,7 @@ next:
/*
* Stop if we need to reschedule or there's contention on the
* lock. This is to avoid slowing other tasks trying to take the
- * lock and because the shrinker might be called during a memory
- * allocation path and we want to avoid taking a very long time
- * and slowing down all sorts of tasks.
+ * lock.
*/
if (need_resched() || rwlock_needbreak(&tree->lock))
break;
@@ -1222,12 +1219,7 @@ static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx
if (ctx->scanned >= ctx->nr_to_scan)
break;
- /*
- * We may be called from memory allocation paths, so we don't
- * want to take too much time and slowdown tasks.
- */
- if (need_resched())
- break;
+ cond_resched();
inode = btrfs_find_first_inode(root, min_ino);
}
@@ -1285,14 +1277,12 @@ long btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan)
ctx.last_ino);
}
- /*
- * We may be called from memory allocation paths, so we don't want to
- * take too much time and slowdown tasks, so stop if we need reschedule.
- */
- while (ctx.scanned < ctx.nr_to_scan && !need_resched()) {
+ while (ctx.scanned < ctx.nr_to_scan) {
struct btrfs_root *root;
unsigned long count;
+ cond_resched();
+
spin_lock(&fs_info->fs_roots_radix_lock);
count = radix_tree_gang_lookup(&fs_info->fs_roots_radix,
(void **)&root,
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 21381de906f6..9914419f3b7d 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1603,6 +1603,7 @@ static inline bool skip_inode_logging(const struct btrfs_log_ctx *ctx)
*/
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
+ struct btrfs_file_private *private = file->private_data;
struct dentry *dentry = file_dentry(file);
struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
struct btrfs_root *root = inode->root;
@@ -1612,6 +1613,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
int ret = 0, err;
u64 len;
bool full_sync;
+ const bool skip_ilock = (private ? private->fsync_skip_inode_lock : false);
trace_btrfs_sync_file(file, datasync);
@@ -1639,7 +1641,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
if (ret)
goto out;
- btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ down_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_lock(inode, BTRFS_ILOCK_MMAP);
atomic_inc(&root->log_batch);
@@ -1663,7 +1668,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
*/
ret = start_ordered_ops(inode, start, end);
if (ret) {
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ up_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
goto out;
}
@@ -1788,7 +1796,10 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
* file again, but that will end up using the synchronization
* inside btrfs_sync_log to keep things safe.
*/
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ up_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
if (ret == BTRFS_NO_LOG_SYNC) {
ret = btrfs_end_transaction(trans);
@@ -1857,7 +1868,10 @@ out:
out_release_extents:
btrfs_release_log_ctx_extents(&ctx);
- btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
+ if (skip_ilock)
+ up_write(&inode->i_mmap_lock);
+ else
+ btrfs_inode_unlock(inode, BTRFS_ILOCK_MMAP);
goto out;
}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 3f9b7507543a..eaa1dbd31352 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -2697,15 +2697,16 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
u64 offset = bytenr - block_group->start;
u64 to_free, to_unusable;
int bg_reclaim_threshold = 0;
- bool initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+ bool initial;
u64 reclaimable_unusable;
- WARN_ON(!initial && offset + size > block_group->zone_capacity);
+ spin_lock(&block_group->lock);
+ initial = ((size == block_group->length) && (block_group->alloc_offset == 0));
+ WARN_ON(!initial && offset + size > block_group->zone_capacity);
if (!initial)
bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
- spin_lock(&ctl->tree_lock);
if (!used)
to_free = size;
else if (initial)
@@ -2718,18 +2719,19 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
to_free = offset + size - block_group->alloc_offset;
to_unusable = size - to_free;
+ spin_lock(&ctl->tree_lock);
ctl->free_space += to_free;
+ spin_unlock(&ctl->tree_lock);
/*
* If the block group is read-only, we should account freed space into
* bytes_readonly.
*/
- if (!block_group->ro)
+ if (!block_group->ro) {
block_group->zone_unusable += to_unusable;
- spin_unlock(&ctl->tree_lock);
+ WARN_ON(block_group->zone_unusable > block_group->length);
+ }
if (!used) {
- spin_lock(&block_group->lock);
block_group->alloc_offset -= size;
- spin_unlock(&block_group->lock);
}
reclaimable_unusable = block_group->zone_unusable -
@@ -2743,6 +2745,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
btrfs_mark_bg_to_reclaim(block_group);
}
+ spin_unlock(&block_group->lock);
+
return 0;
}
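The net effect of the reordering above is one consistent nesting for zoned free-space accounting: block_group->lock is taken once around the whole update and ctl->tree_lock nests briefly inside it, instead of block group fields being read unlocked and the block group lock taken late. Schematically:

	spin_lock(&block_group->lock);
	/* compute 'initial', to_free, to_unusable ... */
	spin_lock(&ctl->tree_lock);
	ctl->free_space += to_free;
	spin_unlock(&ctl->tree_lock);
	/* update zone_unusable / bytes_readonly / alloc_offset,
	 * then the reclaim-threshold check */
	spin_unlock(&block_group->lock);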
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 01eab6955647..b1b6564ab68f 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -714,8 +714,9 @@ out:
return ret;
}
-static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
- u64 end,
+static noinline int cow_file_range_inline(struct btrfs_inode *inode,
+ struct page *locked_page,
+ u64 offset, u64 end,
size_t compressed_size,
int compress_type,
struct folio *compressed_folio,
@@ -739,7 +740,10 @@ static noinline int cow_file_range_inline(struct btrfs_inode *inode, u64 offset,
return ret;
}
- extent_clear_unlock_delalloc(inode, offset, end, NULL, &cached,
+ if (ret == 0)
+ locked_page = NULL;
+
+ extent_clear_unlock_delalloc(inode, offset, end, locked_page, &cached,
clear_flags,
PAGE_UNLOCK | PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
@@ -1043,10 +1047,10 @@ again:
* extent for the subpage case.
*/
if (total_in < actual_end)
- ret = cow_file_range_inline(inode, start, end, 0,
+ ret = cow_file_range_inline(inode, NULL, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
else
- ret = cow_file_range_inline(inode, start, end, total_compressed,
+ ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
compress_type, folios[0], false);
if (ret <= 0) {
if (ret < 0)
@@ -1359,7 +1363,7 @@ static noinline int cow_file_range(struct btrfs_inode *inode,
if (!no_inline) {
/* lets try to make an inline extent */
- ret = cow_file_range_inline(inode, start, end, 0,
+ ret = cow_file_range_inline(inode, locked_page, start, end, 0,
BTRFS_COMPRESS_NONE, NULL, false);
if (ret <= 0) {
/*
@@ -1581,6 +1585,7 @@ out_unlock:
locked_page, &cached,
clear_bits,
page_ops);
+ btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
start += cur_alloc_size;
}
@@ -1594,6 +1599,7 @@ out_unlock:
clear_bits |= EXTENT_CLEAR_DATA_RESV;
extent_clear_unlock_delalloc(inode, start, end, locked_page,
&cached, clear_bits, page_ops);
+ btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
}
return ret;
}
@@ -2255,6 +2261,7 @@ error:
EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
PAGE_START_WRITEBACK |
PAGE_END_WRITEBACK);
+ btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL);
}
btrfs_free_path(path);
return ret;
@@ -4188,6 +4195,7 @@ err:
btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
inode_inc_iversion(&inode->vfs_inode);
+ inode_set_ctime_current(&inode->vfs_inode);
inode_inc_iversion(&dir->vfs_inode);
inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
ret = btrfs_update_inode(trans, dir);
@@ -5660,7 +5668,7 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
struct inode *inode;
struct btrfs_root *root = BTRFS_I(dir)->root;
struct btrfs_root *sub_root = root;
- struct btrfs_key location;
+ struct btrfs_key location = { 0 };
u8 di_type = 0;
int ret = 0;
@@ -7198,6 +7206,12 @@ static void wait_subpage_spinlock(struct page *page)
spin_unlock_irq(&subpage->lock);
}
+static int btrfs_launder_folio(struct folio *folio)
+{
+ return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
+ PAGE_SIZE, NULL);
+}
+
static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
if (try_release_extent_mapping(&folio->page, gfp_flags)) {
@@ -10133,6 +10147,7 @@ static const struct address_space_operations btrfs_aops = {
.writepages = btrfs_writepages,
.readahead = btrfs_readahead,
.invalidate_folio = btrfs_invalidate_folio,
+ .launder_folio = btrfs_launder_folio,
.release_folio = btrfs_release_folio,
.migrate_folio = btrfs_migrate_folio,
.dirty_folio = filemap_dirty_folio,
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 32dcea662da3..fc821aa446f0 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -14,7 +14,7 @@
struct root_name_map {
u64 id;
- char name[16];
+ const char *name;
};
static const struct root_name_map root_map[] = {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 14a8d7100018..0de9162ff481 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1648,14 +1648,20 @@ static void scrub_reset_stripe(struct scrub_stripe *stripe)
}
}
+static u32 stripe_length(const struct scrub_stripe *stripe)
+{
+ ASSERT(stripe->bg);
+
+ return min(BTRFS_STRIPE_LEN,
+ stripe->bg->start + stripe->bg->length - stripe->logical);
+}
+
static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
struct scrub_stripe *stripe)
{
struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
struct btrfs_bio *bbio = NULL;
- unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
- stripe->bg->length - stripe->logical) >>
- fs_info->sectorsize_bits;
+ unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
u64 stripe_len = BTRFS_STRIPE_LEN;
int mirror = stripe->mirror_num;
int i;
@@ -1729,9 +1735,7 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
{
struct btrfs_fs_info *fs_info = sctx->fs_info;
struct btrfs_bio *bbio;
- unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
- stripe->bg->length - stripe->logical) >>
- fs_info->sectorsize_bits;
+ unsigned int nr_sectors = stripe_length(stripe) >> fs_info->sectorsize_bits;
int mirror = stripe->mirror_num;
ASSERT(stripe->bg);
@@ -1871,6 +1875,9 @@ static int flush_scrub_stripes(struct scrub_ctx *sctx)
stripe = &sctx->stripes[i];
wait_scrub_stripe_io(stripe);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.last_physical = stripe->physical + stripe_length(stripe);
+ spin_unlock(&sctx->stat_lock);
scrub_reset_stripe(stripe);
}
out:
@@ -2139,7 +2146,9 @@ static int scrub_simple_mirror(struct scrub_ctx *sctx,
cur_physical, &found_logical);
if (ret > 0) {
/* No more extent, just update the accounting */
+ spin_lock(&sctx->stat_lock);
sctx->stat.last_physical = physical + logical_length;
+ spin_unlock(&sctx->stat_lock);
ret = 0;
break;
}
@@ -2336,6 +2345,10 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
stripe_logical += chunk_logical;
ret = scrub_raid56_parity_stripe(sctx, scrub_dev, bg,
map, stripe_logical);
+ spin_lock(&sctx->stat_lock);
+ sctx->stat.last_physical = min(physical + BTRFS_STRIPE_LEN,
+ physical_end);
+ spin_unlock(&sctx->stat_lock);
if (ret)
goto out;
goto next;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 4ca711a773ef..619fa0b8b3f6 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -347,7 +347,7 @@ struct name_cache_entry {
int ret;
int need_later_update;
int name_len;
- char name[];
+ char name[] __counted_by(name_len);
};
/* See the comment at lru_cache.h about struct btrfs_lru_cache_entry. */
@@ -6157,25 +6157,51 @@ static int send_write_or_clone(struct send_ctx *sctx,
u64 offset = key->offset;
u64 end;
u64 bs = sctx->send_root->fs_info->sectorsize;
+ struct btrfs_file_extent_item *ei;
+ u64 disk_byte;
+ u64 data_offset;
+ u64 num_bytes;
+ struct btrfs_inode_info info = { 0 };
end = min_t(u64, btrfs_file_extent_end(path), sctx->cur_inode_size);
if (offset >= end)
return 0;
- if (clone_root && IS_ALIGNED(end, bs)) {
- struct btrfs_file_extent_item *ei;
- u64 disk_byte;
- u64 data_offset;
+ num_bytes = end - offset;
- ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
- struct btrfs_file_extent_item);
- disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
- data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
- ret = clone_range(sctx, path, clone_root, disk_byte,
- data_offset, offset, end - offset);
- } else {
- ret = send_extent_data(sctx, path, offset, end - offset);
- }
+ if (!clone_root)
+ goto write_data;
+
+ if (IS_ALIGNED(end, bs))
+ goto clone_data;
+
+ /*
+ * If the extent end is not aligned, we can clone if the extent ends at
+ * the i_size of the inode and the clone range ends at the i_size of the
+ * source inode, otherwise the clone operation fails with -EINVAL.
+ */
+ if (end != sctx->cur_inode_size)
+ goto write_data;
+
+ ret = get_inode_info(clone_root->root, clone_root->ino, &info);
+ if (ret < 0)
+ return ret;
+
+ if (clone_root->offset + num_bytes == info.size)
+ goto clone_data;
+
+write_data:
+ ret = send_extent_data(sctx, path, offset, num_bytes);
+ sctx->cur_inode_next_write_offset = end;
+ return ret;
+
+clone_data:
+ ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
+ struct btrfs_file_extent_item);
+ disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei);
+ data_offset = btrfs_file_extent_offset(path->nodes[0], ei);
+ ret = clone_range(sctx, path, clone_root, disk_byte, data_offset, offset,
+ num_bytes);
sctx->cur_inode_next_write_offset = end;
return ret;
}
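The new control flow in send_write_or_clone() boils down to one rule about extents whose end is not block-aligned. A hypothetical helper (not in the patch) distilling it, where clone_end would be clone_root->offset + num_bytes and src_i_size the info.size returned by get_inode_info():

	/* Can an extent with an unaligned end still be cloned? Only when
	 * both the destination range and the source range end exactly at
	 * their respective inode's i_size; otherwise the clone ioctl fails
	 * with -EINVAL and we must fall back to regular writes.
	 */
	static bool can_clone_unaligned(u64 end, u64 dst_i_size,
					u64 clone_end, u64 src_i_size)
	{
		return end == dst_i_size && clone_end == src_i_size;
	}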
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 9ac94d3119e8..68e14fd48638 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -316,7 +316,7 @@ void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
found->bytes_used += block_group->used;
found->disk_used += block_group->used * factor;
found->bytes_readonly += block_group->bytes_super;
- found->bytes_zone_unusable += block_group->zone_unusable;
+ btrfs_space_info_update_bytes_zone_unusable(info, found, block_group->zone_unusable);
if (block_group->length > 0)
found->full = 0;
btrfs_try_granting_tickets(info, found);
@@ -583,8 +583,7 @@ again:
spin_lock(&cache->lock);
avail = cache->length - cache->used - cache->pinned -
- cache->reserved - cache->delalloc_bytes -
- cache->bytes_super - cache->zone_unusable;
+ cache->reserved - cache->bytes_super - cache->zone_unusable;
btrfs_info(fs_info,
"block group %llu has %llu bytes, %llu used %llu pinned %llu reserved %llu delalloc %llu super %llu zone_unusable (%llu bytes available) %s",
cache->start, cache->length, cache->used, cache->pinned,
diff --git a/fs/btrfs/space-info.h b/fs/btrfs/space-info.h
index 4db8a0267c16..88b44221ce97 100644
--- a/fs/btrfs/space-info.h
+++ b/fs/btrfs/space-info.h
@@ -249,6 +249,7 @@ btrfs_space_info_update_##name(struct btrfs_fs_info *fs_info, \
DECLARE_SPACE_INFO_UPDATE(bytes_may_use, "space_info");
DECLARE_SPACE_INFO_UPDATE(bytes_pinned, "pinned");
+DECLARE_SPACE_INFO_UPDATE(bytes_zone_unusable, "zone_unusable");
int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
void btrfs_add_bg_to_space_info(struct btrfs_fs_info *info,
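For context, DECLARE_SPACE_INFO_UPDATE(name, trace_name) generates the btrfs_space_info_update_<name>() helper used in the hunks above; converting the open-coded += / -= sites is what gains the update a tracepoint. Roughly (a simplified sketch; the real macro is shared with bytes_may_use and bytes_pinned):

	static inline void
	btrfs_space_info_update_bytes_zone_unusable(struct btrfs_fs_info *fs_info,
						    struct btrfs_space_info *sinfo,
						    s64 bytes)
	{
		lockdep_assert_held(&sinfo->lock);
		/* emits the "zone_unusable" update tracepoint here */
		sinfo->bytes_zone_unusable += bytes;
	}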
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 08d33cb372fb..98fa0f382480 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -28,6 +28,7 @@
#include <linux/btrfs.h>
#include <linux/security.h>
#include <linux/fs_parser.h>
+#include <linux/swap.h>
#include "messages.h"
#include "delayed-inode.h"
#include "ctree.h"
@@ -683,8 +684,11 @@ bool btrfs_check_options(const struct btrfs_fs_info *info,
ret = false;
if (!test_bit(BTRFS_FS_STATE_REMOUNTING, &info->fs_state)) {
- if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE))
+ if (btrfs_raw_test_opt(*mount_opt, SPACE_CACHE)) {
btrfs_info(info, "disk space caching is enabled");
+ btrfs_warn(info,
+"space cache v1 is being deprecated and will be removed in a future release, please use -o space_cache=v2");
+ }
if (btrfs_raw_test_opt(*mount_opt, FREE_SPACE_TREE))
btrfs_info(info, "using free-space-tree");
}
@@ -2398,7 +2402,13 @@ static long btrfs_nr_cached_objects(struct super_block *sb, struct shrink_contro
trace_btrfs_extent_map_shrinker_count(fs_info, nr);
- return nr;
+ /*
+ * Only report the real number for DEBUG builds, as there are reports of
+ * serious performance degradation caused by too frequent shrinks.
+ */
+ if (IS_ENABLED(CONFIG_BTRFS_DEBUG))
+ return nr;
+ return 0;
}
static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_control *sc)
@@ -2406,6 +2416,15 @@ static long btrfs_free_cached_objects(struct super_block *sb, struct shrink_cont
const long nr_to_scan = min_t(unsigned long, LONG_MAX, sc->nr_to_scan);
struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+ /*
+ * We may be called from any task trying to allocate memory and we don't
+ * want to slow it down with scanning and dropping extent maps. It would
+ * also cause heavy lock contention if many tasks concurrently enter
+ * here. Therefore only allow kswapd tasks to scan and drop extent maps.
+ */
+ if (!current_is_kswapd())
+ return 0;
+
return btrfs_free_extent_maps(fs_info, nr_to_scan);
}
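Taken together, the two changes make the extent map shrinker opt-in for background reclaim only: nr_cached_objects reports 0 on non-debug builds so the VFS has nothing to ask back for, and free_cached_objects refuses work unless running in kswapd. The effective policy, condensed (illustrative variable names):

	nr    = IS_ENABLED(CONFIG_BTRFS_DEBUG) ? nr_extent_maps : 0;
	freed = current_is_kswapd()
		? btrfs_free_extent_maps(fs_info, nr_to_scan)
		: 0;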
diff --git a/fs/btrfs/tests/extent-map-tests.c b/fs/btrfs/tests/extent-map-tests.c
index ebec4ab361b8..56e61ac1cc64 100644
--- a/fs/btrfs/tests/extent-map-tests.c
+++ b/fs/btrfs/tests/extent-map-tests.c
@@ -900,6 +900,102 @@ out:
return ret;
}
+/*
+ * Test a regression for compressed extent map adjustment when we attempt to
+ * add an extent map that is partially overlapped by another existing extent
+ * map. The bug left the resulting extent map's offset unchanged even
+ * though its start had been advanced past the overlap.
+ */
+static int test_case_8(struct btrfs_fs_info *fs_info, struct btrfs_inode *inode)
+{
+ struct extent_map_tree *em_tree = &inode->extent_tree;
+ struct extent_map *em;
+ int ret;
+ int ret2;
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ return -ENOMEM;
+ }
+
+ /* Compressed extent for the file range [120K, 128K). */
+ em->start = SZ_1K * 120;
+ em->len = SZ_8K;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_8K;
+ em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, em->start, em->len);
+ write_unlock(&em_tree->lock);
+ free_extent_map(em);
+ if (ret < 0) {
+ test_err("couldn't add extent map for range [120K, 128K)");
+ goto out;
+ }
+
+ em = alloc_extent_map();
+ if (!em) {
+ test_std_err(TEST_ALLOC_EXTENT_MAP);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Compressed extent for the file range [108K, 144K), which overlaps
+ * with the [120K, 128K) we previously inserted.
+ */
+ em->start = SZ_1K * 108;
+ em->len = SZ_1K * 36;
+ em->disk_num_bytes = SZ_4K;
+ em->ram_bytes = SZ_1K * 36;
+ em->flags |= EXTENT_FLAG_COMPRESS_ZLIB;
+
+ /*
+ * Try to add the extent map but with a search range of [140K, 144K),
+ * this should succeed and adjust the extent map to the range
+ * [128K, 144K), with a length of 16K and an offset of 20K.
+ *
+ * This simulates a scenario where in the subvolume tree of an inode we
+ * have a compressed file extent item for the range [108K, 144K) and we
+ * have an overlapping compressed extent map for the range [120K, 128K),
+ * which was created by an encoded write, but its ordered extent was not
+ * yet completed, so the subvolume tree doesn't have yet the file extent
+ * item for that range - we only have the extent map in the inode's
+ * extent map tree.
+ */
+ write_lock(&em_tree->lock);
+ ret = btrfs_add_extent_mapping(inode, &em, SZ_1K * 140, SZ_4K);
+ write_unlock(&em_tree->lock);
+ free_extent_map(em);
+ if (ret < 0) {
+ test_err("couldn't add extent map for range [108K, 144K)");
+ goto out;
+ }
+
+ if (em->start != SZ_128K) {
+ test_err("unexpected extent map start %llu (should be 128K)", em->start);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (em->len != SZ_16K) {
+ test_err("unexpected extent map length %llu (should be 16K)", em->len);
+ ret = -EINVAL;
+ goto out;
+ }
+ if (em->offset != SZ_1K * 20) {
+ test_err("unexpected extent map offset %llu (should be 20K)", em->offset);
+ ret = -EINVAL;
+ goto out;
+ }
+out:
+ ret2 = free_extent_map_tree(inode);
+ if (ret == 0)
+ ret = ret2;
+
+ return ret;
+}
+
struct rmap_test_vector {
u64 raid_type;
u64 physical_start;
@@ -1078,6 +1174,9 @@ int btrfs_test_extent_map(void)
ret = test_case_7(fs_info, BTRFS_I(inode));
if (ret)
goto out;
+ ret = test_case_8(fs_info, BTRFS_I(inode));
+ if (ret)
+ goto out;
test_msg("running rmap tests");
for (i = 0; i < ARRAY_SIZE(rmap_tests); i++) {
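The values asserted by test_case_8 follow directly from trimming the new map against the existing [120K, 128K) one, and exercise exactly the merge_extent_mapping() fix earlier in this diff (em->offset must now advance for compressed extents too):

	/* Worked numbers (KiB):
	 *   existing map : [120, 128)
	 *   inserted map : [108, 144)  len 36, offset 0, compressed
	 *   after trim   : start 108 -> 128   (start_diff = 20)
	 *                  len   36  -> 16    (144 - 128)
	 *                  offset 0  -> 20    (offset += start_diff)
	 */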
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index 6388786fd8b5..634d69964fe4 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -569,9 +569,10 @@ static int check_dir_item(struct extent_buffer *leaf,
/* dir type check */
dir_type = btrfs_dir_ftype(leaf, di);
- if (unlikely(dir_type >= BTRFS_FT_MAX)) {
+ if (unlikely(dir_type <= BTRFS_FT_UNKNOWN ||
+ dir_type >= BTRFS_FT_MAX)) {
dir_item_err(leaf, slot,
- "invalid dir item type, have %u expect [0, %u)",
+ "invalid dir item type, have %u expect (0, %u)",
dir_type, BTRFS_FT_MAX);
return -EUCLEAN;
}
@@ -634,7 +635,7 @@ static int check_dir_item(struct extent_buffer *leaf,
*/
if (key->type == BTRFS_DIR_ITEM_KEY ||
key->type == BTRFS_XATTR_ITEM_KEY) {
- char namebuf[max(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
+ char namebuf[MAX(BTRFS_NAME_LEN, XATTR_NAME_MAX)];
read_extent_buffer(leaf, namebuf,
(unsigned long)(di + 1), name_len);
@@ -1289,6 +1290,19 @@ static void extent_err(const struct extent_buffer *eb, int slot,
va_end(args);
}
+static bool is_valid_dref_root(u64 rootid)
+{
+ /*
+ * The following tree root objectids are allowed to have a data backref:
+ * - subvolume trees
+ * - data reloc tree
+ * - tree root
+ * For v1 space cache
+ */
+ return is_fstree(rootid) || rootid == BTRFS_DATA_RELOC_TREE_OBJECTID ||
+ rootid == BTRFS_ROOT_TREE_OBJECTID;
+}
+
static int check_extent_item(struct extent_buffer *leaf,
struct btrfs_key *key, int slot,
struct btrfs_key *prev_key)
@@ -1441,6 +1455,8 @@ static int check_extent_item(struct extent_buffer *leaf,
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
u64 seq;
+ u64 dref_root;
+ u64 dref_objectid;
u64 dref_offset;
u64 inline_offset;
u8 inline_type;
@@ -1484,11 +1500,26 @@ static int check_extent_item(struct extent_buffer *leaf,
*/
case BTRFS_EXTENT_DATA_REF_KEY:
dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+ dref_root = btrfs_extent_data_ref_root(leaf, dref);
+ dref_objectid = btrfs_extent_data_ref_objectid(leaf, dref);
dref_offset = btrfs_extent_data_ref_offset(leaf, dref);
seq = hash_extent_data_ref(
btrfs_extent_data_ref_root(leaf, dref),
btrfs_extent_data_ref_objectid(leaf, dref),
btrfs_extent_data_ref_offset(leaf, dref));
+ if (unlikely(!is_valid_dref_root(dref_root))) {
+ extent_err(leaf, slot,
+ "invalid data ref root value %llu",
+ dref_root);
+ return -EUCLEAN;
+ }
+ if (unlikely(dref_objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ dref_objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid data ref objectid value %llu",
+ dref_objectid);
+ return -EUCLEAN;
+ }
if (unlikely(!IS_ALIGNED(dref_offset,
fs_info->sectorsize))) {
extent_err(leaf, slot,
@@ -1627,6 +1658,8 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
return -EUCLEAN;
}
for (; ptr < end; ptr += sizeof(*dref)) {
+ u64 root;
+ u64 objectid;
u64 offset;
/*
@@ -1634,7 +1667,22 @@ static int check_extent_data_ref(struct extent_buffer *leaf,
* overflow from the leaf due to hash collisions.
*/
dref = (struct btrfs_extent_data_ref *)ptr;
+ root = btrfs_extent_data_ref_root(leaf, dref);
+ objectid = btrfs_extent_data_ref_objectid(leaf, dref);
offset = btrfs_extent_data_ref_offset(leaf, dref);
+ if (unlikely(!is_valid_dref_root(root))) {
+ extent_err(leaf, slot,
+ "invalid extent data backref root value %llu",
+ root);
+ return -EUCLEAN;
+ }
+ if (unlikely(objectid < BTRFS_FIRST_FREE_OBJECTID ||
+ objectid > BTRFS_LAST_FREE_OBJECTID)) {
+ extent_err(leaf, slot,
+ "invalid extent data backref objectid value %llu",
+ objectid);
+ return -EUCLEAN;
+ }
if (unlikely(!IS_ALIGNED(offset, leaf->fs_info->sectorsize))) {
extent_err(leaf, slot,
"invalid extent data backref offset, have %llu expect aligned to %u",
@@ -1716,6 +1764,72 @@ static int check_raid_stripe_extent(const struct extent_buffer *leaf,
return 0;
}
+static int check_dev_extent_item(const struct extent_buffer *leaf,
+ const struct btrfs_key *key,
+ int slot,
+ struct btrfs_key *prev_key)
+{
+ struct btrfs_dev_extent *de;
+ const u32 sectorsize = leaf->fs_info->sectorsize;
+
+ de = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+ /* Basic fixed member checks. */
+ if (unlikely(btrfs_dev_extent_chunk_tree(leaf, de) !=
+ BTRFS_CHUNK_TREE_OBJECTID)) {
+ generic_err(leaf, slot,
+ "invalid dev extent chunk tree id, has %llu expect %llu",
+ btrfs_dev_extent_chunk_tree(leaf, de),
+ BTRFS_CHUNK_TREE_OBJECTID);
+ return -EUCLEAN;
+ }
+ if (unlikely(btrfs_dev_extent_chunk_objectid(leaf, de) !=
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID)) {
+ generic_err(leaf, slot,
+ "invalid dev extent chunk objectid, has %llu expect %llu",
+ btrfs_dev_extent_chunk_objectid(leaf, de),
+ BTRFS_FIRST_CHUNK_TREE_OBJECTID);
+ return -EUCLEAN;
+ }
+ /* Alignment check. */
+ if (unlikely(!IS_ALIGNED(key->offset, sectorsize))) {
+ generic_err(leaf, slot,
+ "invalid dev extent key.offset, has %llu not aligned to %u",
+ key->offset, sectorsize);
+ return -EUCLEAN;
+ }
+ if (unlikely(!IS_ALIGNED(btrfs_dev_extent_chunk_offset(leaf, de),
+ sectorsize))) {
+ generic_err(leaf, slot,
+ "invalid dev extent chunk offset, has %llu not aligned to %u",
+ btrfs_dev_extent_chunk_offset(leaf, de),
+ sectorsize);
+ return -EUCLEAN;
+ }
+ if (unlikely(!IS_ALIGNED(btrfs_dev_extent_length(leaf, de),
+ sectorsize))) {
+ generic_err(leaf, slot,
+ "invalid dev extent length, has %llu not aligned to %u",
+ btrfs_dev_extent_length(leaf, de), sectorsize);
+ return -EUCLEAN;
+ }
+ /* Overlap check with previous dev extent. */
+ if (slot && prev_key->objectid == key->objectid &&
+ prev_key->type == key->type) {
+ struct btrfs_dev_extent *prev_de;
+ u64 prev_len;
+
+ prev_de = btrfs_item_ptr(leaf, slot - 1, struct btrfs_dev_extent);
+ prev_len = btrfs_dev_extent_length(leaf, prev_de);
+ if (unlikely(prev_key->offset + prev_len > key->offset)) {
+ generic_err(leaf, slot,
+ "dev extent overlap, prev offset %llu len %llu current offset %llu",
+ prev_key->offset, prev_len, key->offset);
+ return -EUCLEAN;
+ }
+ }
+ return 0;
+}
+
/*
* Common point to switch the item-specific validation.
*/
@@ -1752,6 +1866,9 @@ static enum btrfs_tree_block_status check_leaf_item(struct extent_buffer *leaf,
case BTRFS_DEV_ITEM_KEY:
ret = check_dev_item(leaf, key, slot);
break;
+ case BTRFS_DEV_EXTENT_KEY:
+ ret = check_dev_extent_item(leaf, key, slot, prev_key);
+ break;
case BTRFS_INODE_ITEM_KEY:
ret = check_inode_item(leaf, key, slot);
break;
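A few concrete values for the new data-backref root check, per the rule spelled out in is_valid_dref_root() above:

	/* is_valid_dref_root(BTRFS_FS_TREE_OBJECTID)         -> true  (subvol)
	 * is_valid_dref_root(BTRFS_DATA_RELOC_TREE_OBJECTID) -> true
	 * is_valid_dref_root(BTRFS_ROOT_TREE_OBJECTID)       -> true  (v1 cache)
	 * is_valid_dref_root(BTRFS_EXTENT_TREE_OBJECTID)     -> false (extent
	 *     trees never own data backrefs themselves)
	 */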
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 8c16bc5250ef..c4744a02db75 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -246,7 +246,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
if (err >= 0) {
if (sparse && err > 0)
err = ceph_sparse_ext_map_end(op);
- if (err < subreq->len)
+ if (err < subreq->len &&
+ subreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
if (IS_ENCRYPTED(inode) && err > 0) {
err = ceph_fscrypt_decrypt_extents(inode,
@@ -282,7 +283,8 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
size_t len;
int mode;
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (rreq->origin != NETFS_DIO_READ)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
__clear_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
if (subreq->start >= inode->i_size)
@@ -424,6 +426,9 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
struct ceph_netfs_request_data *priv;
int ret = 0;
+ /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+ __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+
if (rreq->origin != NETFS_READAHEAD)
return 0;
@@ -498,6 +503,11 @@ const struct netfs_request_ops ceph_netfs_ops = {
};
#ifdef CONFIG_CEPH_FSCACHE
+static void ceph_set_page_fscache(struct page *page)
+{
+ folio_start_private_2(page_folio(page)); /* [DEPRECATED] */
+}
+
static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
{
struct inode *inode = priv;
@@ -515,6 +525,10 @@ static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, b
ceph_fscache_write_terminated, inode, true, caching);
}
#else
+static inline void ceph_set_page_fscache(struct page *page)
+{
+}
+
static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
{
}
@@ -706,6 +720,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
len = wlen;
set_page_writeback(page);
+ if (caching)
+ ceph_set_page_fscache(page);
ceph_fscache_write_to_cache(inode, page_off, len, caching);
if (IS_ENCRYPTED(inode)) {
@@ -789,6 +805,8 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
return AOP_WRITEPAGE_ACTIVATE;
}
+ folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
+
err = writepage_nounlock(page, wbc);
if (err == -ERESTARTSYS) {
/* direct memory reclaimer was killed by SIGKILL. return 0
@@ -1062,7 +1080,8 @@ get_more_pages:
unlock_page(page);
break;
}
- if (PageWriteback(page)) {
+ if (PageWriteback(page) ||
+ PagePrivate2(page) /* [DEPRECATED] */) {
if (wbc->sync_mode == WB_SYNC_NONE) {
doutc(cl, "%p under writeback\n", page);
unlock_page(page);
@@ -1070,6 +1089,7 @@ get_more_pages:
}
doutc(cl, "waiting on writeback %p\n", page);
wait_on_page_writeback(page);
+ folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */
}
if (!clear_page_dirty_for_io(page)) {
@@ -1254,6 +1274,8 @@ new_request:
}
set_page_writeback(page);
+ if (caching)
+ ceph_set_page_fscache(page);
len += thp_size(page);
}
ceph_fscache_write_to_cache(inode, offset, len, caching);
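These hunks move the [DEPRECATED] PG_private_2 handling out of netfs and back into ceph itself; the invariant they restore is a start/wait pairing around cache writes. In outline (names from the hunks above; my reading of the lifecycle, with the cache-write completion ending the mark):

	/* writeback path, when also writing to the cache: */
	set_page_writeback(page);
	if (caching)
		ceph_set_page_fscache(page);    /* folio_start_private_2() */
	ceph_fscache_write_to_cache(inode, off, len, caching);

	/* any later writeback/laundering of the same page first does: */
	folio_wait_private_2(page_folio(page)); /* [DEPRECATED] */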
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index e98aa8219303..808c9c048276 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2016,6 +2016,8 @@ bool __ceph_should_report_size(struct ceph_inode_info *ci)
* CHECK_CAPS_AUTHONLY - we should only check the auth cap
* CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
* further delay.
+ * CHECK_CAPS_FLUSH_FORCE - we should flush any caps immediately, without
+ * further delay.
*/
void ceph_check_caps(struct ceph_inode_info *ci, int flags)
{
@@ -2097,7 +2099,7 @@ retry:
}
doutc(cl, "%p %llx.%llx file_want %s used %s dirty %s "
- "flushing %s issued %s revoking %s retain %s %s%s%s\n",
+ "flushing %s issued %s revoking %s retain %s %s%s%s%s\n",
inode, ceph_vinop(inode), ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_flushing_caps),
@@ -2105,7 +2107,8 @@ retry:
ceph_cap_string(retain),
(flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
(flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "",
- (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "");
+ (flags & CHECK_CAPS_NOINVAL) ? " NOINVAL" : "",
+ (flags & CHECK_CAPS_FLUSH_FORCE) ? " FLUSH_FORCE" : "");
/*
* If we no longer need to hold onto old our caps, and we may
@@ -2180,6 +2183,11 @@ retry:
queue_writeback = true;
}
+ if (flags & CHECK_CAPS_FLUSH_FORCE) {
+ doutc(cl, "force to flush caps\n");
+ goto ack;
+ }
+
if (cap == ci->i_auth_cap &&
(cap->issued & CEPH_CAP_FILE_WR)) {
/* request larger max_size from MDS? */
@@ -3510,6 +3518,8 @@ static void handle_cap_grant(struct inode *inode,
bool queue_invalidate = false;
bool deleted_inode = false;
bool fill_inline = false;
+ bool revoke_wait = false;
+ int flags = 0;
/*
* If there is at least one crypto block then we'll trust
@@ -3705,16 +3715,18 @@ static void handle_cap_grant(struct inode *inode,
ceph_cap_string(cap->issued), ceph_cap_string(newcaps),
ceph_cap_string(revoking));
if (S_ISREG(inode->i_mode) &&
- (revoking & used & CEPH_CAP_FILE_BUFFER))
+ (revoking & used & CEPH_CAP_FILE_BUFFER)) {
writeback = true; /* initiate writeback; will delay ack */
- else if (queue_invalidate &&
+ revoke_wait = true;
+ } else if (queue_invalidate &&
revoking == CEPH_CAP_FILE_CACHE &&
- (newcaps & CEPH_CAP_FILE_LAZYIO) == 0)
- ; /* do nothing yet, invalidation will be queued */
- else if (cap == ci->i_auth_cap)
+ (newcaps & CEPH_CAP_FILE_LAZYIO) == 0) {
+ revoke_wait = true; /* do nothing yet, invalidation will be queued */
+ } else if (cap == ci->i_auth_cap) {
check_caps = 1; /* check auth cap only */
- else
+ } else {
check_caps = 2; /* check all caps */
+ }
/* If there is new caps, try to wake up the waiters */
if (~cap->issued & newcaps)
wake = true;
@@ -3741,8 +3753,9 @@ static void handle_cap_grant(struct inode *inode,
BUG_ON(cap->issued & ~cap->implemented);
/* don't let check_caps skip sending a response to MDS for revoke msgs */
- if (le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
+ if (!revoke_wait && le32_to_cpu(grant->op) == CEPH_CAP_OP_REVOKE) {
cap->mds_wanted = 0;
+ flags |= CHECK_CAPS_FLUSH_FORCE;
if (cap == ci->i_auth_cap)
check_caps = 1; /* check auth cap only */
else
@@ -3798,9 +3811,9 @@ static void handle_cap_grant(struct inode *inode,
mutex_unlock(&session->s_mutex);
if (check_caps == 1)
- ceph_check_caps(ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
+ ceph_check_caps(ci, flags | CHECK_CAPS_AUTHONLY | CHECK_CAPS_NOINVAL);
else if (check_caps == 2)
- ceph_check_caps(ci, CHECK_CAPS_NOINVAL);
+ ceph_check_caps(ci, flags | CHECK_CAPS_NOINVAL);
}
/*
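The CHECK_CAPS_FLUSH_FORCE flag added above makes ceph_check_caps() skip its usual delay heuristics and ack the MDS immediately after a revoke. A minimal userspace sketch of that short-circuit, with invented stand-ins rather than the real per-cap logic:

/* Sketch of the forced-flush short-circuit: once FLUSH_FORCE is set,
 * the decision jumps straight to "ack" instead of weighing the normal
 * heuristics. should_ack() and its arguments are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

#define CHECK_CAPS_AUTHONLY	1
#define CHECK_CAPS_FLUSH	2
#define CHECK_CAPS_NOINVAL	4
#define CHECK_CAPS_FLUSH_FORCE	8

static bool should_ack(int flags, bool dirty)
{
	if (flags & CHECK_CAPS_FLUSH_FORCE)
		return true;		/* force to flush caps */
	return dirty && (flags & CHECK_CAPS_FLUSH);
}

int main(void)
{
	printf("%d\n", should_ack(CHECK_CAPS_NOINVAL, false));		/* 0 */
	printf("%d\n", should_ack(CHECK_CAPS_FLUSH_FORCE, false));	/* 1 */
	return 0;
}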
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 8f8de8f33abb..71cd70514efa 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -577,8 +577,6 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
/* Set parameters for the netfs library */
netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
- /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
- __set_bit(NETFS_ICTX_USE_PGPRIV2, &ci->netfs.flags);
spin_lock_init(&ci->i_ceph_lock);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index b63b4cd9b5b6..6e817bf1337c 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -200,9 +200,10 @@ struct ceph_cap {
struct list_head caps_item;
};
-#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
-#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
-#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
+#define CHECK_CAPS_AUTHONLY 1 /* only check auth cap */
+#define CHECK_CAPS_FLUSH 2 /* flush any dirty caps */
+#define CHECK_CAPS_NOINVAL 4 /* don't invalidate pagecache */
+#define CHECK_CAPS_FLUSH_FORCE 8 /* force flush any caps */
struct ceph_cap_flush {
u64 tid;
diff --git a/fs/exec.c b/fs/exec.c
index a126e3d1cacb..50e76cc633c4 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1692,6 +1692,7 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
unsigned int mode;
vfsuid_t vfsuid;
vfsgid_t vfsgid;
+ int err;
if (!mnt_may_suid(file->f_path.mnt))
return;
@@ -1708,12 +1709,17 @@ static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
/* Be careful if suid/sgid is set */
inode_lock(inode);
- /* reload atomically mode/uid/gid now that lock held */
+ /* Atomically reload and check mode/uid/gid now that lock held. */
mode = inode->i_mode;
vfsuid = i_uid_into_vfsuid(idmap, inode);
vfsgid = i_gid_into_vfsgid(idmap, inode);
+ err = inode_permission(idmap, inode, MAY_EXEC);
inode_unlock(inode);
+ /* Did the exec bit vanish out from under us? Give up. */
+ if (err)
+ return;
+
/* We ignore suid/sgid if there are no mappings for them in the ns */
if (!vfsuid_has_mapping(bprm->cred->user_ns, vfsuid) ||
!vfsgid_has_mapping(bprm->cred->user_ns, vfsgid))
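The fs/exec.c hunk closes a time-of-check/time-of-use window by re-testing MAY_EXEC while the inode lock is held, so a concurrent chmod cannot strip the exec bit between the open-time check and the suid/sgid computation. A minimal userspace sketch of the recheck-under-lock pattern (fake_inode and friends are invented stand-ins, not kernel API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_inode {
	pthread_mutex_t lock;
	unsigned int mode;		/* e.g. 0755 */
};

static bool may_exec_locked(struct fake_inode *inode)
{
	return inode->mode & 0111;	/* any exec bit still set? */
}

static int fill_uid(struct fake_inode *inode)
{
	int err = 0;

	pthread_mutex_lock(&inode->lock);
	/* Atomically reload and check mode now that the lock is held. */
	if (!may_exec_locked(inode))
		err = -1;	/* exec bit vanished out from under us */
	pthread_mutex_unlock(&inode->lock);
	return err;
}

int main(void)
{
	struct fake_inode inode = { PTHREAD_MUTEX_INITIALIZER, 0755 };

	printf("first check: %d\n", fill_uid(&inode));	/* 0 */
	inode.mode = 0644;				/* racing chmod */
	printf("after chmod: %d\n", fill_uid(&inode));	/* -1 */
	return 0;
}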
diff --git a/fs/file.c b/fs/file.c
index a3b72aa64f11..655338effe9c 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -46,27 +46,23 @@ static void free_fdtable_rcu(struct rcu_head *rcu)
#define BITBIT_NR(nr) BITS_TO_LONGS(BITS_TO_LONGS(nr))
#define BITBIT_SIZE(nr) (BITBIT_NR(nr) * sizeof(long))
+#define fdt_words(fdt) ((fdt)->max_fds / BITS_PER_LONG) // words in ->open_fds
/*
* Copy 'count' fd bits from the old table to the new table and clear the extra
* space if any. This does not copy the file pointers. Called with the files
* spinlock held for write.
*/
-static void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
- unsigned int count)
+static inline void copy_fd_bitmaps(struct fdtable *nfdt, struct fdtable *ofdt,
+ unsigned int copy_words)
{
- unsigned int cpy, set;
-
- cpy = count / BITS_PER_BYTE;
- set = (nfdt->max_fds - count) / BITS_PER_BYTE;
- memcpy(nfdt->open_fds, ofdt->open_fds, cpy);
- memset((char *)nfdt->open_fds + cpy, 0, set);
- memcpy(nfdt->close_on_exec, ofdt->close_on_exec, cpy);
- memset((char *)nfdt->close_on_exec + cpy, 0, set);
-
- cpy = BITBIT_SIZE(count);
- set = BITBIT_SIZE(nfdt->max_fds) - cpy;
- memcpy(nfdt->full_fds_bits, ofdt->full_fds_bits, cpy);
- memset((char *)nfdt->full_fds_bits + cpy, 0, set);
+ unsigned int nwords = fdt_words(nfdt);
+
+ bitmap_copy_and_extend(nfdt->open_fds, ofdt->open_fds,
+ copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
+ bitmap_copy_and_extend(nfdt->close_on_exec, ofdt->close_on_exec,
+ copy_words * BITS_PER_LONG, nwords * BITS_PER_LONG);
+ bitmap_copy_and_extend(nfdt->full_fds_bits, ofdt->full_fds_bits,
+ copy_words, nwords);
}
/*
@@ -84,7 +80,7 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
memcpy(nfdt->fd, ofdt->fd, cpy);
memset((char *)nfdt->fd + cpy, 0, set);
- copy_fd_bitmaps(nfdt, ofdt, ofdt->max_fds);
+ copy_fd_bitmaps(nfdt, ofdt, fdt_words(ofdt));
}
/*
@@ -379,7 +375,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
open_files = sane_fdtable_size(old_fdt, max_fds);
}
- copy_fd_bitmaps(new_fdt, old_fdt, open_files);
+ copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
old_fds = old_fdt->fd;
new_fds = new_fdt->fd;
@@ -1248,6 +1244,7 @@ __releases(&files->file_lock)
* tables and this condition does not arise without those.
*/
fdt = files_fdtable(files);
+ fd = array_index_nospec(fd, fdt->max_fds);
tofree = fdt->fd[fd];
if (!tofree && fd_is_open(fd, fdt))
goto Ebusy;
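copy_fd_bitmaps() above now sizes its argument in words and delegates the copy-then-zero dance to bitmap_copy_and_extend(). A userspace sketch of the same semantics, word-granular as at these call sites (the real kernel helper also handles bit-granular sizes; all names here are stand-ins):

#include <limits.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(long))

/* Copy the first 'copy' bits of src into dst, zero the rest up to 'size'. */
static void bitmap_copy_and_extend(unsigned long *dst, const unsigned long *src,
				   unsigned int copy, unsigned int size)
{
	unsigned int copy_words = copy / BITS_PER_LONG;
	unsigned int size_words = size / BITS_PER_LONG;

	memcpy(dst, src, copy_words * sizeof(long));
	memset(dst + copy_words, 0, (size_words - copy_words) * sizeof(long));
}

int main(void)
{
	unsigned long old_fds[2] = { 0xdeadbeefUL, 0x1UL };
	unsigned long new_fds[4];

	/* Grow from 2 words of open_fds to 4: copy 2 words, clear 2. */
	bitmap_copy_and_extend(new_fds, old_fds,
			       2 * BITS_PER_LONG, 4 * BITS_PER_LONG);
	printf("%lx %lx %lx %lx\n",
	       new_fds[0], new_fds[1], new_fds[2], new_fds[3]);
	return 0;
}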
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 9eb191b5c4de..7146038b2fe7 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1618,9 +1618,11 @@ static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
this_num = min_t(unsigned, num, PAGE_SIZE - offset);
err = fuse_copy_page(cs, &page, offset, this_num, 0);
- if (!err && offset == 0 &&
- (this_num == PAGE_SIZE || file_size == end))
+ if (!PageUptodate(page) && !err && offset == 0 &&
+ (this_num == PAGE_SIZE || file_size == end)) {
+ zero_user_segment(page, this_num, PAGE_SIZE);
SetPageUptodate(page);
+ }
unlock_page(page);
put_page(page);
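The fuse hunk zeroes the tail of a partially filled page before marking it uptodate, so stale page contents can never become readable. The rule in miniature (plain C stand-ins for the page helpers):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static void store_partial(unsigned char *page, const void *data,
			  size_t this_num)
{
	memcpy(page, data, this_num);
	/* zero_user_segment(page, this_num, PAGE_SIZE) equivalent:
	 * the unwritten tail must be cleared before the page is
	 * published as uptodate.
	 */
	memset(page + this_num, 0, PAGE_SIZE - this_num);
}

int main(void)
{
	unsigned char page[PAGE_SIZE];

	memset(page, 0xAA, sizeof(page));	/* pretend stale contents */
	store_partial(page, "hello", 5);
	printf("tail byte: %#x\n", page[PAGE_SIZE - 1]);	/* 0 */
	return 0;
}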
diff --git a/fs/inode.c b/fs/inode.c
index 86670941884b..10c4619faeef 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -488,6 +488,39 @@ static void inode_lru_list_del(struct inode *inode)
this_cpu_dec(nr_unused);
}
+static void inode_pin_lru_isolating(struct inode *inode)
+{
+ lockdep_assert_held(&inode->i_lock);
+ WARN_ON(inode->i_state & (I_LRU_ISOLATING | I_FREEING | I_WILL_FREE));
+ inode->i_state |= I_LRU_ISOLATING;
+}
+
+static void inode_unpin_lru_isolating(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ WARN_ON(!(inode->i_state & I_LRU_ISOLATING));
+ inode->i_state &= ~I_LRU_ISOLATING;
+ smp_mb();
+ wake_up_bit(&inode->i_state, __I_LRU_ISOLATING);
+ spin_unlock(&inode->i_lock);
+}
+
+static void inode_wait_for_lru_isolating(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ if (inode->i_state & I_LRU_ISOLATING) {
+ DEFINE_WAIT_BIT(wq, &inode->i_state, __I_LRU_ISOLATING);
+ wait_queue_head_t *wqh;
+
+ wqh = bit_waitqueue(&inode->i_state, __I_LRU_ISOLATING);
+ spin_unlock(&inode->i_lock);
+ __wait_on_bit(wqh, &wq, bit_wait, TASK_UNINTERRUPTIBLE);
+ spin_lock(&inode->i_lock);
+ WARN_ON(inode->i_state & I_LRU_ISOLATING);
+ }
+ spin_unlock(&inode->i_lock);
+}
+
/**
* inode_sb_list_add - add inode to the superblock list of inodes
* @inode: inode to add
@@ -657,6 +690,8 @@ static void evict(struct inode *inode)
inode_sb_list_del(inode);
+ inode_wait_for_lru_isolating(inode);
+
/*
* Wait for flusher thread to be done with the inode so that filesystem
* does not start destroying it while writeback is still running. Since
@@ -855,7 +890,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
* be under pressure before the cache inside the highmem zone.
*/
if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
- __iget(inode);
+ inode_pin_lru_isolating(inode);
spin_unlock(&inode->i_lock);
spin_unlock(lru_lock);
if (remove_inode_buffers(inode)) {
@@ -867,7 +902,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
__count_vm_events(PGINODESTEAL, reap);
mm_account_reclaimed_pages(reap);
}
- iput(inode);
+ inode_unpin_lru_isolating(inode);
spin_lock(lru_lock);
return LRU_RETRY;
}
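The inode.c changes replace the __iget()/iput() pair with a lightweight I_LRU_ISOLATING pin: the shrinker sets a state bit under i_lock while it drops buffers and page cache, and evict() waits for the bit instead of racing a full reference. A pthreads sketch of that handshake, where a mutex and condvar stand in for i_lock and the bit waitqueue (all names invented):

#include <pthread.h>
#include <stdio.h>

#define I_LRU_ISOLATING 0x1

struct fake_inode {
	pthread_mutex_t lock;
	pthread_cond_t wq;
	unsigned int state;
};

static void pin_lru_isolating(struct fake_inode *in)
{
	pthread_mutex_lock(&in->lock);
	in->state |= I_LRU_ISOLATING;
	pthread_mutex_unlock(&in->lock);
}

static void unpin_lru_isolating(struct fake_inode *in)
{
	pthread_mutex_lock(&in->lock);
	in->state &= ~I_LRU_ISOLATING;
	pthread_cond_broadcast(&in->wq);	/* wake_up_bit() analogue */
	pthread_mutex_unlock(&in->lock);
}

static void wait_for_lru_isolating(struct fake_inode *in)
{
	pthread_mutex_lock(&in->lock);
	while (in->state & I_LRU_ISOLATING)	/* __wait_on_bit() analogue */
		pthread_cond_wait(&in->wq, &in->lock);
	pthread_mutex_unlock(&in->lock);
}

static void *isolator(void *arg)
{
	unpin_lru_isolating(arg);	/* done dropping page cache */
	return NULL;
}

int main(void)
{
	struct fake_inode in = { PTHREAD_MUTEX_INITIALIZER,
				 PTHREAD_COND_INITIALIZER, 0 };
	pthread_t t;

	pin_lru_isolating(&in);
	pthread_create(&t, NULL, isolator, &in);
	wait_for_lru_isolating(&in);	/* evict() blocks here */
	pthread_join(t, NULL);
	puts("evicted safely");
	return 0;
}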
diff --git a/fs/libfs.c b/fs/libfs.c
index 8aa34870449f..02602d00939e 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -450,6 +450,14 @@ void simple_offset_destroy(struct offset_ctx *octx)
mtree_destroy(&octx->mt);
}
+static int offset_dir_open(struct inode *inode, struct file *file)
+{
+ struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
+
+ file->private_data = (void *)ctx->next_offset;
+ return 0;
+}
+
/**
* offset_dir_llseek - Advance the read position of a directory descriptor
* @file: an open directory whose position is to be updated
@@ -463,6 +471,9 @@ void simple_offset_destroy(struct offset_ctx *octx)
*/
static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
{
+ struct inode *inode = file->f_inode;
+ struct offset_ctx *ctx = inode->i_op->get_offset_ctx(inode);
+
switch (whence) {
case SEEK_CUR:
offset += file->f_pos;
@@ -476,7 +487,8 @@ static loff_t offset_dir_llseek(struct file *file, loff_t offset, int whence)
}
/* In this case, ->private_data is protected by f_pos_lock */
- file->private_data = NULL;
+ if (!offset)
+ file->private_data = (void *)ctx->next_offset;
return vfs_setpos(file, offset, LONG_MAX);
}
@@ -507,7 +519,7 @@ static bool offset_dir_emit(struct dir_context *ctx, struct dentry *dentry)
inode->i_ino, fs_umode_to_dtype(inode->i_mode));
}
-static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
+static void offset_iterate_dir(struct inode *inode, struct dir_context *ctx, long last_index)
{
struct offset_ctx *octx = inode->i_op->get_offset_ctx(inode);
struct dentry *dentry;
@@ -515,17 +527,21 @@ static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
while (true) {
dentry = offset_find_next(octx, ctx->pos);
if (!dentry)
- return ERR_PTR(-ENOENT);
+ return;
+
+ if (dentry2offset(dentry) >= last_index) {
+ dput(dentry);
+ return;
+ }
if (!offset_dir_emit(ctx, dentry)) {
dput(dentry);
- break;
+ return;
}
ctx->pos = dentry2offset(dentry) + 1;
dput(dentry);
}
- return NULL;
}
/**
@@ -552,22 +568,19 @@ static void *offset_iterate_dir(struct inode *inode, struct dir_context *ctx)
static int offset_readdir(struct file *file, struct dir_context *ctx)
{
struct dentry *dir = file->f_path.dentry;
+ long last_index = (long)file->private_data;
lockdep_assert_held(&d_inode(dir)->i_rwsem);
if (!dir_emit_dots(file, ctx))
return 0;
- /* In this case, ->private_data is protected by f_pos_lock */
- if (ctx->pos == DIR_OFFSET_MIN)
- file->private_data = NULL;
- else if (file->private_data == ERR_PTR(-ENOENT))
- return 0;
- file->private_data = offset_iterate_dir(d_inode(dir), ctx);
+ offset_iterate_dir(d_inode(dir), ctx, last_index);
return 0;
}
const struct file_operations simple_offset_dir_operations = {
+ .open = offset_dir_open,
.llseek = offset_dir_llseek,
.iterate_shared = offset_readdir,
.read = generic_read_dir,
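With offset_dir_open() above, each directory descriptor snapshots ctx->next_offset at open time (and again on llseek back to 0), and offset_readdir() stops once it reaches that bound, so entries created after the open cannot extend the walk indefinitely. The idea in miniature, where an array and counter stand in for the maple tree (names invented):

#include <stdio.h>

#define MAX_ENTRIES 16

struct fake_dir {
	const char *names[MAX_ENTRIES];
	long next_offset;		/* next offset to hand out */
};

static void dir_add(struct fake_dir *d, const char *name)
{
	d->names[d->next_offset++] = name;
}

static void readdir_upto(struct fake_dir *d, long pos, long last_index)
{
	/* dentry2offset(dentry) >= last_index terminates the walk */
	for (; pos < last_index; pos++)
		printf("emit %s at %ld\n", d->names[pos], pos);
}

int main(void)
{
	struct fake_dir d = { .next_offset = 0 };

	dir_add(&d, "a");
	dir_add(&d, "b");
	long last_index = d.next_offset;	/* offset_dir_open() snapshot */
	dir_add(&d, "c");			/* created after open */
	readdir_upto(&d, 0, last_index);	/* emits a, b only */
	return 0;
}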
diff --git a/fs/locks.c b/fs/locks.c
index 9afb16e0683f..e45cad40f8b6 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -2984,7 +2984,7 @@ static int __init filelock_init(void)
filelock_cache = kmem_cache_create("file_lock_cache",
sizeof(struct file_lock), 0, SLAB_PANIC, NULL);
- filelease_cache = kmem_cache_create("file_lock_cache",
+ filelease_cache = kmem_cache_create("file_lease_cache",
sizeof(struct file_lease), 0, SLAB_PANIC, NULL);
for_each_possible_cpu(i) {
diff --git a/fs/netfs/Kconfig b/fs/netfs/Kconfig
index 1b78e8b65ebc..7701c037c328 100644
--- a/fs/netfs/Kconfig
+++ b/fs/netfs/Kconfig
@@ -24,7 +24,7 @@ config NETFS_STATS
config NETFS_DEBUG
bool "Enable dynamic debugging netfslib and FS-Cache"
- depends on NETFS
+ depends on NETFS_SUPPORT
help
This permits debugging to be dynamically enabled in the local caching
management module. If this is set, the debugging output may be
diff --git a/fs/netfs/buffered_read.c b/fs/netfs/buffered_read.c
index a688d4c75d99..27c750d39476 100644
--- a/fs/netfs/buffered_read.c
+++ b/fs/netfs/buffered_read.c
@@ -10,6 +10,97 @@
#include "internal.h"
/*
+ * [DEPRECATED] Unlock the folios in a read operation for when the filesystem
+ * is using PG_private_2 and direct writing to the cache from here rather than
+ * marking the page for writeback.
+ *
+ * Note that we don't touch folio->private in this code.
+ */
+static void netfs_rreq_unlock_folios_pgpriv2(struct netfs_io_request *rreq,
+ size_t *account)
+{
+ struct netfs_io_subrequest *subreq;
+ struct folio *folio;
+ pgoff_t start_page = rreq->start / PAGE_SIZE;
+ pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
+ bool subreq_failed = false;
+
+ XA_STATE(xas, &rreq->mapping->i_pages, start_page);
+
+ /* Walk through the pagecache and the I/O request lists simultaneously.
+ * We may have a mixture of cached and uncached sections and we only
+ * really want to write out the uncached sections. This is slightly
+ * complicated by the possibility that we might have huge pages with a
+ * mixture inside.
+ */
+ subreq = list_first_entry(&rreq->subrequests,
+ struct netfs_io_subrequest, rreq_link);
+ subreq_failed = (subreq->error < 0);
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_unlock_pgpriv2);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, last_page) {
+ loff_t pg_end;
+ bool pg_failed = false;
+ bool folio_started = false;
+
+ if (xas_retry(&xas, folio))
+ continue;
+
+ pg_end = folio_pos(folio) + folio_size(folio) - 1;
+
+ for (;;) {
+ loff_t sreq_end;
+
+ if (!subreq) {
+ pg_failed = true;
+ break;
+ }
+
+ if (!folio_started &&
+ test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags) &&
+ fscache_operation_valid(&rreq->cache_resources)) {
+ trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+ folio_start_private_2(folio);
+ folio_started = true;
+ }
+
+ pg_failed |= subreq_failed;
+ sreq_end = subreq->start + subreq->len - 1;
+ if (pg_end < sreq_end)
+ break;
+
+ *account += subreq->transferred;
+ if (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+ subreq = list_next_entry(subreq, rreq_link);
+ subreq_failed = (subreq->error < 0);
+ } else {
+ subreq = NULL;
+ subreq_failed = false;
+ }
+
+ if (pg_end == sreq_end)
+ break;
+ }
+
+ if (!pg_failed) {
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
+ }
+
+ if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+ if (folio->index == rreq->no_unlock_folio &&
+ test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
+ _debug("no unlock");
+ else
+ folio_unlock(folio);
+ }
+ }
+ rcu_read_unlock();
+}
+
+/*
* Unlock the folios in a read operation. We need to set PG_writeback on any
* folios we're going to write back before we unlock them.
*
@@ -35,6 +126,12 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
}
}
+ /* Handle deprecated PG_private_2 case. */
+ if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
+ netfs_rreq_unlock_folios_pgpriv2(rreq, &account);
+ goto out;
+ }
+
/* Walk through the pagecache and the I/O request lists simultaneously.
* We may have a mixture of cached and uncached sections and we only
* really want to write out the uncached sections. This is slightly
@@ -52,7 +149,6 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
loff_t pg_end;
bool pg_failed = false;
bool wback_to_cache = false;
- bool folio_started = false;
if (xas_retry(&xas, folio))
continue;
@@ -66,17 +162,8 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
pg_failed = true;
break;
}
- if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
- if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE,
- &subreq->flags)) {
- trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
- folio_start_private_2(folio);
- folio_started = true;
- }
- } else {
- wback_to_cache |=
- test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
- }
+
+ wback_to_cache |= test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
pg_failed |= subreq_failed;
sreq_end = subreq->start + subreq->len - 1;
if (pg_end < sreq_end)
@@ -124,6 +211,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
}
rcu_read_unlock();
+out:
task_io_account_read(account);
if (rreq->netfs_ops->done)
rreq->netfs_ops->done(rreq);
@@ -395,7 +483,7 @@ zero_out:
}
/**
- * netfs_write_begin - Helper to prepare for writing
+ * netfs_write_begin - Helper to prepare for writing [DEPRECATED]
* @ctx: The netfs context
* @file: The file to read from
* @mapping: The mapping to read from
@@ -426,6 +514,9 @@ zero_out:
* inode before calling this.
*
* This is usable whether or not caching is enabled.
+ *
+ * Note that this should be considered deprecated and netfs_perform_write()
+ * used instead.
*/
int netfs_write_begin(struct netfs_inode *ctx,
struct file *file, struct address_space *mapping,
@@ -466,7 +557,7 @@ retry:
if (!netfs_is_cache_enabled(ctx) &&
netfs_skip_folio_read(folio, pos, len, false)) {
netfs_stat(&netfs_n_rh_write_zskip);
- goto have_folio;
+ goto have_folio_no_wait;
}
rreq = netfs_alloc_request(mapping, file,
@@ -507,6 +598,10 @@ retry:
netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
have_folio:
+ ret = folio_wait_private_2_killable(folio);
+ if (ret < 0)
+ goto error;
+have_folio_no_wait:
*_folio = folio;
_leave(" = 0");
return 0;
diff --git a/fs/netfs/buffered_write.c b/fs/netfs/buffered_write.c
index 4726c315453c..ca53c5d1622e 100644
--- a/fs/netfs/buffered_write.c
+++ b/fs/netfs/buffered_write.c
@@ -184,7 +184,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
ssize_t written = 0, ret, ret2;
loff_t i_size, pos = iocb->ki_pos, from, to;
- size_t max_chunk = PAGE_SIZE << MAX_PAGECACHE_ORDER;
+ size_t max_chunk = mapping_max_folio_size(mapping);
bool maybe_trouble = false;
if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
diff --git a/fs/netfs/fscache_cookie.c b/fs/netfs/fscache_cookie.c
index bce2492186d0..d4d4b3a8b106 100644
--- a/fs/netfs/fscache_cookie.c
+++ b/fs/netfs/fscache_cookie.c
@@ -741,6 +741,10 @@ again_locked:
spin_lock(&cookie->lock);
}
if (test_bit(FSCACHE_COOKIE_DO_LRU_DISCARD, &cookie->flags)) {
+ if (atomic_read(&cookie->n_accesses) != 0)
+ /* still being accessed: postpone it */
+ break;
+
__fscache_set_cookie_state(cookie,
FSCACHE_COOKIE_STATE_LRU_DISCARDING);
wake = true;
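The fscache_cookie.c hunk refuses the LRU_DISCARDING transition while n_accesses is still nonzero, postponing the discard until the last access is dropped. A small C11-atomics sketch of that gate (fake_cookie is an invented stand-in):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_cookie {
	atomic_int n_accesses;
	int state;			/* 0 = active, 1 = discarding */
};

static bool try_lru_discard(struct fake_cookie *c)
{
	if (atomic_load(&c->n_accesses) != 0)
		return false;		/* still being accessed: postpone */
	c->state = 1;
	return true;
}

int main(void)
{
	struct fake_cookie c = { .n_accesses = 1, .state = 0 };

	printf("busy: %s\n", try_lru_discard(&c) ? "discarded" : "postponed");
	atomic_fetch_sub(&c.n_accesses, 1);
	printf("idle: %s\n", try_lru_discard(&c) ? "discarded" : "postponed");
	return 0;
}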
diff --git a/fs/netfs/io.c b/fs/netfs/io.c
index c93851b98368..5367caf3fa28 100644
--- a/fs/netfs/io.c
+++ b/fs/netfs/io.c
@@ -99,6 +99,146 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
}
/*
+ * [DEPRECATED] Deal with the completion of writing the data to the cache. We
+ * have to clear the PG_fscache bits on the folios involved and release the
+ * caller's ref.
+ *
+ * May be called in softirq mode and we inherit a ref from the caller.
+ */
+static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
+ bool was_async)
+{
+ struct netfs_io_subrequest *subreq;
+ struct folio *folio;
+ pgoff_t unlocked = 0;
+ bool have_unlocked = false;
+
+ rcu_read_lock();
+
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
+
+ xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+ if (xas_retry(&xas, folio))
+ continue;
+
+ /* We might have multiple writes from the same huge
+ * folio, but we mustn't unlock a folio more than once.
+ */
+ if (have_unlocked && folio->index <= unlocked)
+ continue;
+ unlocked = folio_next_index(folio) - 1;
+ trace_netfs_folio(folio, netfs_folio_trace_end_copy);
+ folio_end_private_2(folio);
+ have_unlocked = true;
+ }
+ }
+
+ rcu_read_unlock();
+ netfs_rreq_completed(rreq, was_async);
+}
+
+static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
+ bool was_async) /* [DEPRECATED] */
+{
+ struct netfs_io_subrequest *subreq = priv;
+ struct netfs_io_request *rreq = subreq->rreq;
+
+ if (IS_ERR_VALUE(transferred_or_error)) {
+ netfs_stat(&netfs_n_rh_write_failed);
+ trace_netfs_failure(rreq, subreq, transferred_or_error,
+ netfs_fail_copy_to_cache);
+ } else {
+ netfs_stat(&netfs_n_rh_write_done);
+ }
+
+ trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
+
+ /* If we decrement nr_copy_ops to 0, the ref belongs to us. */
+ if (atomic_dec_and_test(&rreq->nr_copy_ops))
+ netfs_rreq_unmark_after_write(rreq, was_async);
+
+ netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+}
+
+/*
+ * [DEPRECATED] Perform any outstanding writes to the cache. We inherit a ref
+ * from the caller.
+ */
+static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
+{
+ struct netfs_cache_resources *cres = &rreq->cache_resources;
+ struct netfs_io_subrequest *subreq, *next, *p;
+ struct iov_iter iter;
+ int ret;
+
+ trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
+
+ /* We don't want terminating writes trying to wake us up whilst we're
+ * still going through the list.
+ */
+ atomic_inc(&rreq->nr_copy_ops);
+
+ list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
+ if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
+ list_del_init(&subreq->rreq_link);
+ netfs_put_subrequest(subreq, false,
+ netfs_sreq_trace_put_no_copy);
+ }
+ }
+
+ list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
+ /* Amalgamate adjacent writes */
+ while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
+ next = list_next_entry(subreq, rreq_link);
+ if (next->start != subreq->start + subreq->len)
+ break;
+ subreq->len += next->len;
+ list_del_init(&next->rreq_link);
+ netfs_put_subrequest(next, false,
+ netfs_sreq_trace_put_merged);
+ }
+
+ ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
+ subreq->len, rreq->i_size, true);
+ if (ret < 0) {
+ trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
+ continue;
+ }
+
+ iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
+ subreq->start, subreq->len);
+
+ atomic_inc(&rreq->nr_copy_ops);
+ netfs_stat(&netfs_n_rh_write);
+ netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
+ trace_netfs_sreq(subreq, netfs_sreq_trace_write);
+ cres->ops->write(cres, subreq->start, &iter,
+ netfs_rreq_copy_terminated, subreq);
+ }
+
+ /* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
+ if (atomic_dec_and_test(&rreq->nr_copy_ops))
+ netfs_rreq_unmark_after_write(rreq, false);
+}
+
+static void netfs_rreq_write_to_cache_work(struct work_struct *work) /* [DEPRECATED] */
+{
+ struct netfs_io_request *rreq =
+ container_of(work, struct netfs_io_request, work);
+
+ netfs_rreq_do_write_to_cache(rreq);
+}
+
+static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq) /* [DEPRECATED] */
+{
+ rreq->work.func = netfs_rreq_write_to_cache_work;
+ if (!queue_work(system_unbound_wq, &rreq->work))
+ BUG();
+}
+
+/*
* Handle a short read.
*/
static void netfs_rreq_short_read(struct netfs_io_request *rreq,
@@ -275,6 +415,10 @@ again:
clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
+ if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags) &&
+ test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags))
+ return netfs_rreq_write_to_cache(rreq);
+
netfs_rreq_completed(rreq, was_async);
}
@@ -386,7 +530,8 @@ incomplete:
if (transferred_or_error == 0) {
if (__test_and_set_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags)) {
- subreq->error = -ENODATA;
+ if (rreq->origin != NETFS_DIO_READ)
+ subreq->error = -ENODATA;
goto failed;
}
} else {
@@ -457,9 +602,14 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
}
if (subreq->len > ictx->zero_point - subreq->start)
subreq->len = ictx->zero_point - subreq->start;
+
+ /* We limit buffered reads to the EOF, but let the
+ * server deal with larger-than-EOF DIO/unbuffered
+ * reads.
+ */
+ if (subreq->len > rreq->i_size - subreq->start)
+ subreq->len = rreq->i_size - subreq->start;
}
- if (subreq->len > rreq->i_size - subreq->start)
- subreq->len = rreq->i_size - subreq->start;
if (rreq->rsize && subreq->len > rreq->rsize)
subreq->len = rreq->rsize;
@@ -595,11 +745,10 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
do {
_debug("submit %llx + %llx >= %llx",
rreq->start, rreq->submitted, rreq->i_size);
- if (rreq->origin == NETFS_DIO_READ &&
- rreq->start + rreq->submitted >= rreq->i_size)
- break;
if (!netfs_rreq_submit_slice(rreq, &io_iter))
break;
+ if (test_bit(NETFS_SREQ_NO_PROGRESS, &rreq->flags))
+ break;
if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
break;
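netfs_rreq_do_write_to_cache() above amalgamates adjacent subrequests before issuing cache writes, so one write covers each contiguous span. The merge step in isolation, with an array replacing the subrequest list (names invented):

#include <stdio.h>

struct range { unsigned long long start, len; };

/* Merge contiguous [start, start+len) ranges in place; returns new count. */
static int merge_adjacent(struct range *r, int n)
{
	int out = 0;

	for (int i = 1; i < n; i++) {
		if (r[i].start == r[out].start + r[out].len)
			r[out].len += r[i].len;	/* absorb the neighbour */
		else
			r[++out] = r[i];
	}
	return out + 1;
}

int main(void)
{
	struct range r[] = { {0, 4096}, {4096, 4096}, {16384, 4096} };
	int n = merge_adjacent(r, 3);

	for (int i = 0; i < n; i++)
		printf("write %llu+%llu\n", r[i].start, r[i].len);
	/* -> write 0+8192, write 16384+4096 */
	return 0;
}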
diff --git a/fs/netfs/objects.c b/fs/netfs/objects.c
index f4a642727479..0294df70c3ff 100644
--- a/fs/netfs/objects.c
+++ b/fs/netfs/objects.c
@@ -24,10 +24,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
struct netfs_io_request *rreq;
mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
struct kmem_cache *cache = mempool->pool_data;
- bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
- origin == NETFS_DIO_READ ||
- origin == NETFS_DIO_WRITE);
- bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
int ret;
for (;;) {
@@ -56,12 +52,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
refcount_set(&rreq->ref, 1);
__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
- if (cached) {
- __set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
- if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags))
- /* Filesystem uses deprecated PG_private_2 marking. */
- __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
- }
if (file && file->f_flags & O_NONBLOCK)
__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
if (rreq->netfs_ops->init_request) {
diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 9258d30cffe3..3f7e37e50c7d 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -94,6 +94,8 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
{
struct netfs_io_request *wreq;
struct netfs_inode *ictx;
+ bool is_buffered = (origin == NETFS_WRITEBACK ||
+ origin == NETFS_WRITETHROUGH);
wreq = netfs_alloc_request(mapping, file, start, 0, origin);
if (IS_ERR(wreq))
@@ -102,7 +104,7 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
_enter("R=%x", wreq->debug_id);
ictx = netfs_inode(wreq->inode);
- if (test_bit(NETFS_RREQ_WRITE_TO_CACHE, &wreq->flags))
+ if (is_buffered && netfs_is_cache_enabled(ictx))
fscache_begin_write_operation(&wreq->cache_resources, netfs_i_cookie(ictx));
wreq->contiguity = wreq->start;
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index 7202ce84d0eb..7a558dea75c4 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -265,6 +265,8 @@ static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *fi
{
rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
+ /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+ __set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
return 0;
}
@@ -361,7 +363,8 @@ void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
return;
sreq = netfs->sreq;
- if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+ if (test_bit(NFS_IOHDR_EOF, &hdr->flags) &&
+ sreq->rreq->origin != NETFS_DIO_READ)
__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
if (hdr->error)
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index fbed0027996f..e8adae1bc260 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -81,8 +81,6 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
{
netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
- /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
- __set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags);
}
extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 9e0ea6fc2aa3..34eb2c2cbcde 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -2069,8 +2069,7 @@ int nfsd_nl_listener_set_doit(struct sk_buff *skb, struct genl_info *info)
continue;
}
- ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa,
- SVC_SOCK_ANONYMOUS,
+ ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0,
get_current_cred());
/* always save the latest error */
if (ret < 0)
diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
index c71ae5c04306..4a20e92474b2 100644
--- a/fs/smb/client/cifs_debug.c
+++ b/fs/smb/client/cifs_debug.c
@@ -1072,7 +1072,7 @@ static int cifs_security_flags_proc_open(struct inode *inode, struct file *file)
static void
cifs_security_flags_handle_must_flags(unsigned int *flags)
{
- unsigned int signflags = *flags & CIFSSEC_MUST_SIGN;
+ unsigned int signflags = *flags & (CIFSSEC_MUST_SIGN | CIFSSEC_MUST_SEAL);
if ((*flags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
*flags = CIFSSEC_MUST_KRB5;
diff --git a/fs/smb/client/cifsfs.h b/fs/smb/client/cifsfs.h
index 62d5fee3e5eb..ca2bd204bcc5 100644
--- a/fs/smb/client/cifsfs.h
+++ b/fs/smb/client/cifsfs.h
@@ -147,6 +147,6 @@ extern const struct export_operations cifs_export_ops;
#endif /* CONFIG_CIFS_NFSD_EXPORT */
/* when changing internal version - update following two lines at same time */
-#define SMB3_PRODUCT_BUILD 49
-#define CIFS_VERSION "2.49"
+#define SMB3_PRODUCT_BUILD 50
+#define CIFS_VERSION "2.50"
#endif /* _CIFSFS_H */
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 8e86fec7dcd2..5c9b3e6cd95f 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -345,7 +345,7 @@ struct smb_version_operations {
/* connect to a server share */
int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *,
struct cifs_tcon *, const struct nls_table *);
- /* close tree connecion */
+ /* close tree connection */
int (*tree_disconnect)(const unsigned int, struct cifs_tcon *);
/* get DFS referrals */
int (*get_dfs_refer)(const unsigned int, struct cifs_ses *,
@@ -816,7 +816,7 @@ struct TCP_Server_Info {
* Protected by @refpath_lock and @srv_lock. The @refpath_lock is
* mostly used for not requiring a copy of @leaf_fullpath when getting
* cached or new DFS referrals (which might also sleep during I/O).
- * While @srv_lock is held for making string and NULL comparions against
+ * While @srv_lock is held for making string and NULL comparisons against
* both fields as in mount(2) and cache refresh.
*
* format: \\HOST\SHARE[\OPTIONAL PATH]
@@ -1471,29 +1471,6 @@ struct cifs_io_parms {
struct TCP_Server_Info *server;
};
-struct cifs_aio_ctx {
- struct kref refcount;
- struct list_head list;
- struct mutex aio_mutex;
- struct completion done;
- struct iov_iter iter;
- struct kiocb *iocb;
- struct cifsFileInfo *cfile;
- struct bio_vec *bv;
- loff_t pos;
- unsigned int nr_pinned_pages;
- ssize_t rc;
- unsigned int len;
- unsigned int total_len;
- unsigned int bv_need_unpin; /* If ->bv[] needs unpinning */
- bool should_dirty;
- /*
- * Indicates if this aio_ctx is for direct_io,
- * If yes, iter is a copy of the user passed iov_iter
- */
- bool direct_io;
-};
-
struct cifs_io_request {
struct netfs_io_request rreq;
struct cifsFileInfo *cfile;
@@ -1904,7 +1881,7 @@ static inline bool is_replayable_error(int error)
#define CIFSSEC_MAY_SIGN 0x00001
#define CIFSSEC_MAY_NTLMV2 0x00004
#define CIFSSEC_MAY_KRB5 0x00008
-#define CIFSSEC_MAY_SEAL 0x00040 /* not supported yet */
+#define CIFSSEC_MAY_SEAL 0x00040
#define CIFSSEC_MAY_NTLMSSP 0x00080 /* raw ntlmssp with ntlmv2 */
#define CIFSSEC_MUST_SIGN 0x01001
@@ -1914,11 +1891,11 @@ require use of the stronger protocol */
#define CIFSSEC_MUST_NTLMV2 0x04004
#define CIFSSEC_MUST_KRB5 0x08008
#ifdef CONFIG_CIFS_UPCALL
-#define CIFSSEC_MASK 0x8F08F /* flags supported if no weak allowed */
+#define CIFSSEC_MASK 0xCF0CF /* flags supported if no weak allowed */
#else
-#define CIFSSEC_MASK 0x87087 /* flags supported if no weak allowed */
+#define CIFSSEC_MASK 0xC70C7 /* flags supported if no weak allowed */
#endif /* UPCALL */
-#define CIFSSEC_MUST_SEAL 0x40040 /* not supported yet */
+#define CIFSSEC_MUST_SEAL 0x40040
#define CIFSSEC_MUST_NTLMSSP 0x80080 /* raw ntlmssp with ntlmv2 */
#define CIFSSEC_DEF (CIFSSEC_MAY_SIGN | CIFSSEC_MAY_NTLMV2 | CIFSSEC_MAY_NTLMSSP | CIFSSEC_MAY_SEAL)
@@ -2010,7 +1987,6 @@ require use of the stronger protocol */
* cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo
* ->invalidHandle initiate_cifs_search
* ->oplock_break_cancelled
- * cifs_aio_ctx->aio_mutex cifs_aio_ctx cifs_aio_ctx_alloc
****************************************************************************/
#ifdef DECLARE_GLOBALS_HERE
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index c15bb5ee7eb7..497bf3c447bc 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -619,8 +619,6 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
struct shash_desc *shash);
enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
enum securityEnum);
-struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
-void cifs_aio_ctx_release(struct kref *refcount);
int cifs_alloc_hash(const char *name, struct shash_desc **sdesc);
void cifs_free_hash(struct shash_desc **sdesc);
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index b2405dd4d4d4..1fc66bcf49eb 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -217,7 +217,8 @@ static void cifs_req_issue_read(struct netfs_io_subrequest *subreq)
goto out;
}
- __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
+ if (subreq->rreq->origin != NETFS_DIO_READ)
+ __set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
rc = rdata->server->ops->async_readv(rdata);
out:
@@ -315,7 +316,7 @@ static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
#endif
}
- if (rdata->credits.value != 0)
+ if (rdata->credits.value != 0) {
trace_smb3_rw_credits(rdata->rreq->debug_id,
rdata->subreq.debug_index,
rdata->credits.value,
@@ -323,8 +324,12 @@ static void cifs_free_subrequest(struct netfs_io_subrequest *subreq)
rdata->server ? rdata->server->in_flight : 0,
-rdata->credits.value,
cifs_trace_rw_credits_free_subreq);
+ if (rdata->server)
+ add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
+ else
+ rdata->credits.value = 0;
+ }
- add_credits_and_wake_if(rdata->server, &rdata->credits, 0);
if (rdata->have_xid)
free_xid(rdata->xid);
}
@@ -2749,6 +2754,7 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
struct inode *inode = file->f_mapping->host;
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
ssize_t rc;
rc = netfs_start_io_write(inode);
@@ -2765,12 +2771,16 @@ cifs_writev(struct kiocb *iocb, struct iov_iter *from)
if (rc <= 0)
goto out;
- if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) &&
+ (cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
server->vals->exclusive_lock_type, 0,
- NULL, CIFS_WRITE_OP))
- rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
- else
+ NULL, CIFS_WRITE_OP))) {
rc = -EACCES;
+ goto out;
+ }
+
+ rc = netfs_buffered_write_iter_locked(iocb, from, NULL);
+
out:
up_read(&cinode->lock_sem);
netfs_end_io_write(inode);
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 4a8aa1de9522..dd0afa23734c 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -1042,13 +1042,26 @@ static int reparse_info_to_fattr(struct cifs_open_info_data *data,
}
rc = -EOPNOTSUPP;
- switch ((data->reparse.tag = tag)) {
- case 0: /* SMB1 symlink */
+ data->reparse.tag = tag;
+ if (!data->reparse.tag) {
if (server->ops->query_symlink) {
rc = server->ops->query_symlink(xid, tcon,
cifs_sb, full_path,
&data->symlink_target);
}
+ if (rc == -EOPNOTSUPP)
+ data->reparse.tag = IO_REPARSE_TAG_INTERNAL;
+ }
+
+ switch (data->reparse.tag) {
+ case 0: /* SMB1 symlink */
+ break;
+ case IO_REPARSE_TAG_INTERNAL:
+ rc = 0;
+ if (le32_to_cpu(data->fi.Attributes) & ATTR_DIRECTORY) {
+ cifs_create_junction_fattr(fattr, sb);
+ goto out;
+ }
break;
case IO_REPARSE_TAG_MOUNT_POINT:
cifs_create_junction_fattr(fattr, sb);
diff --git a/fs/smb/client/ioctl.c b/fs/smb/client/ioctl.c
index 855ac5a62edf..44dbaf9929a4 100644
--- a/fs/smb/client/ioctl.c
+++ b/fs/smb/client/ioctl.c
@@ -170,7 +170,10 @@ static long smb_mnt_get_fsinfo(unsigned int xid, struct cifs_tcon *tcon,
static int cifs_shutdown(struct super_block *sb, unsigned long arg)
{
struct cifs_sb_info *sbi = CIFS_SB(sb);
+ struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
__u32 flags;
+ int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -178,14 +181,21 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
if (get_user(flags, (__u32 __user *)arg))
return -EFAULT;
- if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH)
- return -EINVAL;
+ tlink = cifs_sb_tlink(sbi);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+
+ trace_smb3_shutdown_enter(flags, tcon->tid);
+ if (flags > CIFS_GOING_FLAGS_NOLOGFLUSH) {
+ rc = -EINVAL;
+ goto shutdown_out_err;
+ }
if (cifs_forced_shutdown(sbi))
- return 0;
+ goto shutdown_good;
cifs_dbg(VFS, "shut down requested (%d)", flags);
-/* trace_cifs_shutdown(sb, flags);*/
/*
* see:
@@ -201,7 +211,8 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
*/
case CIFS_GOING_FLAGS_DEFAULT:
cifs_dbg(FYI, "shutdown with default flag not supported\n");
- return -EINVAL;
+ rc = -EINVAL;
+ goto shutdown_out_err;
/*
* FLAGS_LOGFLUSH is easy since it asks to write out metadata (not
* data) but metadata writes are not cached on the client, so can treat
@@ -210,11 +221,18 @@ static int cifs_shutdown(struct super_block *sb, unsigned long arg)
case CIFS_GOING_FLAGS_LOGFLUSH:
case CIFS_GOING_FLAGS_NOLOGFLUSH:
sbi->mnt_cifs_flags |= CIFS_MOUNT_SHUTDOWN;
- return 0;
+ goto shutdown_good;
default:
- return -EINVAL;
+ rc = -EINVAL;
+ goto shutdown_out_err;
}
+
+shutdown_good:
+ trace_smb3_shutdown_done(flags, tcon->tid);
return 0;
+shutdown_out_err:
+ trace_smb3_shutdown_err(rc, flags, tcon->tid);
+ return rc;
}
static int cifs_dump_full_key(struct cifs_tcon *tcon, struct smb3_full_key_debug_info __user *in)
diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
index 07c468ddb88a..c6f11e6f9eb9 100644
--- a/fs/smb/client/misc.c
+++ b/fs/smb/client/misc.c
@@ -352,7 +352,7 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
* on simple responses (wct, bcc both zero)
* in particular have seen this on
* ulogoffX and FindClose. This leaves
- * one byte of bcc potentially unitialized
+ * one byte of bcc potentially uninitialized
*/
/* zero rest of bcc */
tmp[sizeof(struct smb_hdr)+1] = 0;
@@ -995,60 +995,6 @@ parse_DFS_referrals_exit:
return rc;
}
-struct cifs_aio_ctx *
-cifs_aio_ctx_alloc(void)
-{
- struct cifs_aio_ctx *ctx;
-
- /*
- * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
- * to false so that we know when we have to unreference pages within
- * cifs_aio_ctx_release()
- */
- ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
- if (!ctx)
- return NULL;
-
- INIT_LIST_HEAD(&ctx->list);
- mutex_init(&ctx->aio_mutex);
- init_completion(&ctx->done);
- kref_init(&ctx->refcount);
- return ctx;
-}
-
-void
-cifs_aio_ctx_release(struct kref *refcount)
-{
- struct cifs_aio_ctx *ctx = container_of(refcount,
- struct cifs_aio_ctx, refcount);
-
- cifsFileInfo_put(ctx->cfile);
-
- /*
- * ctx->bv is only set if setup_aio_ctx_iter() was call successfuly
- * which means that iov_iter_extract_pages() was a success and thus
- * that we may have references or pins on pages that we need to
- * release.
- */
- if (ctx->bv) {
- if (ctx->should_dirty || ctx->bv_need_unpin) {
- unsigned int i;
-
- for (i = 0; i < ctx->nr_pinned_pages; i++) {
- struct page *page = ctx->bv[i].bv_page;
-
- if (ctx->should_dirty)
- set_page_dirty(page);
- if (ctx->bv_need_unpin)
- unpin_user_page(page);
- }
- }
- kvfree(ctx->bv);
- }
-
- kfree(ctx);
-}
-
/**
* cifs_alloc_hash - allocate hash and hash context together
* @name: The name of the crypto hash algo
@@ -1288,6 +1234,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
const char *full_path,
bool *islink)
{
+ struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_ses *ses = tcon->ses;
size_t len;
char *path;
@@ -1304,12 +1251,12 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid,
!is_tcon_dfs(tcon))
return 0;
- spin_lock(&tcon->tc_lock);
- if (!tcon->origin_fullpath) {
- spin_unlock(&tcon->tc_lock);
+ spin_lock(&server->srv_lock);
+ if (!server->leaf_fullpath) {
+ spin_unlock(&server->srv_lock);
return 0;
}
- spin_unlock(&tcon->tc_lock);
+ spin_unlock(&server->srv_lock);
/*
* Slow path - tcon is DFS and @full_path has prefix path, so attempt
diff --git a/fs/smb/client/reparse.c b/fs/smb/client/reparse.c
index a0ffbda90733..689d8a506d45 100644
--- a/fs/smb/client/reparse.c
+++ b/fs/smb/client/reparse.c
@@ -505,6 +505,10 @@ bool cifs_reparse_point_to_fattr(struct cifs_sb_info *cifs_sb,
}
switch (tag) {
+ case IO_REPARSE_TAG_INTERNAL:
+ if (!(fattr->cf_cifsattrs & ATTR_DIRECTORY))
+ return false;
+ fallthrough;
case IO_REPARSE_TAG_DFS:
case IO_REPARSE_TAG_DFSR:
case IO_REPARSE_TAG_MOUNT_POINT:
diff --git a/fs/smb/client/reparse.h b/fs/smb/client/reparse.h
index 6b55d1df9e2f..2c0644bc4e65 100644
--- a/fs/smb/client/reparse.h
+++ b/fs/smb/client/reparse.h
@@ -12,6 +12,12 @@
#include "fs_context.h"
#include "cifsglob.h"
+/*
+ * Used only by cifs.ko to ignore reparse points from files when client or
+ * server doesn't support FSCTL_GET_REPARSE_POINT.
+ */
+#define IO_REPARSE_TAG_INTERNAL ((__u32)~0U)
+
static inline dev_t reparse_nfs_mkdev(struct reparse_posix_data *buf)
{
u64 v = le64_to_cpu(*(__le64 *)buf->DataBuffer);
@@ -78,10 +84,19 @@ static inline u32 reparse_mode_wsl_tag(mode_t mode)
static inline bool reparse_inode_match(struct inode *inode,
struct cifs_fattr *fattr)
{
+ struct cifsInodeInfo *cinode = CIFS_I(inode);
struct timespec64 ctime = inode_get_ctime(inode);
- return (CIFS_I(inode)->cifsAttrs & ATTR_REPARSE) &&
- CIFS_I(inode)->reparse_tag == fattr->cf_cifstag &&
+ /*
+ * Do not match reparse tags when client or server doesn't support
+ * FSCTL_GET_REPARSE_POINT. @fattr->cf_cifstag should contain correct
+ * reparse tag from query dir response but the client won't be able to
+ * read the reparse point data anyway. This spares us a revalidation.
+ */
+ if (cinode->reparse_tag != IO_REPARSE_TAG_INTERNAL &&
+ cinode->reparse_tag != fattr->cf_cifstag)
+ return false;
+ return (cinode->cifsAttrs & ATTR_REPARSE) &&
timespec64_equal(&ctime, &fattr->cf_ctime);
}
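IO_REPARSE_TAG_INTERNAL is an all-ones sentinel that can never appear on the wire; reparse_inode_match() treats it as "tag unknown" and skips the tag comparison rather than forcing a revalidation. The match rule in miniature (tags reduced to plain integers; the sample tag values below are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IO_REPARSE_TAG_INTERNAL ((uint32_t)~0U)	/* never a real tag */

static bool reparse_tag_matches(uint32_t cached_tag, uint32_t seen_tag)
{
	/* Unknown cached tag: don't let a mismatch invalidate the inode. */
	if (cached_tag == IO_REPARSE_TAG_INTERNAL)
		return true;
	return cached_tag == seen_tag;
}

int main(void)
{
	printf("%d\n", reparse_tag_matches(0xA000000C, 0xA000000C));	/* 1 */
	printf("%d\n", reparse_tag_matches(0xA000000C, 0xA0000003));	/* 0 */
	printf("%d\n", reparse_tag_matches(IO_REPARSE_TAG_INTERNAL,
					   0xA0000003));		/* 1 */
	return 0;
}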
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 5c02a12251c8..9f5bc41433c1 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -930,6 +930,8 @@ int smb2_query_path_info(const unsigned int xid,
switch (rc) {
case 0:
+ rc = parse_create_response(data, cifs_sb, &out_iov[0]);
+ break;
case -EOPNOTSUPP:
/*
* BB TODO: When support for special files added to Samba
@@ -948,7 +950,8 @@ int smb2_query_path_info(const unsigned int xid,
cmds[num_cmds++] = SMB2_OP_GET_REPARSE;
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
- FILE_READ_ATTRIBUTES | FILE_READ_EA,
+ FILE_READ_ATTRIBUTES |
+ FILE_READ_EA | SYNCHRONIZE,
FILE_OPEN, create_options |
OPEN_REPARSE_POINT, ACL_NO_MODE);
cifs_get_readable_path(tcon, full_path, &cfile);
@@ -1256,7 +1259,8 @@ int smb2_query_reparse_point(const unsigned int xid,
cifs_dbg(FYI, "%s: path: %s\n", __func__, full_path);
cifs_get_readable_path(tcon, full_path, &cfile);
- oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_READ_ATTRIBUTES,
+ oparms = CIFS_OPARMS(cifs_sb, tcon, full_path,
+ FILE_READ_ATTRIBUTES | FILE_READ_EA | SYNCHRONIZE,
FILE_OPEN, OPEN_REPARSE_POINT, ACL_NO_MODE);
rc = smb2_compound_op(xid, tcon, cifs_sb,
full_path, &oparms, &in_iov,
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index 9a06b5594669..83facb54276a 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -82,6 +82,9 @@ int smb3_encryption_required(const struct cifs_tcon *tcon)
if (tcon->seal &&
(tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
return 1;
+ if (((global_secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL) &&
+ (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION))
+ return 1;
return 0;
}
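The new smb3_encryption_required() test compares the masked value against the full CIFSSEC_MUST_SEAL constant because that constant carries two bits (MAY_SEAL 0x00040 plus the MUST bit 0x40000); a simple nonzero & would fire on may-seal alone. The idiom in isolation:

#include <stdbool.h>
#include <stdio.h>

#define CIFSSEC_MAY_SEAL  0x00040
#define CIFSSEC_MUST_SEAL 0x40040	/* MAY bit + MUST bit */

static bool must_seal(unsigned int secflags)
{
	/* Both bits must be set for sealing to be mandatory. */
	return (secflags & CIFSSEC_MUST_SEAL) == CIFSSEC_MUST_SEAL;
}

int main(void)
{
	printf("may only:  %d\n", must_seal(CIFSSEC_MAY_SEAL));		/* 0 */
	printf("must seal: %d\n", must_seal(CIFSSEC_MUST_SEAL));	/* 1 */
	return 0;
}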
diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
index d74e829de51c..7bcc379014ca 100644
--- a/fs/smb/client/smbdirect.c
+++ b/fs/smb/client/smbdirect.c
@@ -406,7 +406,7 @@ static void smbd_post_send_credits(struct work_struct *work)
else
response = get_empty_queue_buffer(info);
if (!response) {
- /* now switch to emtpy packet queue */
+ /* now switch to empty packet queue */
if (use_receive_queue) {
use_receive_queue = 0;
continue;
@@ -618,7 +618,7 @@ out:
/*
* Test if FRWR (Fast Registration Work Requests) is supported on the device
- * This implementation requries FRWR on RDMA read/write
+ * This implementation requires FRWR on RDMA read/write
* return value: true if it is supported
*/
static bool frwr_is_supported(struct ib_device_attr *attrs)
@@ -2177,7 +2177,7 @@ cleanup_entries:
* MR available in the list. It may access the list while the
* smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
* as they never modify the same places. However, there may be several CPUs
- * issueing I/O trying to get MR at the same time, mr_list_lock is used to
+ * issuing I/O trying to get MR at the same time, mr_list_lock is used to
* protect this situation.
*/
static struct smbd_mr *get_mr(struct smbd_connection *info)
@@ -2311,7 +2311,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
/*
* There is no need for waiting for complemtion on ib_post_send
* on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
- * on the next ib_post_send when we actaully send I/O to remote peer
+ * on the next ib_post_send when we actually send I/O to remote peer
*/
rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
if (!rc)
diff --git a/fs/smb/client/trace.h b/fs/smb/client/trace.h
index 6b3bdfb97211..0f0c10c7ada7 100644
--- a/fs/smb/client/trace.h
+++ b/fs/smb/client/trace.h
@@ -1388,7 +1388,7 @@ DECLARE_EVENT_CLASS(smb3_ioctl_class,
__entry->command = command;
),
TP_printk("xid=%u fid=0x%llx ioctl cmd=0x%x",
- __entry->xid, __entry->fid, __entry->command)
+ __entry->xid, __entry->fid, __entry->command)
)
#define DEFINE_SMB3_IOCTL_EVENT(name) \
@@ -1400,9 +1400,58 @@ DEFINE_EVENT(smb3_ioctl_class, smb3_##name, \
DEFINE_SMB3_IOCTL_EVENT(ioctl);
+DECLARE_EVENT_CLASS(smb3_shutdown_class,
+ TP_PROTO(__u32 flags,
+ __u32 tid),
+ TP_ARGS(flags, tid),
+ TP_STRUCT__entry(
+ __field(__u32, flags)
+ __field(__u32, tid)
+ ),
+ TP_fast_assign(
+ __entry->flags = flags;
+ __entry->tid = tid;
+ ),
+ TP_printk("flags=0x%x tid=0x%x",
+ __entry->flags, __entry->tid)
+)
+
+#define DEFINE_SMB3_SHUTDOWN_EVENT(name) \
+DEFINE_EVENT(smb3_shutdown_class, smb3_##name, \
+ TP_PROTO(__u32 flags, \
+ __u32 tid), \
+ TP_ARGS(flags, tid))
+
+DEFINE_SMB3_SHUTDOWN_EVENT(shutdown_enter);
+DEFINE_SMB3_SHUTDOWN_EVENT(shutdown_done);
+DECLARE_EVENT_CLASS(smb3_shutdown_err_class,
+ TP_PROTO(int rc,
+ __u32 flags,
+ __u32 tid),
+ TP_ARGS(rc, flags, tid),
+ TP_STRUCT__entry(
+ __field(int, rc)
+ __field(__u32, flags)
+ __field(__u32, tid)
+ ),
+ TP_fast_assign(
+ __entry->rc = rc;
+ __entry->flags = flags;
+ __entry->tid = tid;
+ ),
+ TP_printk("rc=%d flags=0x%x tid=0x%x",
+ __entry->rc, __entry->flags, __entry->tid)
+)
+#define DEFINE_SMB3_SHUTDOWN_ERR_EVENT(name) \
+DEFINE_EVENT(smb3_shutdown_err_class, smb3_##name, \
+ TP_PROTO(int rc, \
+ __u32 flags, \
+ __u32 tid), \
+ TP_ARGS(rc, flags, tid))
+DEFINE_SMB3_SHUTDOWN_ERR_EVENT(shutdown_err);
DECLARE_EVENT_CLASS(smb3_credit_class,
TP_PROTO(__u64 currmid,
diff --git a/fs/smb/client/transport.c b/fs/smb/client/transport.c
index adfe0d058701..6e68aaf5bd20 100644
--- a/fs/smb/client/transport.c
+++ b/fs/smb/client/transport.c
@@ -1289,7 +1289,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
out:
/*
* This will dequeue all mids. After this it is important that the
- * demultiplex_thread will not process any of these mids any futher.
+ * demultiplex_thread will not process any of these mids any further.
* This is prevented above by using a noop callback that will not
* wake this thread except for the very last PDU.
*/
diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h
index c3ee42188d25..c769f9dbc0b4 100644
--- a/fs/smb/common/smb2pdu.h
+++ b/fs/smb/common/smb2pdu.h
@@ -1216,6 +1216,8 @@ struct create_context {
);
__u8 Buffer[];
} __packed;
+static_assert(offsetof(struct create_context, Buffer) == sizeof(struct create_context_hdr),
+ "struct member likely outside of __struct_group()");
struct smb2_create_req {
struct smb2_hdr hdr;
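The static_assert added to smb2pdu.h pins Buffer[] to start exactly where the grouped header fields end, catching at compile time any member accidentally added outside the __struct_group(). A self-contained sketch of the same layout check (struct names invented):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct hdr {
	uint16_t size;
	uint16_t flags;
};

struct packet {
	struct hdr h;		/* stands in for the __struct_group() */
	uint8_t buffer[];	/* payload must follow the header directly */
};

static_assert(offsetof(struct packet, buffer) == sizeof(struct hdr),
	      "struct member likely outside of the header group");

int main(void) { return 0; }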
diff --git a/fs/smb/server/mgmt/share_config.c b/fs/smb/server/mgmt/share_config.c
index e0a6b758094f..d8d03070ae44 100644
--- a/fs/smb/server/mgmt/share_config.c
+++ b/fs/smb/server/mgmt/share_config.c
@@ -15,6 +15,7 @@
#include "share_config.h"
#include "user_config.h"
#include "user_session.h"
+#include "../connection.h"
#include "../transport_ipc.h"
#include "../misc.h"
@@ -120,12 +121,13 @@ static int parse_veto_list(struct ksmbd_share_config *share,
return 0;
}
-static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
+static struct ksmbd_share_config *share_config_request(struct ksmbd_work *work,
const char *name)
{
struct ksmbd_share_config_response *resp;
struct ksmbd_share_config *share = NULL;
struct ksmbd_share_config *lookup;
+ struct unicode_map *um = work->conn->um;
int ret;
resp = ksmbd_ipc_share_config_request(name);
@@ -181,7 +183,14 @@ static struct ksmbd_share_config *share_config_request(struct unicode_map *um,
KSMBD_SHARE_CONFIG_VETO_LIST(resp),
resp->veto_list_sz);
if (!ret && share->path) {
+ if (__ksmbd_override_fsids(work, share)) {
+ kill_share(share);
+ share = NULL;
+ goto out;
+ }
+
ret = kern_path(share->path, 0, &share->vfs_path);
+ ksmbd_revert_fsids(work);
if (ret) {
ksmbd_debug(SMB, "failed to access '%s'\n",
share->path);
@@ -214,7 +223,7 @@ out:
return share;
}
-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
const char *name)
{
struct ksmbd_share_config *share;
@@ -227,7 +236,7 @@ struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
if (share)
return share;
- return share_config_request(um, name);
+ return share_config_request(work, name);
}
bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
diff --git a/fs/smb/server/mgmt/share_config.h b/fs/smb/server/mgmt/share_config.h
index 5f591751b923..d4ac2dd4de20 100644
--- a/fs/smb/server/mgmt/share_config.h
+++ b/fs/smb/server/mgmt/share_config.h
@@ -11,6 +11,8 @@
#include <linux/path.h>
#include <linux/unicode.h>
+struct ksmbd_work;
+
struct ksmbd_share_config {
char *name;
char *path;
@@ -68,7 +70,7 @@ static inline void ksmbd_share_config_put(struct ksmbd_share_config *share)
__ksmbd_share_config_put(share);
}
-struct ksmbd_share_config *ksmbd_share_config_get(struct unicode_map *um,
+struct ksmbd_share_config *ksmbd_share_config_get(struct ksmbd_work *work,
const char *name);
bool ksmbd_share_veto_filename(struct ksmbd_share_config *share,
const char *filename);
diff --git a/fs/smb/server/mgmt/tree_connect.c b/fs/smb/server/mgmt/tree_connect.c
index d2c81a8a11dd..94a52a75014a 100644
--- a/fs/smb/server/mgmt/tree_connect.c
+++ b/fs/smb/server/mgmt/tree_connect.c
@@ -16,17 +16,18 @@
#include "user_session.h"
struct ksmbd_tree_conn_status
-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
- const char *share_name)
+ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
{
struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
struct ksmbd_tree_connect_response *resp = NULL;
struct ksmbd_share_config *sc;
struct ksmbd_tree_connect *tree_conn = NULL;
struct sockaddr *peer_addr;
+ struct ksmbd_conn *conn = work->conn;
+ struct ksmbd_session *sess = work->sess;
int ret;
- sc = ksmbd_share_config_get(conn->um, share_name);
+ sc = ksmbd_share_config_get(work, share_name);
if (!sc)
return status;
@@ -61,7 +62,7 @@ ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
struct ksmbd_share_config *new_sc;
ksmbd_share_config_del(sc);
- new_sc = ksmbd_share_config_get(conn->um, share_name);
+ new_sc = ksmbd_share_config_get(work, share_name);
if (!new_sc) {
pr_err("Failed to update stale share config\n");
status.ret = -ESTALE;
diff --git a/fs/smb/server/mgmt/tree_connect.h b/fs/smb/server/mgmt/tree_connect.h
index 6377a70b811c..a42cdd051041 100644
--- a/fs/smb/server/mgmt/tree_connect.h
+++ b/fs/smb/server/mgmt/tree_connect.h
@@ -13,6 +13,7 @@
struct ksmbd_share_config;
struct ksmbd_user;
struct ksmbd_conn;
+struct ksmbd_work;
enum {
TREE_NEW = 0,
@@ -50,8 +51,7 @@ static inline int test_tree_conn_flag(struct ksmbd_tree_connect *tree_conn,
struct ksmbd_session;
struct ksmbd_tree_conn_status
-ksmbd_tree_conn_connect(struct ksmbd_conn *conn, struct ksmbd_session *sess,
- const char *share_name);
+ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name);
void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon);
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c
index 37a39ab4ee65..2df1354288e6 100644
--- a/fs/smb/server/smb2pdu.c
+++ b/fs/smb/server/smb2pdu.c
@@ -1955,7 +1955,7 @@ int smb2_tree_connect(struct ksmbd_work *work)
ksmbd_debug(SMB, "tree connect request for tree %s treename %s\n",
name, treename);
- status = ksmbd_tree_conn_connect(conn, sess, name);
+ status = ksmbd_tree_conn_connect(work, name);
if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
rsp->hdr.Id.SyncId.TreeId = cpu_to_le32(status.tree_conn->id);
else
@@ -5596,6 +5596,11 @@ int smb2_query_info(struct ksmbd_work *work)
ksmbd_debug(SMB, "GOT query info request\n");
+ if (ksmbd_override_fsids(work)) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
switch (req->InfoType) {
case SMB2_O_INFO_FILE:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
@@ -5614,6 +5619,7 @@ int smb2_query_info(struct ksmbd_work *work)
req->InfoType);
rc = -EOPNOTSUPP;
}
+ ksmbd_revert_fsids(work);
if (!rc) {
rsp->StructureSize = cpu_to_le16(9);
@@ -5623,6 +5629,7 @@ int smb2_query_info(struct ksmbd_work *work)
le32_to_cpu(rsp->OutputBufferLength));
}
+err_out:
if (rc < 0) {
if (rc == -EACCES)
rsp->hdr.Status = STATUS_ACCESS_DENIED;
diff --git a/fs/smb/server/smb_common.c b/fs/smb/server/smb_common.c
index 474dadf6b7b8..13818ecb6e1b 100644
--- a/fs/smb/server/smb_common.c
+++ b/fs/smb/server/smb_common.c
@@ -732,10 +732,10 @@ bool is_asterisk(char *p)
return p && p[0] == '*';
}
-int ksmbd_override_fsids(struct ksmbd_work *work)
+int __ksmbd_override_fsids(struct ksmbd_work *work,
+ struct ksmbd_share_config *share)
{
struct ksmbd_session *sess = work->sess;
- struct ksmbd_share_config *share = work->tcon->share_conf;
struct cred *cred;
struct group_info *gi;
unsigned int uid;
@@ -775,6 +775,11 @@ int ksmbd_override_fsids(struct ksmbd_work *work)
return 0;
}
+int ksmbd_override_fsids(struct ksmbd_work *work)
+{
+ return __ksmbd_override_fsids(work, work->tcon->share_conf);
+}
+
void ksmbd_revert_fsids(struct ksmbd_work *work)
{
const struct cred *cred;
diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h
index f1092519c0c2..4a3148b0167f 100644
--- a/fs/smb/server/smb_common.h
+++ b/fs/smb/server/smb_common.h
@@ -447,6 +447,8 @@ int ksmbd_extract_shortname(struct ksmbd_conn *conn,
int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command);
int ksmbd_smb_check_shared_mode(struct file *filp, struct ksmbd_file *curr_fp);
+int __ksmbd_override_fsids(struct ksmbd_work *work,
+ struct ksmbd_share_config *share);
int ksmbd_override_fsids(struct ksmbd_work *work);
void ksmbd_revert_fsids(struct ksmbd_work *work);
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c
index 16bd693d0b3a..d5918eba27e3 100644
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -279,8 +279,13 @@ int squashfs_read_inode(struct inode *inode, long long ino)
if (err < 0)
goto failed_read;
- set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_size = le32_to_cpu(sqsh_ino->symlink_size);
+ if (inode->i_size > PAGE_SIZE) {
+ ERROR("Corrupted symlink\n");
+ return -EINVAL;
+ }
+
+ set_nlink(inode, le32_to_cpu(sqsh_ino->nlink));
inode->i_op = &squashfs_symlink_inode_ops;
inode_nohighmem(inode);
inode->i_data.a_ops = &squashfs_symlink_aops;
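The squashfs fix validates the on-disk symlink_size before any other inode state is set up, since the symlink body is later read into a single page. The bounds-check pattern in isolation (helper names invented):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static int read_symlink_size(uint32_t disk_size, uint32_t *out)
{
	/* The on-disk length is untrusted: reject anything larger than
	 * the buffer it will be read into.
	 */
	if (disk_size > PAGE_SIZE)
		return -1;	/* corrupted symlink: refuse the inode */
	*out = disk_size;
	return 0;
}

int main(void)
{
	uint32_t size;

	printf("sane:      %d\n", read_symlink_size(100, &size));	/* 0 */
	printf("corrupted: %d\n", read_symlink_size(1u << 20, &size));	/* -1 */
	return 0;
}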
diff --git a/fs/tracefs/event_inode.c b/fs/tracefs/event_inode.c
index 5d88c184f0fc..01e99e98457d 100644
--- a/fs/tracefs/event_inode.c
+++ b/fs/tracefs/event_inode.c
@@ -112,7 +112,7 @@ static void release_ei(struct kref *ref)
entry->release(entry->name, ei->data);
}
- call_rcu(&ei->rcu, free_ei_rcu);
+ call_srcu(&eventfs_srcu, &ei->rcu, free_ei_rcu);
}
static inline void put_ei(struct eventfs_inode *ei)
@@ -736,7 +736,7 @@ struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode
/* Was the parent freed? */
if (list_empty(&ei->list)) {
cleanup_ei(ei);
- ei = NULL;
+ ei = ERR_PTR(-EBUSY);
}
return ei;
}
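/*
 * Hedged sketch of the grace-period rule behind the call_srcu() change:
 * eventfs lookups run under srcu_read_lock(&eventfs_srcu), so the free
 * must wait on that same SRCU domain -- plain call_rcu() only waits for
 * rcu_read_lock() readers. Generic names, not eventfs structures.
 */
DEFINE_SRCU(demo_srcu);			/* stand-in for eventfs_srcu */

struct demo_obj {
	struct rcu_head rcu;
};

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_obj, rcu));
}

static void demo_put(struct demo_obj *o)
{
	/* waits for srcu_read_lock(&demo_srcu) sections to finish */
	call_srcu(&demo_srcu, &o->rcu, demo_free_rcu);
}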
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c
index 1028ab6d9a74..1748dff58c3b 100644
--- a/fs/tracefs/inode.c
+++ b/fs/tracefs/inode.c
@@ -42,7 +42,7 @@ static struct inode *tracefs_alloc_inode(struct super_block *sb)
struct tracefs_inode *ti;
unsigned long flags;
- ti = kmem_cache_alloc(tracefs_inode_cachep, GFP_KERNEL);
+ ti = alloc_inode_sb(sb, tracefs_inode_cachep, GFP_KERNEL);
if (!ti)
return NULL;
@@ -53,15 +53,14 @@ static struct inode *tracefs_alloc_inode(struct super_block *sb)
return &ti->vfs_inode;
}
-static void tracefs_free_inode_rcu(struct rcu_head *rcu)
+static void tracefs_free_inode(struct inode *inode)
{
- struct tracefs_inode *ti;
+ struct tracefs_inode *ti = get_tracefs(inode);
- ti = container_of(rcu, struct tracefs_inode, rcu);
kmem_cache_free(tracefs_inode_cachep, ti);
}
-static void tracefs_free_inode(struct inode *inode)
+static void tracefs_destroy_inode(struct inode *inode)
{
struct tracefs_inode *ti = get_tracefs(inode);
unsigned long flags;
@@ -69,8 +68,6 @@ static void tracefs_free_inode(struct inode *inode)
spin_lock_irqsave(&tracefs_inode_lock, flags);
list_del_rcu(&ti->list);
spin_unlock_irqrestore(&tracefs_inode_lock, flags);
-
- call_rcu(&ti->rcu, tracefs_free_inode_rcu);
}
static ssize_t default_read_file(struct file *file, char __user *buf,
@@ -437,6 +434,7 @@ static int tracefs_drop_inode(struct inode *inode)
static const struct super_operations tracefs_super_operations = {
.alloc_inode = tracefs_alloc_inode,
.free_inode = tracefs_free_inode,
+ .destroy_inode = tracefs_destroy_inode,
.drop_inode = tracefs_drop_inode,
.statfs = simple_statfs,
.show_options = tracefs_show_options,
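/*
 * Hedged sketch of the VFS contract the tracefs change relies on:
 * ->destroy_inode runs synchronously at eviction (safe to take locks
 * and unlink from shared lists), while ->free_inode is invoked from the
 * VFS's own RCU callback, so returning the slab object there is already
 * grace-period safe and no private rcu_head is needed. Names are
 * generic, not tracefs internals.
 */
struct demo_inode {
	struct inode vfs_inode;
	struct list_head list;
};

static struct kmem_cache *demo_inode_cachep;

static void demo_destroy_inode(struct inode *inode)
{
	/* synchronous teardown: list_del_rcu() under a spinlock goes here */
}

static void demo_free_inode(struct inode *inode)
{
	/* deferred past an RCU grace period by the VFS */
	kmem_cache_free(demo_inode_cachep,
			container_of(inode, struct demo_inode, vfs_inode));
}

static const struct super_operations demo_sops = {
	.free_inode	= demo_free_inode,
	.destroy_inode	= demo_destroy_inode,
};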
diff --git a/fs/tracefs/internal.h b/fs/tracefs/internal.h
index f704d8348357..d83c2a25f288 100644
--- a/fs/tracefs/internal.h
+++ b/fs/tracefs/internal.h
@@ -10,10 +10,7 @@ enum {
};
struct tracefs_inode {
- union {
- struct inode vfs_inode;
- struct rcu_head rcu;
- };
+ struct inode vfs_inode;
/* The below gets initialized with memset_after(ti, 0, vfs_inode) */
struct list_head list;
unsigned long flags;
diff --git a/fs/xfs/libxfs/xfs_quota_defs.h b/fs/xfs/libxfs/xfs_quota_defs.h
index cb035da3f990..fb05f44f6c75 100644
--- a/fs/xfs/libxfs/xfs_quota_defs.h
+++ b/fs/xfs/libxfs/xfs_quota_defs.h
@@ -56,7 +56,7 @@ typedef uint8_t xfs_dqtype_t;
* And, of course, we also need to take into account the dquot log format item
* used to describe each dquot.
*/
-#define XFS_DQUOT_LOGRES(mp) \
+#define XFS_DQUOT_LOGRES \
((sizeof(struct xfs_dq_logformat) + sizeof(struct xfs_disk_dquot)) * 6)
#define XFS_IS_QUOTA_ON(mp) ((mp)->m_qflags & XFS_ALL_QUOTA_ACCT)
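/*
 * Why the (mp) parameter could be dropped: the reservation is a constant
 * derived purely from on-disk structure sizes, never from per-mount
 * state. An object-like macro also turns any stale XFS_DQUOT_LOGRES(mp)
 * call site into a compile error instead of a silently ignored
 * argument. Illustrative build-time check of that mount independence:
 */
static_assert(XFS_DQUOT_LOGRES ==
	      (sizeof(struct xfs_dq_logformat) +
	       sizeof(struct xfs_disk_dquot)) * 6);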
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index 3dc8f785bf29..45aaf169806a 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -338,11 +338,11 @@ xfs_calc_write_reservation(
blksz);
t1 += adj;
t3 += adj;
- return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ return XFS_DQUOT_LOGRES + max3(t1, t2, t3);
}
t4 = xfs_calc_refcountbt_reservation(mp, 1);
- return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3));
+ return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
}
unsigned int
@@ -410,11 +410,11 @@ xfs_calc_itruncate_reservation(
xfs_refcountbt_block_count(mp, 4),
blksz);
- return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ return XFS_DQUOT_LOGRES + max3(t1, t2, t3);
}
t4 = xfs_calc_refcountbt_reservation(mp, 2);
- return XFS_DQUOT_LOGRES(mp) + max(t4, max3(t1, t2, t3));
+ return XFS_DQUOT_LOGRES + max(t4, max3(t1, t2, t3));
}
unsigned int
@@ -466,7 +466,7 @@ STATIC uint
xfs_calc_rename_reservation(
struct xfs_mount *mp)
{
- unsigned int overhead = XFS_DQUOT_LOGRES(mp);
+ unsigned int overhead = XFS_DQUOT_LOGRES;
struct xfs_trans_resv *resp = M_RES(mp);
unsigned int t1, t2, t3 = 0;
@@ -577,7 +577,7 @@ STATIC uint
xfs_calc_link_reservation(
struct xfs_mount *mp)
{
- unsigned int overhead = XFS_DQUOT_LOGRES(mp);
+ unsigned int overhead = XFS_DQUOT_LOGRES;
struct xfs_trans_resv *resp = M_RES(mp);
unsigned int t1, t2, t3 = 0;
@@ -641,7 +641,7 @@ STATIC uint
xfs_calc_remove_reservation(
struct xfs_mount *mp)
{
- unsigned int overhead = XFS_DQUOT_LOGRES(mp);
+ unsigned int overhead = XFS_DQUOT_LOGRES;
struct xfs_trans_resv *resp = M_RES(mp);
unsigned int t1, t2, t3 = 0;
@@ -729,7 +729,7 @@ xfs_calc_icreate_reservation(
struct xfs_mount *mp)
{
struct xfs_trans_resv *resp = M_RES(mp);
- unsigned int overhead = XFS_DQUOT_LOGRES(mp);
+ unsigned int overhead = XFS_DQUOT_LOGRES;
unsigned int t1, t2, t3 = 0;
t1 = xfs_calc_icreate_resv_alloc(mp);
@@ -747,7 +747,7 @@ STATIC uint
xfs_calc_create_tmpfile_reservation(
struct xfs_mount *mp)
{
- uint res = XFS_DQUOT_LOGRES(mp);
+ uint res = XFS_DQUOT_LOGRES;
res += xfs_calc_icreate_resv_alloc(mp);
return res + xfs_calc_iunlink_add_reservation(mp);
@@ -829,7 +829,7 @@ STATIC uint
xfs_calc_ifree_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
xfs_calc_iunlink_remove_reservation(mp) +
@@ -846,7 +846,7 @@ STATIC uint
xfs_calc_ichange_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize);
@@ -955,7 +955,7 @@ STATIC uint
xfs_calc_addafork_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(2, mp->m_sb.sb_sectsize) +
xfs_calc_buf_res(1, mp->m_dir_geo->blksize) +
@@ -1003,7 +1003,7 @@ STATIC uint
xfs_calc_attrsetm_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(1, mp->m_sb.sb_sectsize) +
xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH, XFS_FSB_TO_B(mp, 1));
@@ -1043,7 +1043,7 @@ STATIC uint
xfs_calc_attrrm_reservation(
struct xfs_mount *mp)
{
- return XFS_DQUOT_LOGRES(mp) +
+ return XFS_DQUOT_LOGRES +
max((xfs_calc_inode_res(mp, 1) +
xfs_calc_buf_res(XFS_DA_NODE_MAXDEPTH,
XFS_FSB_TO_B(mp, 1)) +
diff --git a/fs/xfs/scrub/agheader_repair.c b/fs/xfs/scrub/agheader_repair.c
index 0dbc484b182f..2f98d90d7fd6 100644
--- a/fs/xfs/scrub/agheader_repair.c
+++ b/fs/xfs/scrub/agheader_repair.c
@@ -696,7 +696,7 @@ xrep_agfl_init_header(
* step.
*/
xagb_bitmap_init(&af.used_extents);
- af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp),
+ af.agfl_bno = xfs_buf_to_agfl_bno(agfl_bp);
xagb_bitmap_walk(agfl_extents, xrep_agfl_fill, &af);
error = xagb_bitmap_disunion(agfl_extents, &af.used_extents);
if (error)
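/*
 * Sketch of the bug class fixed above (and again in xfs_attr_list.c
 * below): a trailing ',' where ';' was intended forms a single comma
 * expression. It compiles without warning, and works only by accident
 * of statement layout, which is why it gets fixed on sight.
 */
static int comma_demo(void)
{
	int a, b;

	a = 1,		/* intended ';' -- still one statement */
	b = 2;
	return a + b;
}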
diff --git a/fs/xfs/scrub/bmap.c b/fs/xfs/scrub/bmap.c
index 24a15bf784f1..5ab2ac53c920 100644
--- a/fs/xfs/scrub/bmap.c
+++ b/fs/xfs/scrub/bmap.c
@@ -938,7 +938,13 @@ xchk_bmap(
}
break;
case XFS_ATTR_FORK:
- if (!xfs_has_attr(mp) && !xfs_has_attr2(mp))
+ /*
+ * "attr" means that an attr fork was created at some point in
+ * the life of this filesystem. "attr2" means that inodes have
+ * variable-sized data/attr fork areas. Hence we only check
+ * attr here.
+ */
+ if (!xfs_has_attr(mp))
xchk_ino_set_corrupt(sc, sc->ip->i_ino);
break;
default:
diff --git a/fs/xfs/scrub/parent.c b/fs/xfs/scrub/parent.c
index 733c410a2279..91e7b51ce068 100644
--- a/fs/xfs/scrub/parent.c
+++ b/fs/xfs/scrub/parent.c
@@ -799,7 +799,7 @@ xchk_parent_pptr(
}
if (pp->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
- goto out_pp;
+ goto out_names;
/*
* Complain if the number of parent pointers doesn't match the link
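/*
 * Minimal sketch (hypothetical names) of the unwind-order rule behind
 * the one-line fix above: on error, jump to the label that releases
 * everything allocated so far; jumping too far down the label chain
 * leaks whatever the skipped label frees.
 */
static int check_corruption(void *p);	/* hypothetical check */

static int unwind_demo(void)
{
	void *pp, *names;
	int err = 0;

	pp = kmalloc(32, GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	names = kmalloc(32, GFP_KERNEL);
	if (!names) {
		err = -ENOMEM;
		goto out_pp;	/* nothing newer than pp to undo */
	}

	err = check_corruption(names);
	if (err)
		goto out_names;	/* not out_pp: names must be freed too */

out_names:
	kfree(names);
out_pp:
	kfree(pp);
	return err;
}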
diff --git a/fs/xfs/scrub/trace.h b/fs/xfs/scrub/trace.h
index 92ef4cdc486e..c886d5d0eb02 100644
--- a/fs/xfs/scrub/trace.h
+++ b/fs/xfs/scrub/trace.h
@@ -959,18 +959,16 @@ TRACE_EVENT(xfile_create,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned long, ino)
- __array(char, pathname, 256)
+ __array(char, pathname, MAXNAMELEN)
),
TP_fast_assign(
- char pathname[257];
char *path;
__entry->ino = file_inode(xf->file)->i_ino;
- memset(pathname, 0, sizeof(pathname));
- path = file_path(xf->file, pathname, sizeof(pathname) - 1);
+ path = file_path(xf->file, __entry->pathname, MAXNAMELEN);
if (IS_ERR(path))
- path = "(unknown)";
- strncpy(__entry->pathname, path, sizeof(__entry->pathname));
+ strncpy(__entry->pathname, "(unknown)",
+ sizeof(__entry->pathname));
),
TP_printk("xfino 0x%lx path '%s'",
__entry->ino,
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 5c947e5ce8b8..7db386304875 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -139,7 +139,7 @@ xfs_attr_shortform_list(
sbp->name = sfe->nameval;
sbp->namelen = sfe->namelen;
/* These are bytes, and both on-disk, don't endian-flip */
- sbp->value = &sfe->nameval[sfe->namelen],
+ sbp->value = &sfe->nameval[sfe->namelen];
sbp->valuelen = sfe->valuelen;
sbp->flags = sfe->flags;
sbp->hash = xfs_attr_hashval(dp->i_mount, sfe->flags,
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 4e933db75b12..6b13666d4e96 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -483,6 +483,17 @@ xfs_ioctl_setattr_xflags(
/* Can't change realtime flag if any extents are allocated. */
if (ip->i_df.if_nextents || ip->i_delayed_blks)
return -EINVAL;
+
+ /*
+ * If S_DAX is enabled on this file, we can only switch the
+ * device if both support fsdax. We can't update S_DAX because
+ * there might be other threads walking down the access paths.
+ */
+ if (IS_DAX(VFS_I(ip)) &&
+ (mp->m_ddev_targp->bt_daxdev == NULL ||
+ (mp->m_rtdev_targp &&
+ mp->m_rtdev_targp->bt_daxdev == NULL)))
+ return -EINVAL;
}
if (rtflag) {
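/*
 * The added check, restated as an illustrative predicate (not xfs
 * source): with S_DAX set on the inode, the realtime flag may only
 * change when every device the file could land on is DAX-capable,
 * since S_DAX itself cannot be flipped under concurrent access.
 */
static bool dax_switch_ok(bool s_dax, bool ddev_dax,
			  bool have_rtdev, bool rtdev_dax)
{
	if (!s_dax)
		return true;	/* S_DAX clear: nothing to preserve */
	return ddev_dax && (!have_rtdev || rtdev_dax);
}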
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 5646d300b286..180ce697305a 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -4715,20 +4715,18 @@ TRACE_EVENT(xmbuf_create,
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned long, ino)
- __array(char, pathname, 256)
+ __array(char, pathname, MAXNAMELEN)
),
TP_fast_assign(
- char pathname[257];
char *path;
struct file *file = btp->bt_file;
__entry->dev = btp->bt_mount->m_super->s_dev;
__entry->ino = file_inode(file)->i_ino;
- memset(pathname, 0, sizeof(pathname));
- path = file_path(file, pathname, sizeof(pathname) - 1);
+ path = file_path(file, __entry->pathname, MAXNAMELEN);
if (IS_ERR(path))
- path = "(unknown)";
- strncpy(__entry->pathname, path, sizeof(__entry->pathname));
+ strncpy(__entry->pathname, "(unknown)",
+ sizeof(__entry->pathname));
),
TP_printk("dev %d:%d xmino 0x%lx path '%s'",
MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 0fafcc9f3dbe..8ede9d099d1f 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -644,7 +644,12 @@ xfsaild(
set_freezable();
while (1) {
- if (tout)
+ /*
+ * Long waits of 50ms or more occur when we've run out of items
+ * to push, so we only want uninterruptible state if we're
+ * actually blocked on something.
+ */
+ if (tout && tout <= 20)
set_current_state(TASK_KILLABLE|TASK_FREEZABLE);
else
set_current_state(TASK_INTERRUPTIBLE|TASK_FREEZABLE);
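/*
 * Sketch of the sleep-state policy above (assumption: the concern is an
 * idle daemon parked in uninterruptible sleep, e.g. its effect on the
 * load average). Short timeouts mean work is genuinely blocked, so
 * sleep killable; long or zero timeouts mean idle, so sleep
 * interruptible and just wait for a wakeup.
 */
static void daemon_sleep(unsigned int tout_ms)
{
	if (tout_ms && tout_ms <= 20)
		set_current_state(TASK_KILLABLE | TASK_FREEZABLE);
	else
		set_current_state(TASK_INTERRUPTIBLE | TASK_FREEZABLE);

	if (tout_ms)
		schedule_timeout(msecs_to_jiffies(tout_ms));
	else
		schedule();
	__set_current_state(TASK_RUNNING);
}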
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index ab3d22f662f2..eaf849260bd6 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -110,7 +110,24 @@ xfs_attr_change(
args->whichfork = XFS_ATTR_FORK;
xfs_attr_sethash(args);
- return xfs_attr_set(args, op, args->attr_filter & XFS_ATTR_ROOT);
+ /*
+ * Some xattrs must be resistant to allocation failure at ENOSPC, e.g.
+ * creating an inode with ACLs or security attributes requires the
+ * allocation of the xattr holding that information to succeed. Hence
+ * we allow xattrs in the VFS TRUSTED, SYSTEM, POSIX_ACL and SECURITY
+ * (LSM xattr) namespaces to dip into the reserve block pool to allow
+ * manipulation of these xattrs when at ENOSPC. These VFS xattr
+ * namespaces translate to the XFS_ATTR_ROOT and XFS_ATTR_SECURE on-disk
+ * namespaces.
+ *
+ * For most of these cases, these special xattrs will fit in the inode
+ * itself and so consume no extra space or only require temporary extra
+ * space while an overwrite is being made. Hence the reserve pool is
+ * largely there to keep the worst case reservation from preventing
+ * the xattr from being created at ENOSPC.
+ */
+ return xfs_attr_set(args, op,
+ args->attr_filter & (XFS_ATTR_ROOT | XFS_ATTR_SECURE));
}