author     Kent Overstreet <kent.overstreet@linux.dev>  2024-04-20 21:49:22 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>  2024-05-09 00:29:20 +0300
commit     5147b9ae768758982b196d1b259e6372e328955e (patch)
tree       6da266b587c28905f152a4e12f4d9fa6f541526c /fs/bcachefs/sysfs.c
parent     e4f2c4dfeeaeb70164967aa4abfa1190f7e61781 (diff)
download   linux-5147b9ae768758982b196d1b259e6372e328955e.tar.xz
bcachefs: Btree key cache instrumentation
It turns out that, prior to the previous patch, the btree key cache shrinker wasn't actually reclaiming anything. This adds instrumentation so that if we have further issues we can see what's going on.

Specifically, sysfs internal/btree_key_cache is greatly expanded with new counters, as well as the SRCU sequence numbers of the first 10 entries on each pending freelist; we also add trigger_btree_key_cache_shrink for testing, so the shrinker can be exercised without having to prune all the system caches.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
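For anyone wanting to poke at the new knob: like the other trigger_* attributes, a write to the file is parsed with strtoul_or_return() and fed to the shrinker as nr_to_scan. Below is a minimal userspace sketch, not part of this patch; the /sys/fs/bcachefs/<fs-uuid>/internal/ path is an assumption based on where the bch2_fs_internal_files attributes are exposed.

/*
 * Hypothetical helper, not from the patch: write nr_to_scan to the new
 * trigger, then dump the expanded internal/btree_key_cache instrumentation.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[512];
	FILE *f;
	int ch;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <fs-uuid> <nr_to_scan>\n", argv[0]);
		return 1;
	}

	/* The written value becomes sc.nr_to_scan in STORE(bch2_fs). */
	snprintf(path, sizeof(path),
		 "/sys/fs/bcachefs/%s/internal/trigger_btree_key_cache_shrink",
		 argv[1]);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "%s\n", argv[2]);
	fclose(f);

	/* Read back the new counters and SRCU sequence numbers. */
	snprintf(path, sizeof(path),
		 "/sys/fs/bcachefs/%s/internal/btree_key_cache", argv[1]);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while ((ch = fgetc(f)) != EOF)
		putchar(ch);
	fclose(f);
	return 0;
}

The same thing can be done from a shell with echo and cat; the C form just makes the write-then-read sequence explicit.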
Diffstat (limited to 'fs/bcachefs/sysfs.c')
-rw-r--r--  fs/bcachefs/sysfs.c | 34 ++++++++++++----------------------
1 file changed, 12 insertions(+), 22 deletions(-)
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 43edda74d3cb..4627b0ba179e 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -140,8 +140,8 @@ write_attribute(trigger_gc);
write_attribute(trigger_discards);
write_attribute(trigger_invalidates);
write_attribute(trigger_journal_flush);
-write_attribute(prune_cache);
-write_attribute(btree_wakeup);
+write_attribute(trigger_btree_cache_shrink);
+write_attribute(trigger_btree_key_cache_shrink);
rw_attribute(gc_gens_pos);
read_attribute(uuid);
@@ -346,21 +346,6 @@ static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
prt_printf(out, "\n");
}
-static void bch2_btree_wakeup_all(struct bch_fs *c)
-{
- struct btree_trans *trans;
-
- seqmutex_lock(&c->btree_trans_lock);
- list_for_each_entry(trans, &c->btree_trans_list, list) {
- struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);
-
- if (b)
- six_lock_wakeup_all(&b->lock);
-
- }
- seqmutex_unlock(&c->btree_trans_lock);
-}
-
static void fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
unsigned nr[BCH_DATA_NR];
@@ -513,7 +498,7 @@ STORE(bch2_fs)
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_sysfs))
return -EROFS;
- if (attr == &sysfs_prune_cache) {
+ if (attr == &sysfs_trigger_btree_cache_shrink) {
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
@@ -521,8 +506,13 @@ STORE(bch2_fs)
c->btree_cache.shrink->scan_objects(c->btree_cache.shrink, &sc);
}
- if (attr == &sysfs_btree_wakeup)
- bch2_btree_wakeup_all(c);
+ if (attr == &sysfs_trigger_btree_key_cache_shrink) {
+ struct shrink_control sc;
+
+ sc.gfp_mask = GFP_KERNEL;
+ sc.nr_to_scan = strtoul_or_return(buf);
+ c->btree_key_cache.shrink->scan_objects(c->btree_key_cache.shrink, &sc);
+ }
if (attr == &sysfs_trigger_gc)
bch2_gc_gens(c);
@@ -656,8 +646,8 @@ struct attribute *bch2_fs_internal_files[] = {
&sysfs_trigger_discards,
&sysfs_trigger_invalidates,
&sysfs_trigger_journal_flush,
- &sysfs_prune_cache,
- &sysfs_btree_wakeup,
+ &sysfs_trigger_btree_cache_shrink,
+ &sysfs_trigger_btree_key_cache_shrink,
&sysfs_gc_gens_pos,