summary refs log tree commit diff
path: root/fs
diff options
context:
space:
mode:
authorKent Overstreet <kent.overstreet@gmail.com>2021-04-01 04:07:37 +0300
committerKent Overstreet <kent.overstreet@linux.dev>2023-10-23 00:08:58 +0300
commitba5f03d362a8c7a32fd63c54cd3aeea0c9f3d7cc (patch)
tree28737b92313da213b1489195b7fbe0e988e74d10 /fs
parentd5a43661a1e9d9448e9e508470deec973c3d6644 (diff)
downloadlinux-ba5f03d362a8c7a32fd63c54cd3aeea0c9f3d7cc.tar.xz
bcachefs: Add a sysfs var for average btree write size
Useful number for performance tuning.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs')
-rw-r--r--fs/bcachefs/bcachefs.h3
-rw-r--r--fs/bcachefs/btree_io.c3
-rw-r--r--fs/bcachefs/sysfs.c12
3 files changed, 18 insertions, 0 deletions
diff --git a/fs/bcachefs/bcachefs.h b/fs/bcachefs/bcachefs.h
index 51aefecb5cbb..c5ff142871c7 100644
--- a/fs/bcachefs/bcachefs.h
+++ b/fs/bcachefs/bcachefs.h
@@ -796,6 +796,9 @@ mempool_t bio_bounce_pages;
struct bio_set dio_write_bioset;
struct bio_set dio_read_bioset;
+
+ atomic64_t btree_writes_nr;
+ atomic64_t btree_writes_sectors;
struct bio_list btree_write_error_list;
struct work_struct btree_write_error_work;
spinlock_t btree_write_error_lock;
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index fc94782afb60..3b45389a8e06 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -1551,6 +1551,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
b->written += sectors_to_write;
+ atomic64_inc(&c->btree_writes_nr);
+ atomic64_add(sectors_to_write, &c->btree_writes_sectors);
+
/* XXX: submitting IO with btree locks held: */
bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, k.k);
bch2_bkey_buf_exit(&k, c);
diff --git a/fs/bcachefs/sysfs.c b/fs/bcachefs/sysfs.c
index 4b83a98621d7..dd9b54e0d80b 100644
--- a/fs/bcachefs/sysfs.c
+++ b/fs/bcachefs/sysfs.c
@@ -153,6 +153,8 @@ read_attribute(io_latency_stats_read);
read_attribute(io_latency_stats_write);
read_attribute(congested);
+read_attribute(btree_avg_write_size);
+
read_attribute(bucket_quantiles_last_read);
read_attribute(bucket_quantiles_last_write);
read_attribute(bucket_quantiles_fragmentation);
@@ -228,6 +230,14 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)
return ret;
}
+static size_t bch2_btree_avg_write_size(struct bch_fs *c)
+{
+ u64 nr = atomic64_read(&c->btree_writes_nr);
+ u64 sectors = atomic64_read(&c->btree_writes_sectors);
+
+ return nr ? div64_u64(sectors, nr) : 0;
+}
+
static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
struct bch_fs_usage_online *fs_usage = bch2_fs_usage_read(c);
@@ -316,6 +326,7 @@ SHOW(bch2_fs)
sysfs_print(block_size, block_bytes(c));
sysfs_print(btree_node_size, btree_bytes(c));
sysfs_hprint(btree_cache_size, bch2_btree_cache_size(c));
+ sysfs_hprint(btree_avg_write_size, bch2_btree_avg_write_size(c));
sysfs_print(read_realloc_races,
atomic_long_read(&c->read_realloc_races));
@@ -507,6 +518,7 @@ struct attribute *bch2_fs_files[] = {
&sysfs_block_size,
&sysfs_btree_node_size,
&sysfs_btree_cache_size,
+ &sysfs_btree_avg_write_size,
&sysfs_journal_write_delay_ms,
&sysfs_journal_reclaim_delay_ms,