author     Kent Overstreet <kent.overstreet@linux.dev>   2024-09-06 02:25:01 +0300
committer  Kent Overstreet <kent.overstreet@linux.dev>   2024-09-21 18:39:48 +0300
commit     691f2cba229189033c55f19b904bc6f4bd68b480
tree       baa58573543931e12f6096ba28c40eb7d4be8282 /fs/bcachefs
parent     ad5dbe3ce533ec13abacad78076050672e3d39eb
bcachefs: btree cache counters should be size_t
32 bits won't overflow any time soon, but size_t is the correct type for
counting objects in memory.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
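
The type choice is worth spelling out: a 32-bit unsigned counter tops out around 4 billion, while size_t is as wide as the machine's address space, so a count of objects that live in memory can by definition never outgrow it. A minimal userspace sketch of the pattern (hypothetical obj_cache names, not code from this commit):

/* Illustrative sketch only: size_t for in-memory object counts. */
#include <stddef.h>

struct obj_cache {
	size_t nr_used;		/* objects currently allocated */
	size_t nr_reserve;	/* floor the shrinker must not go below */
};

/*
 * Clamp at zero explicitly: with unsigned arithmetic,
 * nr_used - nr_reserve would wrap instead of going negative.
 */
static inline size_t obj_cache_can_free(const struct obj_cache *c)
{
	return c->nr_used > c->nr_reserve ? c->nr_used - c->nr_reserve : 0;
}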
Diffstat (limited to 'fs/bcachefs')
fs/bcachefs/btree_cache.c     | 46
fs/bcachefs/btree_io.c        |  2
fs/bcachefs/btree_io.h        |  4
fs/bcachefs/btree_types.h     | 13
fs/bcachefs/journal_reclaim.c |  6
fs/bcachefs/super.c           |  2
6 files changed, 37 insertions(+), 36 deletions(-)
diff --git a/fs/bcachefs/btree_cache.c b/fs/bcachefs/btree_cache.c
index 585301822692..a7eb07d6e7f9 100644
--- a/fs/bcachefs/btree_cache.c
+++ b/fs/bcachefs/btree_cache.c
@@ -32,24 +32,24 @@ const char * const bch2_btree_node_flags[] = {
 
 void bch2_recalc_btree_reserve(struct bch_fs *c)
 {
-	unsigned i, reserve = 16;
+	unsigned reserve = 16;
 
 	if (!c->btree_roots_known[0].b)
 		reserve += 8;
 
-	for (i = 0; i < btree_id_nr_alive(c); i++) {
+	for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
 		struct btree_root *r = bch2_btree_id_root(c, i);
 
 		if (r->b)
 			reserve += min_t(unsigned, 1, r->b->c.level) * 8;
 	}
 
-	c->btree_cache.reserve = reserve;
+	c->btree_cache.nr_reserve = reserve;
 }
 
-static inline unsigned btree_cache_can_free(struct btree_cache *bc)
+static inline size_t btree_cache_can_free(struct btree_cache *bc)
 {
-	return max_t(int, 0, bc->used - bc->reserve);
+	return max_t(int, 0, bc->nr_used - bc->nr_reserve);
 }
 
 static void btree_node_to_freedlist(struct btree_cache *bc, struct btree *b)
@@ -87,7 +87,7 @@ static void btree_node_data_free(struct bch_fs *c, struct btree *b)
 #endif
 	b->aux_data = NULL;
 
-	bc->used--;
+	bc->nr_used--;
 
 	btree_node_to_freedlist(bc, b);
 }
@@ -167,7 +167,7 @@ struct btree *__bch2_btree_node_mem_alloc(struct bch_fs *c)
 
 	bch2_btree_lock_init(&b->c, 0);
 
-	bc->used++;
+	bc->nr_used++;
 	list_add(&b->list, &bc->freeable);
 	return b;
 }
@@ -194,7 +194,7 @@ void bch2_btree_node_hash_remove(struct btree_cache *bc, struct btree *b)
 	b->hash_val = 0;
 
 	if (b->c.btree_id < BTREE_ID_NR)
-		--bc->used_by_btree[b->c.btree_id];
+		--bc->nr_by_btree[b->c.btree_id];
 }
 
 int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
@@ -205,7 +205,7 @@ int __bch2_btree_node_hash_insert(struct btree_cache *bc, struct btree *b)
 	int ret = rhashtable_lookup_insert_fast(&bc->table, &b->hash,
 						bch_btree_cache_params);
 
 	if (!ret && b->c.btree_id < BTREE_ID_NR)
-		bc->used_by_btree[b->c.btree_id]++;
+		bc->nr_by_btree[b->c.btree_id]++;
 	return ret;
 }
@@ -401,8 +401,8 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	unsigned long touched = 0;
 	unsigned i, flags;
 	unsigned long ret = SHRINK_STOP;
-	bool trigger_writes = atomic_read(&bc->dirty) + nr >=
-		bc->used * 3 / 4;
+	bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >=
+		bc->nr_used * 3 / 4;
 
 	if (bch2_btree_shrinker_disabled)
 		return SHRINK_STOP;
@@ -439,7 +439,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 			six_unlock_write(&b->c.lock);
 			six_unlock_intent(&b->c.lock);
 			freed++;
-			bc->freed++;
+			bc->nr_freed++;
 		}
 	}
 restart:
@@ -453,7 +453,7 @@ restart:
 		} else if (!btree_node_reclaim(c, b, true)) {
 			freed++;
 			btree_node_data_free(c, b);
-			bc->freed++;
+			bc->nr_freed++;
 
 			bch2_btree_node_hash_remove(bc, b);
 			six_unlock_write(&b->c.lock);
@@ -539,7 +539,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	}
 
 	BUG_ON(!bch2_journal_error(&c->journal) &&
-	       atomic_read(&c->btree_cache.dirty));
+	       atomic_long_read(&c->btree_cache.nr_dirty));
 
 	list_splice(&bc->freed_pcpu, &bc->freed_nonpcpu);
 
@@ -572,7 +572,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 
 	bch2_recalc_btree_reserve(c);
 
-	for (i = 0; i < bc->reserve; i++)
+	for (i = 0; i < bc->nr_reserve; i++)
 		if (!__bch2_btree_node_mem_alloc(c))
 			goto err;
 
@@ -739,7 +739,7 @@ got_node:
 	}
 
 	mutex_lock(&bc->lock);
-	bc->used++;
+	bc->nr_used++;
 got_mem:
 	mutex_unlock(&bc->lock);
 
@@ -1353,11 +1353,11 @@ void bch2_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struc
 }
 
 static void prt_btree_cache_line(struct printbuf *out, const struct bch_fs *c,
-				 const char *label, unsigned nr)
+				 const char *label, size_t nr)
 {
 	prt_printf(out, "%s\t", label);
 	prt_human_readable_u64(out, nr * c->opts.btree_node_size);
-	prt_printf(out, " (%u)\n", nr);
+	prt_printf(out, " (%zu)\n", nr);
 }
 
 static const char * const bch2_btree_cache_not_freed_reasons_strs[] = {
@@ -1374,16 +1374,16 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc
 	if (!out->nr_tabstops)
 		printbuf_tabstop_push(out, 32);
 
-	prt_btree_cache_line(out, c, "total:", bc->used);
-	prt_btree_cache_line(out, c, "nr dirty:", atomic_read(&bc->dirty));
+	prt_btree_cache_line(out, c, "total:", bc->nr_used);
+	prt_btree_cache_line(out, c, "nr dirty:", atomic_long_read(&bc->nr_dirty));
 	prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
 	prt_newline(out);
 
-	for (unsigned i = 0; i < ARRAY_SIZE(bc->used_by_btree); i++)
-		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->used_by_btree[i]);
+	for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
+		prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]);
 	prt_newline(out);
 
-	prt_printf(out, "freed:\t%u\n", bc->freed);
+	prt_printf(out, "freed:\t%zu\n", bc->nr_freed);
 	prt_printf(out, "not freed:\n");
 
 	for (unsigned i = 0; i < ARRAY_SIZE(bc->not_freed); i++)
diff --git a/fs/bcachefs/btree_io.c b/fs/bcachefs/btree_io.c
index 18cff98650de..aad89ba16b9b 100644
--- a/fs/bcachefs/btree_io.c
+++ b/fs/bcachefs/btree_io.c
@@ -2031,7 +2031,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 do_write:
 	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));
 
-	atomic_dec(&c->btree_cache.dirty);
+	atomic_long_dec(&c->btree_cache.nr_dirty);
 
 	BUG_ON(btree_node_fake(b));
 	BUG_ON((b->will_make_reachable != 0) != !b->written);
diff --git a/fs/bcachefs/btree_io.h b/fs/bcachefs/btree_io.h
index 63d76f5c6403..9b01ca3de907 100644
--- a/fs/bcachefs/btree_io.h
+++ b/fs/bcachefs/btree_io.h
@@ -18,13 +18,13 @@ struct btree_node_read_all;
 static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
 	if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
-		atomic_inc(&c->btree_cache.dirty);
+		atomic_long_inc(&c->btree_cache.nr_dirty);
 }
 
 static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
 	if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
-		atomic_dec(&c->btree_cache.dirty);
+		atomic_long_dec(&c->btree_cache.nr_dirty);
 }
 
 static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
diff --git a/fs/bcachefs/btree_types.h b/fs/bcachefs/btree_types.h
index c1ab824e1c34..806d27b7f41b 100644
--- a/fs/bcachefs/btree_types.h
+++ b/fs/bcachefs/btree_types.h
@@ -180,15 +180,16 @@ struct btree_cache {
 	struct list_head	freed_nonpcpu;
 
 	/* Number of elements in live + freeable lists */
-	unsigned		used;
-	unsigned		reserve;
-	unsigned		freed;
-	atomic_t		dirty;
+	size_t			nr_used;
+	size_t			nr_reserve;
+	size_t			nr_by_btree[BTREE_ID_NR];
+	atomic_long_t		nr_dirty;
+
+	/* shrinker stats */
+	size_t			nr_freed;
 	u64			not_freed[BCH_BTREE_CACHE_NOT_FREED_REASONS_NR];
 	struct shrinker		*shrink;
 
-	unsigned		used_by_btree[BTREE_ID_NR];
-
 	/*
 	 * If we need to allocate memory for a new btree node and that
 	 * allocation fails, we can cannibalize another node in the btree cache
diff --git a/fs/bcachefs/journal_reclaim.c b/fs/bcachefs/journal_reclaim.c
index 70b998d9f19c..9794b6d214cd 100644
--- a/fs/bcachefs/journal_reclaim.c
+++ b/fs/bcachefs/journal_reclaim.c
@@ -681,7 +681,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 		if (j->watermark != BCH_WATERMARK_stripe)
 			min_nr = 1;
 
-		if (atomic_read(&c->btree_cache.dirty) * 2 > c->btree_cache.used)
+		if (atomic_long_read(&c->btree_cache.nr_dirty) * 2 > c->btree_cache.nr_used)
 			min_nr = 1;
 
 		min_key_cache = min(bch2_nr_btree_keys_need_flush(c), (size_t) 128);
@@ -689,8 +689,8 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
 
 		trace_and_count(c, journal_reclaim_start, c,
 				direct, kicked,
 				min_nr, min_key_cache,
-				atomic_read(&c->btree_cache.dirty),
-				c->btree_cache.used,
+				atomic_long_read(&c->btree_cache.nr_dirty),
+				c->btree_cache.nr_used,
 				atomic_long_read(&c->btree_key_cache.nr_dirty),
 				atomic_long_read(&c->btree_key_cache.nr_keys));
diff --git a/fs/bcachefs/super.c b/fs/bcachefs/super.c
index 1c949389d88f..873e4be7e1dc 100644
--- a/fs/bcachefs/super.c
+++ b/fs/bcachefs/super.c
@@ -370,7 +370,7 @@ void bch2_fs_read_only(struct bch_fs *c)
 	    test_bit(BCH_FS_clean_shutdown, &c->flags) &&
 	    c->recovery_pass_done >= BCH_RECOVERY_PASS_journal_replay) {
 		BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
-		BUG_ON(atomic_read(&c->btree_cache.dirty));
+		BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty));
 		BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
 		BUG_ON(c->btree_write_buffer.inc.keys.nr);
 		BUG_ON(c->btree_write_buffer.flushing.keys.nr);
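
One pattern above deserves a note: in btree_io.h the global dirty count is only adjusted when test_and_set_bit()/test_and_clear_bit() report an actual transition of the per-node dirty flag, so repeated set or clear calls cannot skew the counter, and atomic_long_t gives the count the same width as the new size_t fields. A self-contained sketch of that accounting idiom, using hypothetical names (my_cache, MY_DIRTY_BIT), not code from this commit:

/* Illustrative only; mirrors the flag-guarded accounting in btree_io.h. */
#include <linux/atomic.h>
#include <linux/bitops.h>

#define MY_DIRTY_BIT	0

struct my_cache {
	atomic_long_t	nr_dirty;	/* total dirty objects */
};

static inline void my_set_dirty(struct my_cache *c, unsigned long *flags)
{
	/* Count only the 0 -> 1 transition of the flag. */
	if (!test_and_set_bit(MY_DIRTY_BIT, flags))
		atomic_long_inc(&c->nr_dirty);
}

static inline void my_clear_dirty(struct my_cache *c, unsigned long *flags)
{
	/* And only the 1 -> 0 transition on the way back. */
	if (test_and_clear_bit(MY_DIRTY_BIT, flags))
		atomic_long_dec(&c->nr_dirty);
}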