diff options
author:    Kent Overstreet <kent.overstreet@linux.dev>  2022-12-05 18:24:19 +0300
committer: Kent Overstreet <kent.overstreet@linux.dev>  2023-10-23 00:09:53 +0300
commit:    80c33085783656617d0d07e1bc9fba70a592ce5c (patch)
tree:      2f2a6b43a3b6caab092c4f74df9f5a582e1a60b0 /fs/bcachefs/alloc_background.c
parent:    1b30ed5fd87828b5e29647510eefb18a363e4d19 (diff)
download:  linux-80c33085783656617d0d07e1bc9fba70a592ce5c.tar.xz
bcachefs: Fragmentation LRU
Now that we have much more efficient updates to the LRU btree, this
patch adds a new LRU that indexes buckets by fragmentation.
This means copygc no longer has to scan every bucket to find buckets
that need to be evacuated.
Changes:
- A new field in bch_alloc_v4, fragmentation_lru - this corresponds to
the bucket's position in the fragmentation LRU. We add a new field
for this instead of calculating it as needed because we may make the
fragmentation LRU optional; this field indicates whether a bucket is
on the fragmentation LRU.
Also, zoned devices will introduce variable bucket sizes; explicitly
recording the LRU position will be safer for them.
- A new copygc path for using the fragmentation LRU instead of
scanning every bucket and building up an in-memory heap.
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Diffstat (limited to 'fs/bcachefs/alloc_background.c')
-rw-r--r--  fs/bcachefs/alloc_background.c | 20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/fs/bcachefs/alloc_background.c b/fs/bcachefs/alloc_background.c
index af3e55fdd54a..aefe72d34c5b 100644
--- a/fs/bcachefs/alloc_background.c
+++ b/fs/bcachefs/alloc_background.c
@@ -415,6 +415,8 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 	prt_newline(out);
 	prt_printf(out, "io_time[WRITE]    %llu", a->io_time[WRITE]);
 	prt_newline(out);
+	prt_printf(out, "fragmentation     %llu", a->fragmentation_lru);
+	prt_newline(out);
 	prt_printf(out, "bp_start          %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
 	prt_newline(out);
@@ -910,8 +912,8 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 	    !new_a->io_time[READ])
 		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
-	old_lru = alloc_lru_idx(*old_a);
-	new_lru = alloc_lru_idx(*new_a);
+	old_lru = alloc_lru_idx_read(*old_a);
+	new_lru = alloc_lru_idx_read(*new_a);
 	if (old_lru != new_lru) {
 		ret = bch2_lru_change(trans, new->k.p.inode,
@@ -921,6 +923,18 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 			return ret;
 	}
+	new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
+					bch_dev_bkey_exists(c, new->k.p.inode));
+
+	if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
+		ret = bch2_lru_change(trans,
+				      BCH_LRU_FRAGMENTATION_START,
+				      bucket_to_u64(new->k.p),
+				      old_a->fragmentation_lru, new_a->fragmentation_lru);
+		if (ret)
+			return ret;
+	}
+
 	if (old_a->gen != new_a->gen) {
 		ret = bch2_bucket_gen_update(trans, new->k.p, new_a->gen);
 		if (ret)
@@ -1777,7 +1791,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 		goto out;
 	/* We expect harmless races here due to the btree write buffer: */
-	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx(a->v))
+	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
 		goto out;
 	BUG_ON(a->v.data_type != BCH_DATA_cached);