-rw-r--r-- | fs/bcachefs/buckets_types.h |   2
-rw-r--r-- | fs/bcachefs/move.c          | 291
-rw-r--r-- | fs/bcachefs/move.h          |  10
-rw-r--r-- | fs/bcachefs/movinggc.c      | 236
-rw-r--r-- | fs/bcachefs/trace.h         |  31
5 files changed, 346 insertions, 224 deletions
diff --git a/fs/bcachefs/buckets_types.h b/fs/bcachefs/buckets_types.h
index 0a9dd5af3524..1dbba7d906dd 100644
--- a/fs/bcachefs/buckets_types.h
+++ b/fs/bcachefs/buckets_types.h
@@ -95,7 +95,7 @@ struct copygc_heap_entry {
 	u8			replicas;
 	u32			fragmentation;
 	u32			sectors;
-	u64			offset;
+	u64			bucket;
 };
 
 typedef HEAP(struct copygc_heap_entry) copygc_heap;
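/*
 * [annotation, not part of the patch] With this rename, a copygc heap
 * entry records the bucket index itself rather than the bucket's start
 * sector, so the sector<->bucket round trips elsewhere in the patch can
 * be dropped. A standalone sketch of that conversion, assuming it mirrors
 * what bcachefs' bucket_to_sector()/sector_to_bucket() derive from
 * ca->mi.bucket_size (the bucket_size parameter here is hypothetical):
 */
#include <stdint.h>

static inline uint64_t bucket_to_sector(uint64_t bucket, unsigned bucket_size)
{
	return bucket * bucket_size;	/* first sector of the bucket */
}

static inline uint64_t sector_to_bucket(uint64_t sector, unsigned bucket_size)
{
	return sector / bucket_size;	/* bucket containing the sector */
}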
diff --git a/fs/bcachefs/move.c b/fs/bcachefs/move.c
index 46677ad911cd..690c3128c5e1 100644
--- a/fs/bcachefs/move.c
+++ b/fs/bcachefs/move.c
@@ -1,14 +1,18 @@
 // SPDX-License-Identifier: GPL-2.0
 
 #include "bcachefs.h"
+#include "alloc_background.h"
 #include "alloc_foreground.h"
+#include "backpointers.h"
 #include "bkey_buf.h"
 #include "btree_gc.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
+#include "btree_write_buffer.h"
 #include "disk_groups.h"
 #include "ec.h"
 #include "errcode.h"
+#include "error.h"
 #include "inode.h"
 #include "io.h"
 #include "journal_reclaim.h"
@@ -66,6 +70,9 @@ static void move_write_done(struct bch_write_op *op)
 	struct moving_io *io = container_of(op, struct moving_io, write.op);
 	struct moving_context *ctxt = io->write.ctxt;
 
+	if (io->write.op.error)
+		ctxt->write_error = true;
+
 	atomic_sub(io->write_sectors, &io->write.ctxt->write_sectors);
 	move_free(io);
 	closure_put(&ctxt->cl);
@@ -401,6 +408,30 @@ static int move_ratelimit(struct btree_trans *trans,
 	return 0;
 }
 
+static int move_get_io_opts(struct btree_trans *trans,
+			    struct bch_io_opts *io_opts,
+			    struct bkey_s_c k, u64 *cur_inum)
+{
+	struct bch_inode_unpacked inode;
+	int ret;
+
+	if (*cur_inum == k.k->p.inode)
+		return 0;
+
+	ret = lookup_inode(trans,
+			   SPOS(0, k.k->p.inode, k.k->p.snapshot),
+			   &inode);
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+		return ret;
+
+	if (!ret)
+		bch2_inode_opts_get(io_opts, trans->c, &inode);
+	else
+		*io_opts = bch2_opts_to_inode_opts(trans->c->opts);
+	*cur_inum = k.k->p.inode;
+	return 0;
+}
+
 static int __bch2_move_data(struct moving_context *ctxt,
 			    struct bpos start,
 			    struct bpos end,
@@ -452,23 +483,9 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (!bkey_extent_is_direct_data(k.k))
 			goto next_nondata;
 
-		if (btree_id == BTREE_ID_extents &&
-		    cur_inum != k.k->p.inode) {
-			struct bch_inode_unpacked inode;
-
-			io_opts = bch2_opts_to_inode_opts(c->opts);
-
-			ret = lookup_inode(&trans,
-					   SPOS(0, k.k->p.inode, k.k->p.snapshot),
-					   &inode);
-			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-				continue;
-
-			if (!ret)
-				bch2_inode_opts_get(&io_opts, c, &inode);
-
-			cur_inum = k.k->p.inode;
-		}
+		ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum);
+		if (ret)
+			continue;
 
 		memset(&data_opts, 0, sizeof(data_opts));
 		if (!pred(c, arg, k, &io_opts, &data_opts))
@@ -549,6 +566,246 @@ int bch2_move_data(struct bch_fs *c,
 	return ret;
 }
 
+static int verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	struct printbuf buf = PRINTBUF;
+	struct bch_backpointer bp;
+	u64 bp_offset = 0;
+	int ret;
+
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
+			     bucket, BTREE_ITER_CACHED);
+again:
+	k = bch2_btree_iter_peek_slot(&iter);
+	ret = bkey_err(k);
+
+	if (!ret && k.k->type == KEY_TYPE_alloc_v4) {
+		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+
+		if (a.v->gen == gen &&
+		    a.v->dirty_sectors) {
+			if (a.v->data_type == BCH_DATA_btree) {
+				bch2_trans_unlock(trans);
+				if (bch2_btree_interior_updates_flush(c))
+					goto again;
+				goto failed_to_evacuate;
+			}
+		}
+	}
+
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
+failed_to_evacuate:
+	bch2_trans_iter_exit(trans, &iter);
+
+	prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
+	bch2_bkey_val_to_text(&buf, c, k);
+
+	while (1) {
+		bch2_trans_begin(trans);
+
+		ret = bch2_get_next_backpointer(trans, bucket, gen,
+						&bp_offset, &bp);
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+			continue;
+		if (ret)
+			break;
+		if (bp_offset == U64_MAX)
+			break;
+
+		k = bch2_backpointer_get_key(trans, &iter,
+					     bucket, bp_offset, bp);
+		ret = bkey_err(k);
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+			continue;
+		if (ret)
+			break;
+		if (!k.k)
+			continue;
+		prt_newline(&buf);
+		bch2_bkey_val_to_text(&buf, c, k);
+		bch2_trans_iter_exit(trans, &iter);
+	}
+
+	bch2_print_string_as_lines(KERN_ERR, buf.buf);
+	printbuf_exit(&buf);
+	return 0;
+}
+
+int __bch2_evacuate_bucket(struct moving_context *ctxt,
+			   struct bpos bucket, int gen,
+			   struct data_update_opts _data_opts)
+{
+	struct bch_fs *c = ctxt->c;
+	struct bch_io_opts io_opts = bch2_opts_to_inode_opts(c->opts);
+	struct btree_trans trans;
+	struct btree_iter iter;
+	struct bkey_buf sk;
+	struct bch_backpointer bp;
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a;
+	struct bkey_s_c k;
+	struct data_update_opts data_opts;
+	unsigned dirty_sectors, bucket_size;
+	u64 bp_offset = 0, cur_inum = U64_MAX;
+	int ret = 0;
+
+	bch2_bkey_buf_init(&sk);
+	bch2_trans_init(&trans, c, 0, 0);
+
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc,
+			     bucket, BTREE_ITER_CACHED);
+	ret = lockrestart_do(&trans,
+			bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+	bch2_trans_iter_exit(&trans, &iter);
+
+	if (ret) {
+		bch_err(c, "%s: error looking up alloc key: %s", __func__, bch2_err_str(ret));
+		goto err;
+	}
+
+	a = bch2_alloc_to_v4(k, &a_convert);
+	dirty_sectors = a->dirty_sectors;
+	bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
+
+	ret = bch2_btree_write_buffer_flush(&trans);
+	if (ret) {
+		bch_err(c, "%s: error flushing btree write buffer: %s", __func__, bch2_err_str(ret));
+		goto err;
+	}
+
+	while (!(ret = move_ratelimit(&trans, ctxt))) {
+		bch2_trans_begin(&trans);
+
+		ret = bch2_get_next_backpointer(&trans, bucket, gen,
+						&bp_offset, &bp);
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+			continue;
+		if (ret)
+			goto err;
+		if (bp_offset == U64_MAX)
+			break;
+
+		if (!bp.level) {
+			const struct bch_extent_ptr *ptr;
+			struct bkey_s_c k;
+			unsigned i = 0;
+
+			k = bch2_backpointer_get_key(&trans, &iter,
+						     bucket, bp_offset, bp);
+			ret = bkey_err(k);
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				continue;
+			if (ret)
+				goto err;
+			if (!k.k)
+				goto next;
+
+			bch2_bkey_buf_reassemble(&sk, c, k);
+			k = bkey_i_to_s_c(sk.k);
+
+			ret = move_get_io_opts(&trans, &io_opts, k, &cur_inum);
+			if (ret) {
+				bch2_trans_iter_exit(&trans, &iter);
+				continue;
+			}
+
+			data_opts = _data_opts;
+			data_opts.target	= io_opts.background_target;
+			data_opts.rewrite_ptrs	= 0;
+
+			bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr) {
+				if (ptr->dev == bucket.inode)
+					data_opts.rewrite_ptrs |= 1U << i;
+				i++;
+			}
+
+			ret = bch2_move_extent(&trans, &iter, ctxt, io_opts,
+					       bp.btree_id, k, data_opts);
+			bch2_trans_iter_exit(&trans, &iter);
+
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				continue;
+			if (ret == -ENOMEM) {
+				/* memory allocation failure, wait for some IO to finish */
+				bch2_move_ctxt_wait_for_io(ctxt, &trans);
+				continue;
+			}
+			if (ret)
+				goto err;
+
+			if (ctxt->rate)
+				bch2_ratelimit_increment(ctxt->rate, k.k->size);
+			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
+		} else {
+			struct btree *b;
+
+			b = bch2_backpointer_get_node(&trans, &iter,
+						      bucket, bp_offset, bp);
+			ret = PTR_ERR_OR_ZERO(b);
+			if (ret == -BCH_ERR_backpointer_to_overwritten_btree_node)
+				continue;
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				continue;
+			if (ret)
+				goto err;
+			if (!b)
+				goto next;
+
+			ret = bch2_btree_node_rewrite(&trans, &iter, b, 0);
+			bch2_trans_iter_exit(&trans, &iter);
+
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				continue;
+			if (ret)
+				goto err;
+
+			if (ctxt->rate)
+				bch2_ratelimit_increment(ctxt->rate,
+							 c->opts.btree_node_size >> 9);
+			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
+			atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+		}
+next:
+		bp_offset++;
+	}
+
+	trace_evacuate_bucket(c, &bucket, dirty_sectors, bucket_size, ret);
+
+	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) && gen >= 0) {
+		bch2_trans_unlock(&trans);
+		move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
+		closure_sync(&ctxt->cl);
+		if (!ctxt->write_error)
+			lockrestart_do(&trans, verify_bucket_evacuated(&trans, bucket, gen));
+	}
+err:
+	bch2_trans_exit(&trans);
+	bch2_bkey_buf_exit(&sk, c);
+	return ret;
+}
+
+int bch2_evacuate_bucket(struct bch_fs *c,
+			 struct bpos bucket, int gen,
+			 struct data_update_opts data_opts,
+			 struct bch_ratelimit *rate,
+			 struct bch_move_stats *stats,
+			 struct write_point_specifier wp,
+			 bool wait_on_copygc)
+{
+	struct moving_context ctxt;
+	int ret;
+
+	bch2_moving_ctxt_init(&ctxt, c, rate, stats, wp, wait_on_copygc);
+	ret = __bch2_evacuate_bucket(&ctxt, bucket, gen, data_opts);
+	bch2_moving_ctxt_exit(&ctxt);
+
+	return ret;
+}
+
 typedef bool (*move_btree_pred)(struct bch_fs *, void *,
 				struct btree *, struct bch_io_opts *,
 				struct data_update_opts *);
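/*
 * [annotation, not part of the patch] A minimal caller sketch for the
 * bch2_evacuate_bucket() interface added above. The device index, bucket
 * number and generation are made-up values, and the write point simply
 * mirrors the copygc caller in movinggc.c below:
 */
static int example_evacuate_one_bucket(struct bch_fs *c)
{
	struct bch_move_stats stats;
	struct data_update_opts data_opts = { 0 };

	bch2_move_stats_init(&stats, "example");

	/* move all live data out of bucket 42 on device 0, generation 3: */
	return bch2_evacuate_bucket(c, POS(0, 42), 3, data_opts,
				    NULL,	/* no rate limit */
				    &stats,
				    writepoint_ptr(&c->copygc_write_point),
				    false);	/* don't wait on copygc */
}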
diff --git a/fs/bcachefs/move.h b/fs/bcachefs/move.h
index 2eb6a15542e0..b14f679f6904 100644
--- a/fs/bcachefs/move.h
+++ b/fs/bcachefs/move.h
@@ -15,6 +15,7 @@ struct moving_context {
 	struct bch_move_stats	*stats;
 	struct write_point_specifier wp;
 	bool			wait_on_copygc;
+	bool			write_error;
 
 	/* For waiting on outstanding reads and writes: */
 	struct closure		cl;
@@ -46,6 +47,15 @@ int bch2_move_data(struct bch_fs *,
 		   bool,
 		   move_pred_fn, void *);
 
+int __bch2_evacuate_bucket(struct moving_context *,
+			   struct bpos, int,
+			   struct data_update_opts);
+int bch2_evacuate_bucket(struct bch_fs *, struct bpos, int,
+			 struct data_update_opts,
+			 struct bch_ratelimit *,
+			 struct bch_move_stats *,
+			 struct write_point_specifier,
+			 bool);
 int bch2_data_job(struct bch_fs *,
 		  struct bch_move_stats *,
 		  struct bch_ioctl_data);
diff --git a/fs/bcachefs/movinggc.c b/fs/bcachefs/movinggc.c
index a04e2330d0e6..b420b79edb36 100644
--- a/fs/bcachefs/movinggc.c
+++ b/fs/bcachefs/movinggc.c
@@ -31,79 +31,6 @@
 #include <linux/sort.h>
 #include <linux/wait.h>
 
-static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
-{
-	const struct copygc_heap_entry *l = _l;
-	const struct copygc_heap_entry *r = _r;
-
-	return cmp_int(l->dev, r->dev) ?:
-		cmp_int(l->offset, r->offset);
-}
-
-static bool copygc_pred(struct bch_fs *c, void *arg,
-			struct bkey_s_c k,
-			struct bch_io_opts *io_opts,
-			struct data_update_opts *data_opts)
-{
-	copygc_heap *h = &c->copygc_heap;
-	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
-	const union bch_extent_entry *entry;
-	struct extent_ptr_decoded p = { 0 };
-	unsigned i = 0;
-
-	/*
-	 * We need to use the journal reserve here, because
-	 *  - journal reclaim depends on btree key cache
-	 *    flushing to make forward progress,
-	 *  - which has to make forward progress when the
-	 *    journal is pre-reservation full,
-	 *  - and depends on allocation - meaning allocator and
-	 *    copygc
-	 */
-
-	data_opts->rewrite_ptrs		= 0;
-	data_opts->target		= io_opts->background_target;
-	data_opts->extra_replicas	= 0;
-	data_opts->btree_insert_flags	= BTREE_INSERT_USE_RESERVE|
-						JOURNAL_WATERMARK_copygc;
-
-	bkey_for_each_ptr_decode(k.k, ptrs, p, entry) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
-		struct copygc_heap_entry search = {
-			.dev	= p.ptr.dev,
-			.offset	= p.ptr.offset,
-		};
-		ssize_t eytz;
-
-		if (p.ptr.cached)
-			continue;
-
-		eytz = eytzinger0_find_le(h->data, h->used,
-					  sizeof(h->data[0]),
-					  bucket_offset_cmp, &search);
-#if 0
-		/* eytzinger search verify code: */
-		ssize_t j = -1, k;
-
-		for (k = 0; k < h->used; k++)
-			if (h->data[k].offset <= ptr->offset &&
-			    (j < 0 || h->data[k].offset > h->data[j].offset))
-				j = k;
-
-		BUG_ON(i != j);
-#endif
-		if (eytz >= 0 &&
-		    p.ptr.dev == h->data[eytz].dev &&
-		    p.ptr.offset < h->data[eytz].offset + ca->mi.bucket_size &&
-		    p.ptr.gen == h->data[eytz].gen)
-			data_opts->rewrite_ptrs |= 1U << i;
-
-		i++;
-	}
-
-	return data_opts->rewrite_ptrs != 0;
-}
-
 static inline int fragmentation_cmp(copygc_heap *heap,
 				    struct copygc_heap_entry l,
 				    struct copygc_heap_entry r)
@@ -111,7 +38,7 @@ static inline int fragmentation_cmp(copygc_heap *heap,
 	return cmp_int(l.fragmentation, r.fragmentation);
 }
 
-static int walk_buckets_to_copygc(struct bch_fs *c)
+static int find_buckets_to_copygc(struct bch_fs *c)
 {
 	copygc_heap *h = &c->copygc_heap;
 	struct btree_trans trans;
@@ -121,6 +48,14 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
 
 	bch2_trans_init(&trans, c, 0, 0);
 
+	/*
+	 * Find buckets with lowest sector counts, skipping completely
+	 * empty buckets, by building a maxheap sorted by sector count,
+	 * and repeatedly replacing the maximum element until all
+	 * buckets have been visited.
+	 */
+	h->used = 0;
+
 	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
 			   BTREE_ITER_PREFETCH, k, ret) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
@@ -130,7 +65,8 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
 
 		a = bch2_alloc_to_v4(k, &a_convert);
 
-		if (a->data_type != BCH_DATA_user ||
+		if ((a->data_type != BCH_DATA_btree &&
+		     a->data_type != BCH_DATA_user) ||
 		    a->dirty_sectors >= ca->mi.bucket_size ||
 		    bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
 			continue;
@@ -142,7 +78,7 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
 			.fragmentation	= div_u64((u64) a->dirty_sectors * (1ULL << 31),
 						  ca->mi.bucket_size),
 			.sectors	= a->dirty_sectors,
-			.offset		= bucket_to_sector(ca, iter.pos.offset),
+			.bucket		= iter.pos.offset,
 		};
 		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
 	}
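/*
 * [annotation, not part of the patch] Standalone illustration of the
 * "maxheap + replace the maximum" selection described by the comment
 * added above: keep the K smallest values seen while scanning, by filling
 * a maxheap and replacing the root whenever a smaller value comes along.
 * Plain userspace C with a hypothetical K; the kernel code uses the
 * HEAP() helpers with fragmentation_cmp instead.
 */
#include <stdio.h>
#include <stddef.h>

#define K 4

static void swap_u(unsigned *a, unsigned *b)
{
	unsigned t = *a; *a = *b; *b = t;
}

static void sift_down(unsigned *h, size_t used, size_t i)
{
	for (;;) {
		size_t l = 2 * i + 1, r = l + 1, m = i;

		if (l < used && h[l] > h[m])
			m = l;
		if (r < used && h[r] > h[m])
			m = r;
		if (m == i)
			break;
		swap_u(&h[i], &h[m]);
		i = m;
	}
}

static void add_or_replace(unsigned *h, size_t *used, unsigned v)
{
	if (*used < K) {
		size_t i = (*used)++;

		h[i] = v;		/* heap not full: append and sift up */
		while (i && h[(i - 1) / 2] < h[i]) {
			swap_u(&h[i], &h[(i - 1) / 2]);
			i = (i - 1) / 2;
		}
	} else if (v < h[0]) {
		h[0] = v;		/* beats the current maximum: replace the root */
		sift_down(h, *used, 0);
	}
}

int main(void)
{
	unsigned vals[] = { 90, 10, 70, 30, 50, 20, 80, 40 }, heap[K];
	size_t used = 0, i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		add_or_replace(heap, &used, vals[i]);

	for (i = 0; i < used; i++)	/* prints the 4 smallest, in heap order */
		printf("%u\n", heap[i]);
	return 0;
}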
@@ -153,77 +89,22 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
 	return ret;
 }
 
-static int bucket_inorder_cmp(const void *_l, const void *_r)
-{
-	const struct copygc_heap_entry *l = _l;
-	const struct copygc_heap_entry *r = _r;
-
-	return cmp_int(l->dev, r->dev) ?: cmp_int(l->offset, r->offset);
-}
-
-static int check_copygc_was_done(struct bch_fs *c,
-				 u64 *sectors_not_moved,
-				 u64 *buckets_not_moved)
-{
-	copygc_heap *h = &c->copygc_heap;
-	struct btree_trans trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bch_alloc_v4 a;
-	struct copygc_heap_entry *i;
-	int ret = 0;
-
-	sort(h->data, h->used, sizeof(h->data[0]), bucket_inorder_cmp, NULL);
-
-	bch2_trans_init(&trans, c, 0, 0);
-	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc, POS_MIN, 0);
-
-	for (i = h->data; i < h->data + h->used; i++) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, i->dev);
-
-		bch2_btree_iter_set_pos(&iter, POS(i->dev, sector_to_bucket(ca, i->offset)));
-
-		ret = lockrestart_do(&trans,
-				bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
-		if (ret)
-			break;
-
-		bch2_alloc_to_v4(k, &a);
-
-		if (a.gen == i->gen && a.dirty_sectors) {
-			*sectors_not_moved += a.dirty_sectors;
-			*buckets_not_moved += 1;
-		}
-	}
-	bch2_trans_iter_exit(&trans, &iter);
-
-	bch2_trans_exit(&trans);
-	return ret;
-}
-
 static int bch2_copygc(struct bch_fs *c)
 {
 	copygc_heap *h = &c->copygc_heap;
-	struct copygc_heap_entry e, *i;
+	struct copygc_heap_entry e;
 	struct bch_move_stats move_stats;
-	u64 sectors_to_move = 0, sectors_to_write = 0, sectors_not_moved = 0;
-	u64 sectors_reserved = 0;
-	u64 buckets_to_move, buckets_not_moved = 0;
 	struct bch_dev *ca;
 	unsigned dev_idx;
 	size_t heap_size = 0;
-	int ret;
+	struct moving_context ctxt;
+	struct data_update_opts data_opts = {
+		.btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
+	};
+	int ret = 0;
 
 	bch2_move_stats_init(&move_stats, "copygc");
 
-	/*
-	 * Find buckets with lowest sector counts, skipping completely
-	 * empty buckets, by building a maxheap sorted by sector count,
-	 * and repeatedly replacing the maximum element until all
-	 * buckets have been visited.
-	 */
-	h->used = 0;
-
 	for_each_rw_member(ca, c, dev_idx)
 		heap_size += ca->mi.nbuckets >> 7;
@@ -235,21 +116,7 @@ static int bch2_copygc(struct bch_fs *c)
 		}
 	}
 
-	for_each_rw_member(ca, c, dev_idx) {
-		struct bch_dev_usage usage = bch2_dev_usage_read(ca);
-
-		u64 avail = max_t(s64, 0,
-				  usage.d[BCH_DATA_free].buckets +
-				  usage.d[BCH_DATA_need_discard].buckets -
-				  ca->nr_open_buckets -
-				  bch2_dev_buckets_reserved(ca, RESERVE_movinggc));
-
-		avail = min(avail, ca->mi.nbuckets >> 6);
-
-		sectors_reserved += avail * ca->mi.bucket_size;
-	}
-
-	ret = walk_buckets_to_copygc(c);
+	ret = find_buckets_to_copygc(c);
 	if (ret) {
 		bch2_fs_fatal_error(c, "error walking buckets to copygc!");
 		return ret;
@@ -281,69 +148,26 @@ static int bch2_copygc(struct bch_fs *c)
 		return 0;
 	}
 
-	/*
-	 * Our btree node allocations also come out of RESERVE_movingc:
-	 */
-	sectors_reserved = (sectors_reserved * 3) / 4;
-	if (!sectors_reserved) {
-		bch2_fs_fatal_error(c, "stuck, ran out of copygc reserve!");
-		return -1;
-	}
+	heap_resort(h, fragmentation_cmp, NULL);
 
-	for (i = h->data; i < h->data + h->used; i++) {
-		sectors_to_move += i->sectors;
-		sectors_to_write += i->sectors * i->replicas;
-	}
+	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+			      writepoint_ptr(&c->copygc_write_point),
+			      false);
 
-	while (sectors_to_write > sectors_reserved) {
+	/* not correct w.r.t. device removal */
+	while (h->used && !ret) {
 		BUG_ON(!heap_pop(h, e, -fragmentation_cmp, NULL));
-		sectors_to_write -= e.sectors * e.replicas;
+		ret = __bch2_evacuate_bucket(&ctxt, POS(e.dev, e.bucket), e.gen,
+					     data_opts);
 	}
 
-	buckets_to_move = h->used;
+	bch2_moving_ctxt_exit(&ctxt);
 
-	if (!buckets_to_move) {
-		bch_err_ratelimited(c, "copygc cannot run - sectors_reserved %llu!",
-				    sectors_reserved);
-		return 0;
-	}
-
-	eytzinger0_sort(h->data, h->used,
-			sizeof(h->data[0]),
-			bucket_offset_cmp, NULL);
-
-	ret = bch2_move_data(c,
-			     0,			POS_MIN,
-			     BTREE_ID_NR,	POS_MAX,
-			     NULL,
-			     &move_stats,
-			     writepoint_ptr(&c->copygc_write_point),
-			     false,
-			     copygc_pred, NULL);
 	if (ret < 0 && !bch2_err_matches(ret, EROFS))
 		bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));
-	if (ret)
-		return ret;
-
-	ret = check_copygc_was_done(c, &sectors_not_moved, &buckets_not_moved);
-	if (ret) {
-		bch_err(c, "error %i from check_copygc_was_done()", ret);
-		return ret;
-	}
-
-	if (sectors_not_moved)
-		bch_warn_ratelimited(c,
-			"copygc finished but %llu/%llu sectors, %llu/%llu buckets not moved (move stats: moved %llu sectors, raced %llu keys, %llu sectors)",
-			 sectors_not_moved, sectors_to_move,
-			 buckets_not_moved, buckets_to_move,
-			 atomic64_read(&move_stats.sectors_moved),
-			 atomic64_read(&move_stats.keys_raced),
-			 atomic64_read(&move_stats.sectors_raced));
-
-	trace_and_count(c, copygc, c,
-			atomic64_read(&move_stats.sectors_moved), sectors_not_moved,
-			buckets_to_move, buckets_not_moved);
-	return 0;
+	trace_and_count(c, copygc, c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
+	return ret;
 }
 
 /*
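/*
 * [annotation, not part of the patch] The .fragmentation weight kept by
 * find_buckets_to_copygc() above is a 31-bit fixed-point fill fraction:
 * dirty_sectors * 2^31 / bucket_size. Standalone arithmetic check with
 * made-up numbers (a 512-sector bucket holding 128 live sectors):
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dirty_sectors = 128, bucket_size = 512;
	uint64_t fragmentation = dirty_sectors * (1ULL << 31) / bucket_size;

	/* prints 536870912, i.e. 0.25 * 2^31: the bucket is 25% full, so
	 * it gets a lower weight than fuller buckets, and emptier buckets
	 * are what the copygc heap keeps */
	printf("%llu (%.2f)\n",
	       (unsigned long long) fragmentation,
	       (double) fragmentation / (1ULL << 31));
	return 0;
}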
diff --git a/fs/bcachefs/trace.h b/fs/bcachefs/trace.h
index 937fd132bfd2..fabee8302afa 100644
--- a/fs/bcachefs/trace.h
+++ b/fs/bcachefs/trace.h
@@ -717,6 +717,37 @@ TRACE_EVENT(move_data,
 		  __entry->sectors_moved, __entry->keys_moved)
 );
 
+TRACE_EVENT(evacuate_bucket,
+	TP_PROTO(struct bch_fs *c, struct bpos *bucket,
+		 unsigned sectors, unsigned bucket_size,
+		 int ret),
+	TP_ARGS(c, bucket, sectors, bucket_size, ret),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev		)
+		__field(u64,		member		)
+		__field(u64,		bucket		)
+		__field(u32,		sectors		)
+		__field(u32,		bucket_size	)
+		__field(int,		ret		)
+	),
+
+	TP_fast_assign(
+		__entry->dev		= c->dev;
+		__entry->member		= bucket->inode;
+		__entry->bucket		= bucket->offset;
+		__entry->sectors	= sectors;
+		__entry->bucket_size	= bucket_size;
+		__entry->ret		= ret;
+	),
+
+	TP_printk("%d,%d %llu:%llu sectors %u/%u ret %i",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  __entry->member, __entry->bucket,
+		  __entry->sectors, __entry->bucket_size,
+		  __entry->ret)
+);
+
 TRACE_EVENT(copygc,
 	TP_PROTO(struct bch_fs *c,
 		 u64 sectors_moved, u64 sectors_not_moved,