author     Christy Lee <christylee@fb.com>       2022-01-08 03:42:14 +0300
committer  Andrii Nakryiko <andrii@kernel.org>   2022-01-13 04:01:38 +0300
commit     76acfce664ca6353ab1ab14003d3d1d8b139ad78
tree       814b02e59e77f99a9a5c4739b860e75ff29ef286 /samples
parent     0991f6a38f576aa9a5e34713e23c998a3310d4d0
samples/bpf: Stop using bpf_map__def() API
The libbpf bpf_map__def() API is being deprecated; replace samples/bpf's
usage with the appropriate getters and setters.
Signed-off-by: Christy Lee <christylee@fb.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220108004218.355761-2-christylee@fb.com
Diffstat (limited to 'samples')
-rw-r--r--   samples/bpf/xdp_rxq_info_user.c | 10
1 file changed, 5 insertions, 5 deletions
diff --git a/samples/bpf/xdp_rxq_info_user.c b/samples/bpf/xdp_rxq_info_user.c
index 74a2926eba08..4033f345aa29 100644
--- a/samples/bpf/xdp_rxq_info_user.c
+++ b/samples/bpf/xdp_rxq_info_user.c
@@ -209,7 +209,7 @@ static struct datarec *alloc_record_per_cpu(void)
 
 static struct record *alloc_record_per_rxq(void)
 {
-	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	struct record *array;
 
 	array = calloc(nr_rxqs, sizeof(struct record));
@@ -222,7 +222,7 @@ static struct record *alloc_record_per_rxq(void)
 
 static struct stats_record *alloc_stats_record(void)
 {
-	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	struct stats_record *rec;
 	int i;
 
@@ -241,7 +241,7 @@ static struct stats_record *alloc_stats_record(void)
 
 static void free_stats_record(struct stats_record *r)
 {
-	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	int i;
 
 	for (i = 0; i < nr_rxqs; i++)
@@ -289,7 +289,7 @@ static void stats_collect(struct stats_record *rec)
 	map_collect_percpu(fd, 0, &rec->stats);
 
 	fd = bpf_map__fd(rx_queue_index_map);
-	max_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	max_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	for (i = 0; i < max_rxqs; i++)
 		map_collect_percpu(fd, i, &rec->rxq[i]);
 }
@@ -335,7 +335,7 @@ static void stats_print(struct stats_record *stats_rec,
 			struct stats_record *stats_prev,
 			int action, __u32 cfg_opt)
 {
-	unsigned int nr_rxqs = bpf_map__def(rx_queue_index_map)->max_entries;
+	unsigned int nr_rxqs = bpf_map__max_entries(rx_queue_index_map);
 	unsigned int nr_cpus = bpf_num_possible_cpus();
 	double pps = 0, err = 0;
 	struct record *rec, *prev;
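For context, a minimal sketch of the replacement pattern the patch applies, assuming an already-loaded bpf_object; bpf_object__find_map_by_name() and bpf_map__max_entries() are real libbpf calls, while the surrounding function and its output are illustrative only, not part of this patch.

/* Sketch (not from the patch): read a map's size via the
 * bpf_map__max_entries() getter instead of dereferencing the
 * deprecated bpf_map__def(). Function name and output are
 * illustrative placeholders.
 */
#include <stdio.h>
#include <bpf/libbpf.h>

static void print_rxq_map_size(struct bpf_object *obj)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, "rx_queue_index_map");
	if (!map)
		return;

	/* Deprecated form: bpf_map__def(map)->max_entries */
	printf("max_entries = %u\n", bpf_map__max_entries(map));
}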