Diffstat (limited to 'tools/testing/selftests/bpf')
94 files changed, 5265 insertions, 1156 deletions
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 index 0a6837f97c32..08adc805878b 100644 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64 @@ -1,33 +1,6 @@ -bloom_filter_map # libbpf: prog 'check_bloom': failed to attach: ERROR: strerror_r(-524)=22 -bpf_cookie/lsm -bpf_cookie/multi_kprobe_attach_api -bpf_cookie/multi_kprobe_link_api -bpf_cookie/trampoline -bpf_loop/check_callback_fn_stop # link unexpected error: -524 -bpf_loop/check_invalid_flags -bpf_loop/check_nested_calls -bpf_loop/check_non_constant_callback -bpf_loop/check_nr_loops -bpf_loop/check_null_callback_ctx -bpf_loop/check_stack -bpf_mod_race # bpf_mod_kfunc_race__attach unexpected error: -524 (errno 524) -bpf_tcp_ca/dctcp_fallback -btf_dump/btf_dump: var_data # find type id unexpected find type id: actual -2 < expected 0 -cgroup_hierarchical_stats # attach unexpected error: -524 (errno 524) -d_path/basic # setup attach failed: -524 -deny_namespace # attach unexpected error: -524 (errno 524) -fentry_fexit # fentry_attach unexpected error: -1 (errno 524) -fentry_test # fentry_attach unexpected error: -1 (errno 524) -fexit_sleep # fexit_attach fexit attach failed: -1 -fexit_stress # fexit attach unexpected fexit attach: actual -524 < expected 0 -fexit_test # fexit_attach unexpected error: -1 (errno 524) -get_func_args_test # get_func_args_test__attach unexpected error: -524 (errno 524) (trampoline) -get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (errno 524) (trampoline) -htab_update/reenter_update -kfree_skb # attach fentry unexpected error: -524 (trampoline) -kfunc_call/subprog # extern (var ksym) 'bpf_prog_active': not found in kernel BTF -kfunc_call/subprog_lskel # skel unexpected error: -2 -kfunc_dynptr_param/dynptr_data_null # libbpf: prog 'dynptr_data_null': failed to attach: ERROR: strerror_r(-524)=22 +bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 +bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3 +fexit_sleep # The test never returns. The remaining tests cannot start. 
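A quick key to the error numbers quoted throughout these denylist comments, sketched as a tiny standalone C program. The ENOTSUPP define is an assumption mirroring the kernel-internal value; it is not part of UAPI errno.h, which is exactly why the removed entries logged noise like strerror_r(-524)=22 (strerror_r itself failing with EINVAL):

#include <errno.h>
#include <stdio.h>

/* Kernel-internal ENOTSUPP, deliberately outside the UAPI errno range,
 * so userspace has no name for it. */
#define ENOTSUPP 524

int main(void)
{
	/* codes seen in the entries above: -524 ENOTSUPP (trampoline
	 * attach unsupported), -95 EOPNOTSUPP, -3 ESRCH, -2 ENOENT */
	printf("ENOTSUPP=%d EOPNOTSUPP=%d ESRCH=%d ENOENT=%d\n",
	       ENOTSUPP, EOPNOTSUPP, ESRCH, ENOENT);
	return 0;
}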
kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95 kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95 kprobe_multi_test/attach_api_pattern # bpf_program__attach_kprobe_multi_opts unexpected error: -95 @@ -35,51 +8,5 @@ kprobe_multi_test/attach_api_syms # bpf_program__attach_kprobe_mu kprobe_multi_test/bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95 kprobe_multi_test/link_api_addrs # link_fd unexpected link_fd: actual -95 < expected 0 kprobe_multi_test/link_api_syms # link_fd unexpected link_fd: actual -95 < expected 0 -kprobe_multi_test/skel_api # kprobe_multi__attach unexpected error: -524 (errno 524) -ksyms_module/libbpf # 'bpf_testmod_ksym_percpu': not found in kernel BTF -ksyms_module/lskel # test_ksyms_module_lskel__open_and_load unexpected error: -2 -libbpf_get_fd_by_id_opts # test_libbpf_get_fd_by_id_opts__attach unexpected error: -524 (errno 524) -linked_list -lookup_key # test_lookup_key__attach unexpected error: -524 (errno 524) -lru_bug # lru_bug__attach unexpected error: -524 (errno 524) -modify_return # modify_return__attach failed unexpected error: -524 (errno 524) -module_attach # skel_attach skeleton attach failed: -524 -module_fentry_shadow # bpf_link_create unexpected bpf_link_create: actual -524 < expected 0 -mptcp/base # run_test mptcp unexpected error: -524 (errno 524) -netcnt # packets unexpected packets: actual 10001 != expected 10000 -rcu_read_lock # failed to attach: ERROR: strerror_r(-524)=22 -recursion # skel_attach unexpected error: -524 (errno 524) -ringbuf # skel_attach skeleton attachment failed: -1 -setget_sockopt # attach_cgroup unexpected error: -524 -sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (errno 524) -skc_to_unix_sock # could not attach BPF object unexpected error: -524 (errno 524) -socket_cookie # prog_attach unexpected error: -524 -stacktrace_build_id # compare_stack_ips stackmap vs. 
stack_amap err -1 errno 2 -task_local_storage/exit_creds # skel_attach unexpected error: -524 (errno 524) -task_local_storage/recursion # skel_attach unexpected error: -524 (errno 524) -test_bprm_opts # attach attach failed: -524 -test_ima # attach attach failed: -524 -test_local_storage # attach lsm attach failed: -524 -test_lsm # test_lsm_first_attach unexpected error: -524 (errno 524) -test_overhead # attach_fentry unexpected error: -524 -timer # timer unexpected error: -524 (errno 524) -timer_crash # timer_crash__attach unexpected error: -524 (errno 524) -timer_mim # timer_mim unexpected error: -524 (errno 524) -trace_printk # trace_printk__attach unexpected error: -1 (errno 524) -trace_vprintk # trace_vprintk__attach unexpected error: -1 (errno 524) -tracing_struct # tracing_struct__attach unexpected error: -524 (errno 524) -trampoline_count # attach_prog unexpected error: -524 -unpriv_bpf_disabled # skel_attach unexpected error: -524 (errno 524) -user_ringbuf/test_user_ringbuf_post_misaligned # misaligned_skel unexpected error: -524 (errno 524) -user_ringbuf/test_user_ringbuf_post_producer_wrong_offset -user_ringbuf/test_user_ringbuf_post_larger_than_ringbuf_sz -user_ringbuf/test_user_ringbuf_basic # ringbuf_basic_skel unexpected error: -524 (errno 524) -user_ringbuf/test_user_ringbuf_sample_full_ring_buffer -user_ringbuf/test_user_ringbuf_post_alignment_autoadjust -user_ringbuf/test_user_ringbuf_overfill -user_ringbuf/test_user_ringbuf_discards_properly_ignored -user_ringbuf/test_user_ringbuf_loop -user_ringbuf/test_user_ringbuf_msg_protocol -user_ringbuf/test_user_ringbuf_blocking_reserve -verify_pkcs7_sig # test_verify_pkcs7_sig__attach unexpected error: -524 (errno 524) -vmlinux # skel_attach skeleton attach failed: -524 +kprobe_multi_test/skel_api # libbpf: failed to load BPF skeleton 'kprobe_multi': -3 +module_attach # prog 'kprobe_multi': failed to auto-attach: -95 diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index c7463f3ec3c0..5061d9e24c16 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -26,3 +26,4 @@ user_ringbuf # failed to find kernel BTF type ID of verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?) 
xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline) xdp_metadata # JIT does not support calling kernel function (kfunc) +test_task_under_cgroup # JIT does not support calling kernel function (kfunc) diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 28d2c77262be..538df8fb8c42 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile @@ -88,8 +88,7 @@ TEST_GEN_PROGS_EXTENDED = test_sock_addr test_skb_cgroup_id_user \ xskxceiver xdp_redirect_multi xdp_synproxy veristat xdp_hw_metadata \ xdp_features -TEST_CUSTOM_PROGS = $(OUTPUT)/urandom_read $(OUTPUT)/sign-file -TEST_GEN_FILES += liburandom_read.so +TEST_GEN_FILES += liburandom_read.so urandom_read sign-file # Emit succinct information message describing current building step # $1 - generic step name (e.g., CC, LINK, etc); diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c index d9c080ac1796..41fe5a82b88b 100644 --- a/tools/testing/selftests/bpf/bench.c +++ b/tools/testing/selftests/bpf/bench.c @@ -17,7 +17,7 @@ struct env env = { .duration_sec = 5, .affinity = false, .quiet = false, - .consumer_cnt = 1, + .consumer_cnt = 0, .producer_cnt = 1, }; @@ -441,12 +441,14 @@ static void setup_timer() static void set_thread_affinity(pthread_t thread, int cpu) { cpu_set_t cpuset; + int err; CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); - if (pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset)) { + err = pthread_setaffinity_np(thread, sizeof(cpuset), &cpuset); + if (err) { fprintf(stderr, "setting affinity to CPU #%d failed: %d\n", - cpu, errno); + cpu, -err); exit(1); } } @@ -467,7 +469,7 @@ static int next_cpu(struct cpu_set *cpu_set) exit(1); } - return cpu_set->next_cpu++; + return cpu_set->next_cpu++ % env.nr_cpus; } static struct bench_state { @@ -605,7 +607,7 @@ static void setup_benchmark(void) bench->consumer_thread, (void *)(long)i); if (err) { fprintf(stderr, "failed to create consumer thread #%d: %d\n", - i, -errno); + i, -err); exit(1); } if (env.affinity) @@ -624,7 +626,7 @@ static void setup_benchmark(void) bench->producer_thread, (void *)(long)i); if (err) { fprintf(stderr, "failed to create producer thread #%d: %d\n", - i, -errno); + i, -err); exit(1); } if (env.affinity) @@ -657,6 +659,7 @@ static void collect_measurements(long delta_ns) { int main(int argc, char **argv) { + env.nr_cpus = get_nprocs(); parse_cmdline_args_init(argc, argv); if (env.list) { diff --git a/tools/testing/selftests/bpf/bench.h b/tools/testing/selftests/bpf/bench.h index 402729c6a3ac..7ff32be3d730 100644 --- a/tools/testing/selftests/bpf/bench.h +++ b/tools/testing/selftests/bpf/bench.h @@ -27,6 +27,7 @@ struct env { bool quiet; int consumer_cnt; int producer_cnt; + int nr_cpus; struct cpu_set prod_cpus; struct cpu_set cons_cpus; }; diff --git a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c index 7c8ccc108313..e289dd1a14ee 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c +++ b/tools/testing/selftests/bpf/benchs/bench_bloom_filter_map.c @@ -107,9 +107,9 @@ const struct argp bench_bloom_map_argp = { static void validate(void) { - if (env.consumer_cnt != 1) { + if (env.consumer_cnt != 0) { fprintf(stderr, - "The bloom filter benchmarks do not support multi-consumer use\n"); + "The bloom filter benchmarks do not support consumer\n"); exit(1); } } @@ -421,18 +421,12 @@ static void measure(struct bench_res *res) 
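The bench.c hunk above fixes a classic pthreads pitfall: pthread_* calls return the error number directly and leave errno untouched, so the old "cpu, errno" report could print a stale value. The companion next_cpu() change wraps the round-robin index with % env.nr_cpus so affinity assignment never walks past the available CPUs. A minimal standalone sketch of the corrected error convention (pin_self_to_cpu is a hypothetical helper, not selftests code):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static int pin_self_to_cpu(int cpu)
{
	cpu_set_t cpuset;
	int err;

	CPU_ZERO(&cpuset);
	CPU_SET(cpu, &cpuset);

	/* returns the error number on failure; errno is NOT set */
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
	if (err)
		fprintf(stderr, "pinning to CPU #%d failed: %d\n", cpu, -err);
	return -err;
}

int main(void)
{
	return pin_self_to_cpu(0) ? 1 : 0;
}

Build with -pthread; pinning to CPU 0 should succeed on any machine.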
last_false_hits = total_false_hits; } -static void *consumer(void *input) -{ - return NULL; -} - const struct bench bench_bloom_lookup = { .name = "bloom-lookup", .argp = &bench_bloom_map_argp, .validate = validate, .setup = bloom_lookup_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -444,7 +438,6 @@ const struct bench bench_bloom_update = { .validate = validate, .setup = bloom_update_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -456,7 +449,6 @@ const struct bench bench_bloom_false_positive = { .validate = validate, .setup = false_positive_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = false_hits_report_progress, .report_final = false_hits_report_final, @@ -468,7 +460,6 @@ const struct bench bench_hashmap_without_bloom = { .validate = validate, .setup = hashmap_no_bloom_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -480,7 +471,6 @@ const struct bench bench_hashmap_with_bloom = { .validate = validate, .setup = hashmap_with_bloom_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c index 75abe8137b6c..ee1dc12c5e5e 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_full_update.c @@ -14,8 +14,8 @@ static struct ctx { static void validate(void) { - if (env.consumer_cnt != 1) { - fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + if (env.consumer_cnt != 0) { + fprintf(stderr, "benchmark doesn't support consumer!\n"); exit(1); } } @@ -30,11 +30,6 @@ static void *producer(void *input) return NULL; } -static void *consumer(void *input) -{ - return NULL; -} - static void measure(struct bench_res *res) { } @@ -88,7 +83,6 @@ const struct bench bench_bpf_hashmap_full_update = { .validate = validate, .setup = setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = NULL, .report_final = hashmap_report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c index 8dbb02f75cff..279ff1b8b5b2 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_hashmap_lookup.c @@ -113,8 +113,8 @@ const struct argp bench_hashmap_lookup_argp = { static void validate(void) { - if (env.consumer_cnt != 1) { - fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + if (env.consumer_cnt != 0) { + fprintf(stderr, "benchmark doesn't support consumer!\n"); exit(1); } @@ -134,11 +134,6 @@ static void *producer(void *input) return NULL; } -static void *consumer(void *input) -{ - return NULL; -} - static void measure(struct bench_res *res) { } @@ -276,7 +271,6 @@ const struct bench bench_bpf_hashmap_lookup = { .validate = validate, .setup = setup, .producer_thread = producer, - .consumer_thread = 
consumer, .measure = measure, .report_progress = NULL, .report_final = hashmap_report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c index d8a0394e10b1..a705cfb2bccc 100644 --- a/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c +++ b/tools/testing/selftests/bpf/benchs/bench_bpf_loop.c @@ -47,8 +47,8 @@ const struct argp bench_bpf_loop_argp = { static void validate(void) { - if (env.consumer_cnt != 1) { - fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + if (env.consumer_cnt != 0) { + fprintf(stderr, "benchmark doesn't support consumer!\n"); exit(1); } } @@ -62,11 +62,6 @@ static void *producer(void *input) return NULL; } -static void *consumer(void *input) -{ - return NULL; -} - static void measure(struct bench_res *res) { res->hits = atomic_swap(&ctx.skel->bss->hits, 0); @@ -99,7 +94,6 @@ const struct bench bench_bpf_loop = { .validate = validate, .setup = setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = ops_report_progress, .report_final = ops_report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_count.c b/tools/testing/selftests/bpf/benchs/bench_count.c index 078972ce208e..ba89ed3936b7 100644 --- a/tools/testing/selftests/bpf/benchs/bench_count.c +++ b/tools/testing/selftests/bpf/benchs/bench_count.c @@ -18,11 +18,6 @@ static void *count_global_producer(void *input) return NULL; } -static void *count_global_consumer(void *input) -{ - return NULL; -} - static void count_global_measure(struct bench_res *res) { struct count_global_ctx *ctx = &count_global_ctx; @@ -40,7 +35,7 @@ static void count_local_setup(void) { struct count_local_ctx *ctx = &count_local_ctx; - ctx->hits = calloc(env.consumer_cnt, sizeof(*ctx->hits)); + ctx->hits = calloc(env.producer_cnt, sizeof(*ctx->hits)); if (!ctx->hits) exit(1); } @@ -56,11 +51,6 @@ static void *count_local_producer(void *input) return NULL; } -static void *count_local_consumer(void *input) -{ - return NULL; -} - static void count_local_measure(struct bench_res *res) { struct count_local_ctx *ctx = &count_local_ctx; @@ -74,7 +64,6 @@ static void count_local_measure(struct bench_res *res) const struct bench bench_count_global = { .name = "count-global", .producer_thread = count_global_producer, - .consumer_thread = count_global_consumer, .measure = count_global_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -84,7 +73,6 @@ const struct bench bench_count_local = { .name = "count-local", .setup = count_local_setup, .producer_thread = count_local_producer, - .consumer_thread = count_local_consumer, .measure = count_local_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage.c b/tools/testing/selftests/bpf/benchs/bench_local_storage.c index d4b2817306d4..452499428ceb 100644 --- a/tools/testing/selftests/bpf/benchs/bench_local_storage.c +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage.c @@ -74,8 +74,8 @@ static void validate(void) fprintf(stderr, "benchmark doesn't support multi-producer!\n"); exit(1); } - if (env.consumer_cnt != 1) { - fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + if (env.consumer_cnt != 0) { + fprintf(stderr, "benchmark doesn't support consumer!\n"); exit(1); } @@ -230,11 +230,6 @@ static inline void trigger_bpf_program(void) syscall(__NR_getpgid); } -static void *consumer(void 
*input) -{ - return NULL; -} - static void *producer(void *input) { while (true) @@ -259,7 +254,6 @@ const struct bench bench_local_storage_cache_seq_get = { .validate = validate, .setup = local_storage_cache_get_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = local_storage_report_progress, .report_final = local_storage_report_final, @@ -271,7 +265,6 @@ const struct bench bench_local_storage_cache_interleaved_get = { .validate = validate, .setup = local_storage_cache_get_interleaved_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = local_storage_report_progress, .report_final = local_storage_report_final, @@ -283,7 +276,6 @@ const struct bench bench_local_storage_cache_hashmap_control = { .validate = validate, .setup = hashmap_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = local_storage_report_progress, .report_final = local_storage_report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c index cff703f90e95..b36de42ee4d9 100644 --- a/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_create.c @@ -71,7 +71,7 @@ const struct argp bench_local_storage_create_argp = { static void validate(void) { - if (env.consumer_cnt > 1) { + if (env.consumer_cnt != 0) { fprintf(stderr, "local-storage-create benchmark does not need consumer\n"); exit(1); @@ -143,11 +143,6 @@ static void measure(struct bench_res *res) res->drops = atomic_swap(&skel->bss->kmalloc_cnts, 0); } -static void *consumer(void *input) -{ - return NULL; -} - static void *sk_producer(void *input) { struct thread *t = &threads[(long)(input)]; @@ -257,7 +252,6 @@ const struct bench bench_local_storage_create = { .validate = validate, .setup = setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = report_progress, .report_final = report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c index d5eb5587f2aa..edf0b00418c1 100644 --- a/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c +++ b/tools/testing/selftests/bpf/benchs/bench_local_storage_rcu_tasks_trace.c @@ -72,8 +72,8 @@ static void validate(void) fprintf(stderr, "benchmark doesn't support multi-producer!\n"); exit(1); } - if (env.consumer_cnt != 1) { - fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + if (env.consumer_cnt != 0) { + fprintf(stderr, "benchmark doesn't support consumer!\n"); exit(1); } @@ -197,11 +197,6 @@ static void measure(struct bench_res *res) ctx.prev_kthread_stime = ticks; } -static void *consumer(void *input) -{ - return NULL; -} - static void *producer(void *input) { while (true) @@ -262,7 +257,6 @@ const struct bench bench_local_storage_tasks_trace = { .validate = validate, .setup = local_storage_tasks_trace_setup, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = report_progress, .report_final = report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_rename.c b/tools/testing/selftests/bpf/benchs/bench_rename.c index 3c203b6d6a6e..bf66893c7a33 100644 --- a/tools/testing/selftests/bpf/benchs/bench_rename.c +++ 
b/tools/testing/selftests/bpf/benchs/bench_rename.c @@ -17,8 +17,8 @@ static void validate(void) fprintf(stderr, "benchmark doesn't support multi-producer!\n"); exit(1); } - if (env.consumer_cnt != 1) { - fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + if (env.consumer_cnt != 0) { + fprintf(stderr, "benchmark doesn't support consumer!\n"); exit(1); } } @@ -106,17 +106,11 @@ static void setup_fexit(void) attach_bpf(ctx.skel->progs.prog5); } -static void *consumer(void *input) -{ - return NULL; -} - const struct bench bench_rename_base = { .name = "rename-base", .validate = validate, .setup = setup_base, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -127,7 +121,6 @@ const struct bench bench_rename_kprobe = { .validate = validate, .setup = setup_kprobe, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -138,7 +131,6 @@ const struct bench bench_rename_kretprobe = { .validate = validate, .setup = setup_kretprobe, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -149,7 +141,6 @@ const struct bench bench_rename_rawtp = { .validate = validate, .setup = setup_rawtp, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -160,7 +151,6 @@ const struct bench bench_rename_fentry = { .validate = validate, .setup = setup_fentry, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -171,7 +161,6 @@ const struct bench bench_rename_fexit = { .validate = validate, .setup = setup_fexit, .producer_thread = producer, - .consumer_thread = consumer, .measure = measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c index fc91fdac4faa..3ca14ad36607 100644 --- a/tools/testing/selftests/bpf/benchs/bench_ringbufs.c +++ b/tools/testing/selftests/bpf/benchs/bench_ringbufs.c @@ -96,7 +96,7 @@ static inline void bufs_trigger_batch(void) static void bufs_validate(void) { if (env.consumer_cnt != 1) { - fprintf(stderr, "rb-libbpf benchmark doesn't support multi-consumer!\n"); + fprintf(stderr, "rb-libbpf benchmark needs one consumer!\n"); exit(1); } diff --git a/tools/testing/selftests/bpf/benchs/bench_strncmp.c b/tools/testing/selftests/bpf/benchs/bench_strncmp.c index d3fad2ba6916..a5e1428fd7a0 100644 --- a/tools/testing/selftests/bpf/benchs/bench_strncmp.c +++ b/tools/testing/selftests/bpf/benchs/bench_strncmp.c @@ -50,8 +50,8 @@ const struct argp bench_strncmp_argp = { static void strncmp_validate(void) { - if (env.consumer_cnt != 1) { - fprintf(stderr, "strncmp benchmark doesn't support multi-consumer!\n"); + if (env.consumer_cnt != 0) { + fprintf(stderr, "strncmp benchmark doesn't support consumer!\n"); exit(1); } } @@ -128,11 +128,6 @@ static void *strncmp_producer(void *ctx) return NULL; } -static void *strncmp_consumer(void *ctx) -{ - return NULL; -} - static void strncmp_measure(struct bench_res *res) { res->hits = atomic_swap(&ctx.skel->bss->hits, 
0); @@ -144,7 +139,6 @@ const struct bench bench_strncmp_no_helper = { .validate = strncmp_validate, .setup = strncmp_no_helper_setup, .producer_thread = strncmp_producer, - .consumer_thread = strncmp_consumer, .measure = strncmp_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -156,7 +150,6 @@ const struct bench bench_strncmp_helper = { .validate = strncmp_validate, .setup = strncmp_helper_setup, .producer_thread = strncmp_producer, - .consumer_thread = strncmp_consumer, .measure = strncmp_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, diff --git a/tools/testing/selftests/bpf/benchs/bench_trigger.c b/tools/testing/selftests/bpf/benchs/bench_trigger.c index 0c481de2833d..dbd362771d6a 100644 --- a/tools/testing/selftests/bpf/benchs/bench_trigger.c +++ b/tools/testing/selftests/bpf/benchs/bench_trigger.c @@ -13,8 +13,8 @@ static struct counter base_hits; static void trigger_validate(void) { - if (env.consumer_cnt != 1) { - fprintf(stderr, "benchmark doesn't support multi-consumer!\n"); + if (env.consumer_cnt != 0) { + fprintf(stderr, "benchmark doesn't support consumer!\n"); exit(1); } } @@ -103,11 +103,6 @@ static void trigger_fmodret_setup(void) attach_bpf(ctx.skel->progs.bench_trigger_fmodret); } -static void *trigger_consumer(void *input) -{ - return NULL; -} - /* make sure call is not inlined and not avoided by compiler, so __weak and * inline asm volatile in the body of the function * @@ -205,7 +200,6 @@ const struct bench bench_trig_base = { .name = "trig-base", .validate = trigger_validate, .producer_thread = trigger_base_producer, - .consumer_thread = trigger_consumer, .measure = trigger_base_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -216,7 +210,6 @@ const struct bench bench_trig_tp = { .validate = trigger_validate, .setup = trigger_tp_setup, .producer_thread = trigger_producer, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -227,7 +220,6 @@ const struct bench bench_trig_rawtp = { .validate = trigger_validate, .setup = trigger_rawtp_setup, .producer_thread = trigger_producer, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -238,7 +230,6 @@ const struct bench bench_trig_kprobe = { .validate = trigger_validate, .setup = trigger_kprobe_setup, .producer_thread = trigger_producer, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -249,7 +240,6 @@ const struct bench bench_trig_fentry = { .validate = trigger_validate, .setup = trigger_fentry_setup, .producer_thread = trigger_producer, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -260,7 +250,6 @@ const struct bench bench_trig_fentry_sleep = { .validate = trigger_validate, .setup = trigger_fentry_sleep_setup, .producer_thread = trigger_producer, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -271,7 +260,6 @@ const struct bench bench_trig_fmodret = { .validate = trigger_validate, .setup = trigger_fmodret_setup, .producer_thread = 
trigger_producer, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -281,7 +269,6 @@ const struct bench bench_trig_uprobe_base = { .name = "trig-uprobe-base", .setup = NULL, /* no uprobe/uretprobe is attached */ .producer_thread = uprobe_base_producer, - .consumer_thread = trigger_consumer, .measure = trigger_base_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -291,7 +278,6 @@ const struct bench bench_trig_uprobe_with_nop = { .name = "trig-uprobe-with-nop", .setup = uprobe_setup_with_nop, .producer_thread = uprobe_producer_with_nop, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -301,7 +287,6 @@ const struct bench bench_trig_uretprobe_with_nop = { .name = "trig-uretprobe-with-nop", .setup = uretprobe_setup_with_nop, .producer_thread = uprobe_producer_with_nop, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -311,7 +296,6 @@ const struct bench bench_trig_uprobe_without_nop = { .name = "trig-uprobe-without-nop", .setup = uprobe_setup_without_nop, .producer_thread = uprobe_producer_without_nop, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, @@ -321,7 +305,6 @@ const struct bench bench_trig_uretprobe_without_nop = { .name = "trig-uretprobe-without-nop", .setup = uretprobe_setup_without_nop, .producer_thread = uprobe_producer_without_nop, - .consumer_thread = trigger_consumer, .measure = trigger_measure, .report_progress = hits_drops_report_progress, .report_final = hits_drops_report_final, diff --git a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh index ada028aa9007..91e3567962ff 100755 --- a/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh +++ b/tools/testing/selftests/bpf/benchs/run_bench_ringbufs.sh @@ -4,46 +4,48 @@ source ./benchs/run_common.sh set -eufo pipefail +RUN_RB_BENCH="$RUN_BENCH -c1" + header "Single-producer, parallel producer" for b in rb-libbpf rb-custom pb-libbpf pb-custom; do - summarize $b "$($RUN_BENCH $b)" + summarize $b "$($RUN_RB_BENCH $b)" done header "Single-producer, parallel producer, sampled notification" for b in rb-libbpf rb-custom pb-libbpf pb-custom; do - summarize $b "$($RUN_BENCH --rb-sampled $b)" + summarize $b "$($RUN_RB_BENCH --rb-sampled $b)" done header "Single-producer, back-to-back mode" for b in rb-libbpf rb-custom pb-libbpf pb-custom; do - summarize $b "$($RUN_BENCH --rb-b2b $b)" - summarize $b-sampled "$($RUN_BENCH --rb-sampled --rb-b2b $b)" + summarize $b "$($RUN_RB_BENCH --rb-b2b $b)" + summarize $b-sampled "$($RUN_RB_BENCH --rb-sampled --rb-b2b $b)" done header "Ringbuf back-to-back, effect of sample rate" for b in 1 5 10 25 50 100 250 500 1000 2000 3000; do - summarize "rb-sampled-$b" "$($RUN_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b rb-custom)" + summarize "rb-sampled-$b" "$($RUN_RB_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b rb-custom)" done header "Perfbuf back-to-back, effect of sample rate" for b in 1 5 10 25 50 100 250 500 1000 2000 3000; do - summarize "pb-sampled-$b" "$($RUN_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled 
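All of the consumer deletions above follow from one framework change: env.consumer_cnt now defaults to 0 and setup_benchmark() spawns consumer threads only when asked, so benches that never consume data drop their return-NULL stubs, while the run_bench_ringbufs.sh hunk (continuing below) opts back in explicitly via -c1. A condensed, self-contained sketch of the resulting registration shape (struct bench here is a simplified stand-in for the real bench.h definition, not selftests code):

#include <stddef.h>
#include <stdio.h>

struct bench_res { long hits; };

struct bench {
	const char *name;
	void *(*producer_thread)(void *input);
	void *(*consumer_thread)(void *input); /* optional now */
	void (*measure)(struct bench_res *res);
};

static void *producer(void *input) { return NULL; }
static void measure(struct bench_res *res) { res->hits = 0; }

static const struct bench bench_example = {
	.name = "example-no-consumer",
	.producer_thread = producer,
	/* .consumer_thread intentionally left NULL */
	.measure = measure,
};

int main(void)
{
	printf("%s has a consumer? %s\n", bench_example.name,
	       bench_example.consumer_thread ? "yes" : "no");
	return 0;
}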
--rb-sample-rate $b pb-custom)" + summarize "pb-sampled-$b" "$($RUN_RB_BENCH --rb-b2b --rb-batch-cnt $b --rb-sampled --rb-sample-rate $b pb-custom)" done header "Ringbuf back-to-back, reserve+commit vs output" -summarize "reserve" "$($RUN_BENCH --rb-b2b rb-custom)" -summarize "output" "$($RUN_BENCH --rb-b2b --rb-use-output rb-custom)" +summarize "reserve" "$($RUN_RB_BENCH --rb-b2b rb-custom)" +summarize "output" "$($RUN_RB_BENCH --rb-b2b --rb-use-output rb-custom)" header "Ringbuf sampled, reserve+commit vs output" -summarize "reserve-sampled" "$($RUN_BENCH --rb-sampled rb-custom)" -summarize "output-sampled" "$($RUN_BENCH --rb-sampled --rb-use-output rb-custom)" +summarize "reserve-sampled" "$($RUN_RB_BENCH --rb-sampled rb-custom)" +summarize "output-sampled" "$($RUN_RB_BENCH --rb-sampled --rb-use-output rb-custom)" header "Single-producer, consumer/producer competing on the same CPU, low batch count" for b in rb-libbpf rb-custom pb-libbpf pb-custom; do - summarize $b "$($RUN_BENCH --rb-batch-cnt 1 --rb-sample-rate 1 --prod-affinity 0 --cons-affinity 0 $b)" + summarize $b "$($RUN_RB_BENCH --rb-batch-cnt 1 --rb-sample-rate 1 --prod-affinity 0 --cons-affinity 0 $b)" done header "Ringbuf, multi-producer contention" for b in 1 2 3 4 8 12 16 20 24 28 32 36 40 44 48 52; do - summarize "rb-libbpf nr_prod $b" "$($RUN_BENCH -p$b --rb-batch-cnt 50 rb-libbpf)" + summarize "rb-libbpf nr_prod $b" "$($RUN_RB_BENCH -p$b --rb-batch-cnt 50 rb-libbpf)" done diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h index 8c993ec8ceea..642dda0e758a 100644 --- a/tools/testing/selftests/bpf/bpf_kfuncs.h +++ b/tools/testing/selftests/bpf/bpf_kfuncs.h @@ -35,4 +35,10 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset, extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset, void *buffer, __u32 buffer__szk) __ksym; +extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym; +extern bool bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym; +extern bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym; +extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym; +extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym; + #endif diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 52785ba671e6..aaf6ef1201c7 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -9,6 +9,7 @@ #include <linux/sysfs.h> #include <linux/tracepoint.h> #include "bpf_testmod.h" +#include "bpf_testmod_kfunc.h" #define CREATE_TRACE_POINTS #include "bpf_testmod-events.h" @@ -190,8 +191,6 @@ noinline int bpf_testmod_fentry_test3(char a, int b, u64 c) return a + b + c; } -__diag_pop(); - int bpf_testmod_fentry_ok; noinline ssize_t @@ -272,6 +271,14 @@ bpf_testmod_test_write(struct file *file, struct kobject *kobj, EXPORT_SYMBOL(bpf_testmod_test_write); ALLOW_ERROR_INJECTION(bpf_testmod_test_write, ERRNO); +noinline int bpf_fentry_shadow_test(int a) +{ + return a + 2; +} +EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test); + +__diag_pop(); + static struct bin_attribute bin_attr_bpf_testmod_file __ro_after_init = { .attr = { .name = "bpf_testmod", .mode = 0666, }, .read = bpf_testmod_test_read, @@ -289,8 +296,171 @@ static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { .set = &bpf_testmod_common_kfunc_ids, }; 
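The bpf_kfuncs.h additions above declare the new dynptr kfuncs this series tests. A hedged BPF-side sketch of how adjust, clone and size compose (illustrative only; the real coverage lives in the dynptr selftest programs, and this assumes the usual selftests vmlinux.h build):

// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_kfuncs.h"

char _license[] SEC("license") = "GPL";

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} ringbuf SEC(".maps");

int err;

SEC("tp/syscalls/sys_enter_nanosleep")
int test_adjust_then_clone(void *ctx)
{
	struct bpf_dynptr ptr, clone;

	err = bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr);
	if (err)
		goto out;

	/* shrink the view to bytes [8, 24) of the reservation */
	err = bpf_dynptr_adjust(&ptr, 8, 24);
	if (err)
		goto out;

	/* an independent 16-byte view onto the same data */
	err = bpf_dynptr_clone(&ptr, &clone);
	if (!err && bpf_dynptr_size(&clone) != 16)
		err = -1;
out:
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}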
+__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d) +{ + return a + b + c + d; +} + +__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b) +{ + return a + b; +} + +__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk) +{ + return sk; +} + +__bpf_kfunc long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d) +{ + /* Provoke the compiler to assume that the caller has sign-extended a, + * b and c on platforms where this is required (e.g. s390x). + */ + return (long)a + (long)b + (long)c + d; +} + +static struct prog_test_ref_kfunc prog_test_struct = { + .a = 42, + .b = 108, + .next = &prog_test_struct, + .cnt = REFCOUNT_INIT(1), +}; + +__bpf_kfunc struct prog_test_ref_kfunc * +bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) +{ + refcount_inc(&prog_test_struct.cnt); + return &prog_test_struct; +} + +__bpf_kfunc void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p) +{ + WARN_ON_ONCE(1); +} + +__bpf_kfunc struct prog_test_member * +bpf_kfunc_call_memb_acquire(void) +{ + WARN_ON_ONCE(1); + return NULL; +} + +__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p) +{ + WARN_ON_ONCE(1); +} + +static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size) +{ + if (size > 2 * sizeof(int)) + return NULL; + + return (int *)p; +} + +__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, + const int rdwr_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size); +} + +__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, + const int rdonly_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); +} + +/* the next 2 ones can't really be used for testing except to ensure + * that the verifier rejects the call. + * Acquire functions must return struct pointers, so these ones are + * failing.
+ */ +__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, + const int rdonly_buf_size) +{ + return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size); +} + +__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len) +{ +} + +__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) +{ + /* p != NULL, but p->cnt could be 0 */ +} + +__bpf_kfunc void bpf_kfunc_call_test_destructive(void) +{ +} + +__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) +{ + return arg; +} + BTF_SET8_START(bpf_testmod_check_kfunc_ids) BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc) +BTF_ID_FLAGS(func, bpf_kfunc_call_test1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test3) +BTF_ID_FLAGS(func, bpf_kfunc_call_test4) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg) +BTF_ID_FLAGS(func, bpf_kfunc_call_test_offset) BTF_SET8_END(bpf_testmod_check_kfunc_ids) static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { @@ -298,12 +468,6 @@ static const struct btf_kfunc_id_set bpf_testmod_kfunc_set = { .set = &bpf_testmod_check_kfunc_ids, }; -noinline int bpf_fentry_shadow_test(int a) -{ - return a + 2; -} -EXPORT_SYMBOL_GPL(bpf_fentry_shadow_test); - extern int bpf_fentry_test1(int a); static int bpf_testmod_init(void) @@ -312,6 +476,8 @@ static int bpf_testmod_init(void) ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set); ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_testmod_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_testmod_kfunc_set); + ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); if (ret < 0) return ret; if 
(bpf_fentry_test1(0) < 0) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h new file mode 100644 index 000000000000..f5c5b1375c24 --- /dev/null +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _BPF_TESTMOD_KFUNC_H +#define _BPF_TESTMOD_KFUNC_H + +#ifndef __KERNEL__ +#include <vmlinux.h> +#include <bpf/bpf_helpers.h> +#else +#define __ksym +struct prog_test_member1 { + int a; +}; + +struct prog_test_member { + struct prog_test_member1 m; + int c; +}; + +struct prog_test_ref_kfunc { + int a; + int b; + struct prog_test_member memb; + struct prog_test_ref_kfunc *next; + refcount_t cnt; +}; +#endif + +struct prog_test_pass1 { + int x0; + struct { + int x1; + struct { + int x2; + struct { + int x3; + }; + }; + }; +}; + +struct prog_test_pass2 { + int len; + short arr1[4]; + struct { + char arr2[4]; + unsigned long arr3[8]; + } x; +}; + +struct prog_test_fail1 { + void *p; + int x; +}; + +struct prog_test_fail2 { + int x8; + struct prog_test_pass1 x; +}; + +struct prog_test_fail3 { + int len; + char arr1[2]; + char arr2[]; +}; + +struct prog_test_ref_kfunc * +bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym; +void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; +void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym; + +void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; +int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; +int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; +void bpf_kfunc_call_int_mem_release(int *p) __ksym; + +/* The bpf_kfunc_call_test_static_unused_arg is defined as static, + * but bpf program compilation needs to see it as a global symbol.
+ */ +#ifndef __KERNEL__ +u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym; +#endif + +void bpf_testmod_test_mod_kfunc(int i) __ksym; + +__u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, + __u32 c, __u64 d) __ksym; +int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym; +struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym; +long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym; + +void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) __ksym; +void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym; +void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym; +void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym; + +void bpf_kfunc_call_test_destructive(void) __ksym; + +void bpf_kfunc_call_test_offset(struct prog_test_ref_kfunc *p); +struct prog_test_member *bpf_kfunc_call_memb_acquire(void); +void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p); +void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p); +void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p); +void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p); +void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len); +#endif /* _BPF_TESTMOD_KFUNC_H */ diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 63cd4ab70171..3b350bc31343 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -13,6 +13,9 @@ CONFIG_CGROUP_BPF=y CONFIG_CRYPTO_HMAC=y CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_INFO_BTF=y +CONFIG_DEBUG_INFO_DWARF4=y CONFIG_DYNAMIC_FTRACE=y CONFIG_FPROBE=y CONFIG_FTRACE_SYSCALLS=y @@ -60,6 +63,7 @@ CONFIG_NET_SCH_INGRESS=y CONFIG_NET_SCHED=y CONFIG_NETDEVSIM=y CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y CONFIG_NETFILTER_SYNPROXY=y CONFIG_NETFILTER_XT_CONNMARK=y CONFIG_NETFILTER_XT_MATCH_STATE=y diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 596caa176582..a105c0cd008a 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -427,3 +427,26 @@ void close_netns(struct nstoken *token) close(token->orig_netns_fd); free(token); } + +int get_socket_local_port(int sock_fd) +{ + struct sockaddr_storage addr; + socklen_t addrlen = sizeof(addr); + int err; + + err = getsockname(sock_fd, (struct sockaddr *)&addr, &addrlen); + if (err < 0) + return err; + + if (addr.ss_family == AF_INET) { + struct sockaddr_in *sin = (struct sockaddr_in *)&addr; + + return sin->sin_port; + } else if (addr.ss_family == AF_INET6) { + struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&addr; + + return sin->sin6_port; + } + + return -1; +} diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index f882c691b790..694185644da6 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -56,6 +56,7 @@ int fastopen_connect(int server_fd, const char *data, unsigned int data_len, int make_sockaddr(int family, const char *addr_str, __u16 port, struct sockaddr_storage *addr, socklen_t *len); char *ping_command(int family); +int get_socket_local_port(int sock_fd); struct nstoken; /** diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c index b17bfa0e0aac..bb143de68875 100644 --- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c +++ 
b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c @@ -96,12 +96,80 @@ static void test_parse_test_list(void) goto error; ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name"); ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name"); + free_test_filter_set(&set); + + ASSERT_OK(parse_test_list("t/subtest1,t/subtest2", &set, true), + "parsing"); + if (!ASSERT_EQ(set.cnt, 1, "count of test filters")) + goto error; + if (!ASSERT_OK_PTR(set.tests, "test filters initialized")) + goto error; + if (!ASSERT_EQ(set.tests[0].subtest_cnt, 2, "subtest filters count")) + goto error; + ASSERT_OK(strcmp("t", set.tests[0].name), "test name"); + ASSERT_OK(strcmp("subtest1", set.tests[0].subtests[0]), "subtest name"); + ASSERT_OK(strcmp("subtest2", set.tests[0].subtests[1]), "subtest name"); error: free_test_filter_set(&set); } +static void test_parse_test_list_file(void) +{ + struct test_filter_set set; + char tmpfile[80]; + FILE *fp; + int fd; + + snprintf(tmpfile, sizeof(tmpfile), "/tmp/bpf_arg_parsing_test.XXXXXX"); + fd = mkstemp(tmpfile); + if (!ASSERT_GE(fd, 0, "create tmp")) + return; + + fp = fdopen(fd, "w"); + if (!ASSERT_NEQ(fp, NULL, "fdopen tmp")) { + close(fd); + goto out_remove; + } + + fprintf(fp, "# comment\n"); + fprintf(fp, " test_with_spaces \n"); + fprintf(fp, "testA/subtest # comment\n"); + fprintf(fp, "testB#comment with no space\n"); + fprintf(fp, "testB # duplicate\n"); + fprintf(fp, "testA/subtest # subtest duplicate\n"); + fprintf(fp, "testA/subtest2\n"); + fprintf(fp, "testC_no_eof_newline"); + fflush(fp); + + if (!ASSERT_OK(ferror(fp), "prepare tmp")) + goto out_fclose; + + init_test_filter_set(&set); + + ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file"); + + ASSERT_EQ(set.cnt, 4, "test count"); + ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name"); + ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count"); + ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name"); + ASSERT_EQ(set.tests[1].subtest_cnt, 2, "test 1 subtest count"); + ASSERT_OK(strcmp("subtest", set.tests[1].subtests[0]), "test 1 subtest 0"); + ASSERT_OK(strcmp("subtest2", set.tests[1].subtests[1]), "test 1 subtest 1"); + ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name"); + ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name"); + + free_test_filter_set(&set); + +out_fclose: + fclose(fp); +out_remove: + remove(tmpfile); +} + void test_arg_parsing(void) { if (test__start_subtest("test_parse_test_list")) test_parse_test_list(); + if (test__start_subtest("test_parse_test_list_file")) + test_parse_test_list_file(); } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c index a4d0cc9d3367..fe2c502e5089 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c @@ -11,6 +11,7 @@ #include "ksym_race.skel.h" #include "bpf_mod_race.skel.h" #include "kfunc_call_race.skel.h" +#include "testing_helpers.h" /* This test crafts a race between btf_try_get_module and do_init_module, and * checks whether btf_try_get_module handles the invocation for a well-formed @@ -44,35 +45,10 @@ enum bpf_test_state { static _Atomic enum bpf_test_state state = _TS_INVALID; -static int sys_finit_module(int fd, const char *param_values, int flags) -{ - return syscall(__NR_finit_module, fd, param_values, flags); -} - -static int sys_delete_module(const char *name, unsigned int flags) -{ - return 
syscall(__NR_delete_module, name, flags); -} - -static int load_module(const char *mod) -{ - int ret, fd; - - fd = open("bpf_testmod.ko", O_RDONLY); - if (fd < 0) - return fd; - - ret = sys_finit_module(fd, "", 0); - close(fd); - if (ret < 0) - return ret; - return 0; -} - static void *load_module_thread(void *p) { - if (!ASSERT_NEQ(load_module("bpf_testmod.ko"), 0, "load_module_thread must fail")) + if (!ASSERT_NEQ(load_bpf_testmod(false), 0, "load_module_thread must fail")) atomic_store(&state, TS_MODULE_LOAD); else atomic_store(&state, TS_MODULE_LOAD_FAIL); @@ -124,7 +100,7 @@ static void test_bpf_mod_race_config(const struct test_config *config) if (!ASSERT_NEQ(fault_addr, MAP_FAILED, "mmap for uffd registration")) return; - if (!ASSERT_OK(sys_delete_module("bpf_testmod", 0), "unload bpf_testmod")) + if (!ASSERT_OK(unload_bpf_testmod(false), "unload bpf_testmod")) goto end_mmap; skel = bpf_mod_race__open(); @@ -202,8 +178,8 @@ end_destroy: bpf_mod_race__destroy(skel); ASSERT_OK(kern_sync_rcu(), "kern_sync_rcu"); end_module: - sys_delete_module("bpf_testmod", 0); - ASSERT_OK(load_module("bpf_testmod.ko"), "restore bpf_testmod"); + unload_bpf_testmod(false); + ASSERT_OK(load_bpf_testmod(false), "restore bpf_testmod"); end_mmap: munmap(fault_addr, 4096); atomic_store(&state, _TS_INVALID); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c new file mode 100644 index 000000000000..31f1e815f671 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/bpf_obj_pinning.c @@ -0,0 +1,268 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ +#define _GNU_SOURCE +#include <test_progs.h> +#include <bpf/btf.h> +#include <fcntl.h> +#include <unistd.h> +#include <linux/unistd.h> +#include <linux/mount.h> +#include <sys/syscall.h> + +static inline int sys_fsopen(const char *fsname, unsigned flags) +{ + return syscall(__NR_fsopen, fsname, flags); +} + +static inline int sys_fsconfig(int fs_fd, unsigned cmd, const char *key, const void *val, int aux) +{ + return syscall(__NR_fsconfig, fs_fd, cmd, key, val, aux); +} + +static inline int sys_fsmount(int fs_fd, unsigned flags, unsigned ms_flags) +{ + return syscall(__NR_fsmount, fs_fd, flags, ms_flags); +} + +__attribute__((unused)) +static inline int sys_move_mount(int from_dfd, const char *from_path, + int to_dfd, const char *to_path, + unsigned int ms_flags) +{ + return syscall(__NR_move_mount, from_dfd, from_path, to_dfd, to_path, ms_flags); +} + +static void bpf_obj_pinning_detached(void) +{ + LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts); + LIBBPF_OPTS(bpf_obj_get_opts, get_opts); + int fs_fd = -1, mnt_fd = -1; + int map_fd = -1, map_fd2 = -1; + int zero = 0, src_value, dst_value, err; + const char *map_name = "fsmount_map"; + + /* A bunch of below UAPI calls are constructed based on reading: + * https://brauner.io/2023/02/28/mounting-into-mount-namespaces.html + */ + + /* create VFS context */ + fs_fd = sys_fsopen("bpf", 0); + if (!ASSERT_GE(fs_fd, 0, "fs_fd")) + goto cleanup; + + /* instantiate FS object */ + err = sys_fsconfig(fs_fd, FSCONFIG_CMD_CREATE, NULL, NULL, 0); + if (!ASSERT_OK(err, "fs_create")) + goto cleanup; + + /* create O_PATH fd for detached mount */ + mnt_fd = sys_fsmount(fs_fd, 0, 0); + if (!ASSERT_GE(mnt_fd, 0, "mnt_fd")) + goto cleanup; + + /* If we wanted to expose detached mount in the file system, we'd do + * something like below. 
But the whole point is that we actually don't + * even have to expose BPF FS in the file system to be able to work + * (pin/get objects) with it. + * + * err = sys_move_mount(mnt_fd, "", -EBADF, mnt_path, MOVE_MOUNT_F_EMPTY_PATH); + * if (!ASSERT_OK(err, "move_mount")) + * goto cleanup; + */ + + /* create BPF map to pin */ + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL); + if (!ASSERT_GE(map_fd, 0, "map_fd")) + goto cleanup; + + /* pin BPF map into detached BPF FS through mnt_fd */ + pin_opts.file_flags = BPF_F_PATH_FD; + pin_opts.path_fd = mnt_fd; + err = bpf_obj_pin_opts(map_fd, map_name, &pin_opts); + if (!ASSERT_OK(err, "map_pin")) + goto cleanup; + + /* get BPF map from detached BPF FS through mnt_fd */ + get_opts.file_flags = BPF_F_PATH_FD; + get_opts.path_fd = mnt_fd; + map_fd2 = bpf_obj_get_opts(map_name, &get_opts); + if (!ASSERT_GE(map_fd2, 0, "map_get")) + goto cleanup; + + /* update map through one FD */ + src_value = 0xcafebeef; + err = bpf_map_update_elem(map_fd, &zero, &src_value, 0); + ASSERT_OK(err, "map_update"); + + /* check values written/read through different FDs do match */ + dst_value = 0; + err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value); + ASSERT_OK(err, "map_lookup"); + ASSERT_EQ(dst_value, src_value, "map_value_eq1"); + ASSERT_EQ(dst_value, 0xcafebeef, "map_value_eq2"); + +cleanup: + if (map_fd >= 0) + ASSERT_OK(close(map_fd), "close_map_fd"); + if (map_fd2 >= 0) + ASSERT_OK(close(map_fd2), "close_map_fd2"); + if (fs_fd >= 0) + ASSERT_OK(close(fs_fd), "close_fs_fd"); + if (mnt_fd >= 0) + ASSERT_OK(close(mnt_fd), "close_mnt_fd"); +} + +enum path_kind +{ + PATH_STR_ABS, + PATH_STR_REL, + PATH_FD_REL, +}; + +static void validate_pin(int map_fd, const char *map_name, int src_value, + enum path_kind path_kind) +{ + LIBBPF_OPTS(bpf_obj_pin_opts, pin_opts); + char abs_path[PATH_MAX], old_cwd[PATH_MAX]; + const char *pin_path = NULL; + int zero = 0, dst_value, map_fd2, err; + + snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name); + old_cwd[0] = '\0'; + + switch (path_kind) { + case PATH_STR_ABS: + /* absolute path */ + pin_path = abs_path; + break; + case PATH_STR_REL: + /* cwd + relative path */ + ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd"); + ASSERT_OK(chdir("/sys/fs/bpf"), "chdir"); + pin_path = map_name; + break; + case PATH_FD_REL: + /* dir fd + relative path */ + pin_opts.file_flags = BPF_F_PATH_FD; + pin_opts.path_fd = open("/sys/fs/bpf", O_PATH); + ASSERT_GE(pin_opts.path_fd, 0, "path_fd"); + pin_path = map_name; + break; + } + + /* pin BPF map using specified path definition */ + err = bpf_obj_pin_opts(map_fd, pin_path, &pin_opts); + ASSERT_OK(err, "obj_pin"); + + /* cleanup */ + if (pin_opts.path_fd >= 0) + close(pin_opts.path_fd); + if (old_cwd[0]) + ASSERT_OK(chdir(old_cwd), "restore_cwd"); + + map_fd2 = bpf_obj_get(abs_path); + if (!ASSERT_GE(map_fd2, 0, "map_get")) + goto cleanup; + + /* update map through one FD */ + err = bpf_map_update_elem(map_fd, &zero, &src_value, 0); + ASSERT_OK(err, "map_update"); + + /* check values written/read through different FDs do match */ + dst_value = 0; + err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value); + ASSERT_OK(err, "map_lookup"); + ASSERT_EQ(dst_value, src_value, "map_value_eq"); +cleanup: + if (map_fd2 >= 0) + ASSERT_OK(close(map_fd2), "close_map_fd2"); + unlink(abs_path); +} + +static void validate_get(int map_fd, const char *map_name, int src_value, + enum path_kind path_kind) +{ + LIBBPF_OPTS(bpf_obj_get_opts, get_opts); + char abs_path[PATH_MAX], 
old_cwd[PATH_MAX]; + const char *pin_path = NULL; + int zero = 0, dst_value, map_fd2, err; + + snprintf(abs_path, sizeof(abs_path), "/sys/fs/bpf/%s", map_name); + /* pin BPF map using specified path definition */ + err = bpf_obj_pin(map_fd, abs_path); + if (!ASSERT_OK(err, "pin_map")) + return; + + old_cwd[0] = '\0'; + + switch (path_kind) { + case PATH_STR_ABS: + /* absolute path */ + pin_path = abs_path; + break; + case PATH_STR_REL: + /* cwd + relative path */ + ASSERT_OK_PTR(getcwd(old_cwd, sizeof(old_cwd)), "getcwd"); + ASSERT_OK(chdir("/sys/fs/bpf"), "chdir"); + pin_path = map_name; + break; + case PATH_FD_REL: + /* dir fd + relative path */ + get_opts.file_flags = BPF_F_PATH_FD; + get_opts.path_fd = open("/sys/fs/bpf", O_PATH); + ASSERT_GE(get_opts.path_fd, 0, "path_fd"); + pin_path = map_name; + break; + } + + map_fd2 = bpf_obj_get_opts(pin_path, &get_opts); + if (!ASSERT_GE(map_fd2, 0, "map_get")) + goto cleanup; + + /* cleanup */ + if (get_opts.path_fd >= 0) + close(get_opts.path_fd); + if (old_cwd[0]) + ASSERT_OK(chdir(old_cwd), "restore_cwd"); + + /* update map through one FD */ + err = bpf_map_update_elem(map_fd, &zero, &src_value, 0); + ASSERT_OK(err, "map_update"); + + /* check values written/read through different FDs do match */ + dst_value = 0; + err = bpf_map_lookup_elem(map_fd2, &zero, &dst_value); + ASSERT_OK(err, "map_lookup"); + ASSERT_EQ(dst_value, src_value, "map_value_eq"); +cleanup: + if (map_fd2 >= 0) + ASSERT_OK(close(map_fd2), "close_map_fd2"); + unlink(abs_path); +} + +static void bpf_obj_pinning_mounted(enum path_kind path_kind) +{ + const char *map_name = "mounted_map"; + int map_fd; + + /* create BPF map to pin */ + map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, map_name, 4, 4, 1, NULL); + if (!ASSERT_GE(map_fd, 0, "map_fd")) + return; + + validate_pin(map_fd, map_name, 100 + (int)path_kind, path_kind); + validate_get(map_fd, map_name, 200 + (int)path_kind, path_kind); + ASSERT_OK(close(map_fd), "close_map_fd"); +} + +void test_bpf_obj_pinning() +{ + if (test__start_subtest("detached")) + bpf_obj_pinning_detached(); + if (test__start_subtest("mounted-str-abs")) + bpf_obj_pinning_mounted(PATH_STR_ABS); + if (test__start_subtest("mounted-str-rel")) + bpf_obj_pinning_mounted(PATH_STR_REL); + if (test__start_subtest("mounted-fd-rel")) + bpf_obj_pinning_mounted(PATH_FD_REL); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c index 210d643fda6c..4e0cdb593318 100644 --- a/tools/testing/selftests/bpf/prog_tests/btf.c +++ b/tools/testing/selftests/bpf/prog_tests/btf.c @@ -3991,6 +3991,46 @@ static struct btf_raw_test raw_tests[] = { .err_str = "Invalid arg#1", }, { + .descr = "decl_tag test #18, decl_tag as the map key type", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 32), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */ + BTF_END_RAW, + }, + BTF_STR_SEC("\0m1\0m2\0tag"), + .map_type = BPF_MAP_TYPE_HASH, + .map_name = "tag_type_check_btf", + .key_size = 8, + .value_size = 4, + .key_type_id = 3, + .value_type_id = 1, + .max_entries = 1, + .map_create_err = true, +}, +{ + .descr = "decl_tag test #19, decl_tag as the map value type", + .raw_types = { + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ + BTF_MEMBER_ENC(NAME_TBD, 1, 0), + BTF_MEMBER_ENC(NAME_TBD, 1, 32), + BTF_DECL_TAG_ENC(NAME_TBD, 2, -1), /* [3] */ + BTF_END_RAW, + }, + 
BTF_STR_SEC("\0m1\0m2\0tag"), + .map_type = BPF_MAP_TYPE_HASH, + .map_name = "tag_type_check_btf", + .key_size = 4, + .value_size = 8, + .key_type_id = 1, + .value_type_id = 3, + .max_entries = 1, + .map_create_err = true, +}, +{ .descr = "type_tag test #1", .raw_types = { BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c index 4d2fa99273d8..2bb5773d6f99 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c @@ -25,6 +25,8 @@ static void test_setsockopt_set(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that sets EUNATCH, assert that * we actually get that error when we run setsockopt() */ @@ -59,6 +61,8 @@ static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that sets EUNATCH, and one that gets the * previously set errno. Assert that we get the same errno back. */ @@ -100,6 +104,8 @@ static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that gets the previously set errno. * Assert that, without anything setting one, we get 0. */ @@ -134,6 +140,8 @@ static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that gets the previously set errno, and then * one that sets the errno to EUNATCH. Assert that the get does not * see EUNATCH set later, and does not prevent EUNATCH from being set. @@ -177,6 +185,8 @@ static void test_setsockopt_override(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that sets EUNATCH, then one that sets EISCONN, * and then one that gets the exported errno. Assert both the syscall * and the helper sees the last set errno. @@ -224,6 +234,8 @@ static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that return a reject without setting errno * (legacy reject), and one that gets the errno. Assert that for * backward compatibility the syscall result in EPERM, and this @@ -268,6 +280,8 @@ static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach setsockopt that sets EUNATCH, then one that return a reject * without setting errno, and then one that gets the exported errno. * Assert both the syscall and the helper's errno are unaffected by @@ -319,6 +333,8 @@ static void test_getsockopt_get(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach getsockopt that gets previously set errno. Assert that the * error from kernel is in both ctx_retval_value and retval_value. 
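 */

These page_size assignments repeat across every setup function in this file: the BPF programs now compare ctx->optlen against the page size, and since PAGE_SIZE is not a compile-time constant the tests can rely on (4K on x86, but 16K or 64K elsewhere), the value is fed in at runtime through a global. A minimal sketch of the pattern, assuming the generated skeleton header name:

#include <unistd.h>
#include "cgroup_getset_retval_getsockopt.skel.h"

static struct cgroup_getset_retval_getsockopt *open_skel_with_page_size(void)
{
	struct cgroup_getset_retval_getsockopt *obj;

	obj = cgroup_getset_retval_getsockopt__open_and_load();
	if (!obj)
		return NULL;

	/* the page size varies by architecture; pass the runtime value
	 * into the program's global before any sockopt traffic runs
	 */
	obj->bss->page_size = sysconf(_SC_PAGESIZE);
	return obj;
}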
@@ -359,6 +375,8 @@ static void test_getsockopt_override(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach getsockopt that sets retval to -EISCONN. Assert that this * overrides the value from kernel. */ @@ -396,6 +414,8 @@ static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd) if (!ASSERT_OK_PTR(obj, "skel-load")) return; + obj->bss->page_size = sysconf(_SC_PAGESIZE); + /* Attach getsockopt that sets retval to -EISCONN, and one that clears * ctx retval. Assert that the clearing ctx retval is synced to helper * and clears any errors both from kernel and BPF. diff --git a/tools/testing/selftests/bpf/prog_tests/check_mtu.c b/tools/testing/selftests/bpf/prog_tests/check_mtu.c index 5338d2ea0460..2a9a30650350 100644 --- a/tools/testing/selftests/bpf/prog_tests/check_mtu.c +++ b/tools/testing/selftests/bpf/prog_tests/check_mtu.c @@ -183,7 +183,7 @@ cleanup: void serial_test_check_mtu(void) { - __u32 mtu_lo; + int mtu_lo; if (test__start_subtest("bpf_check_mtu XDP-attach")) test_check_mtu_xdp_attach(); diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c index cdf4acc18e4c..756ea8b590b6 100644 --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c +++ b/tools/testing/selftests/bpf/prog_tests/cpumask.c @@ -10,6 +10,7 @@ static const char * const cpumask_success_testcases[] = { "test_set_clear_cpu", "test_setall_clear_cpu", "test_first_firstzero_cpu", + "test_firstand_nocpu", "test_test_and_set_clear", "test_and_or_xor", "test_intersects_subset", @@ -70,5 +71,6 @@ void test_cpumask(void) verify_success(cpumask_success_testcases[i]); } + RUN_TESTS(cpumask_success); RUN_TESTS(cpumask_failure); } diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c index d176c34a7d2e..7cfac53c0d58 100644 --- a/tools/testing/selftests/bpf/prog_tests/dynptr.c +++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c @@ -20,6 +20,14 @@ static struct { {"test_ringbuf", SETUP_SYSCALL_SLEEP}, {"test_skb_readonly", SETUP_SKB_PROG}, {"test_dynptr_skb_data", SETUP_SKB_PROG}, + {"test_adjust", SETUP_SYSCALL_SLEEP}, + {"test_adjust_err", SETUP_SYSCALL_SLEEP}, + {"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP}, + {"test_dynptr_is_null", SETUP_SYSCALL_SLEEP}, + {"test_dynptr_is_rdonly", SETUP_SKB_PROG}, + {"test_dynptr_clone", SETUP_SKB_PROG}, + {"test_dynptr_skb_no_buff", SETUP_SKB_PROG}, + {"test_dynptr_skb_strcmp", SETUP_SKB_PROG}, }; static void verify_success(const char *prog_name, enum test_setup_type setup_type) diff --git a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c index a1e712105811..2fd05649bad1 100644 --- a/tools/testing/selftests/bpf/prog_tests/fib_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/fib_lookup.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */
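The new BPF_FIB_LOOKUP_TBID flag exercised below lets a program direct the FIB lookup at an explicit routing table through the new params->tbid field (which shares space with the VLAN fields and is only consulted when the flag is set, together with BPF_FIB_LOOKUP_DIRECT). A BPF-side sketch of the call, assuming a TC ingress program and the table id 100 these tests use:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#ifndef AF_INET
#define AF_INET 2
#endif

char _license[] SEC("license") = "GPL";

SEC("tc")
int fib_lookup_tbid(struct __sk_buff *skb)
{
	struct bpf_fib_lookup params = {};
	long ret;

	params.family = AF_INET;
	params.ifindex = skb->ingress_ifindex;
	params.ipv4_dst = bpf_htonl(0xac000002); /* 172.0.0.2 */
	params.tbid = 100; /* only read with BPF_FIB_LOOKUP_TBID set */

	ret = bpf_fib_lookup(skb, &params, sizeof(params),
			     BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID);
	return ret == BPF_FIB_LKUP_RET_SUCCESS ? TC_ACT_OK : TC_ACT_SHOT;
}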
+#include <linux/rtnetlink.h> #include <sys/types.h> #include <net/if.h> @@ -15,14 +16,23 @@ #define IPV4_IFACE_ADDR "10.0.0.254" #define IPV4_NUD_FAILED_ADDR "10.0.0.1" #define IPV4_NUD_STALE_ADDR "10.0.0.2" +#define IPV4_TBID_ADDR "172.0.0.254" +#define IPV4_TBID_NET "172.0.0.0" +#define IPV4_TBID_DST "172.0.0.2" +#define IPV6_TBID_ADDR "fd00::FFFF" +#define IPV6_TBID_NET "fd00::" +#define IPV6_TBID_DST "fd00::2" #define DMAC "11:11:11:11:11:11" #define DMAC_INIT { 0x11, 0x11, 0x11, 0x11, 0x11, 0x11, } +#define DMAC2 "01:01:01:01:01:01" +#define DMAC_INIT2 { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, } struct fib_lookup_test { const char *desc; const char *daddr; int expected_ret; int lookup_flags; + __u32 tbid; __u8 dmac[6]; }; @@ -43,6 +53,22 @@ static const struct fib_lookup_test tests[] = { { .desc = "IPv4 skip neigh", .daddr = IPV4_NUD_FAILED_ADDR, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, .lookup_flags = BPF_FIB_LOOKUP_SKIP_NEIGH, }, + { .desc = "IPv4 TBID lookup failure", + .daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED, + .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, + .tbid = RT_TABLE_MAIN, }, + { .desc = "IPv4 TBID lookup success", + .daddr = IPV4_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, + .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100, + .dmac = DMAC_INIT2, }, + { .desc = "IPv6 TBID lookup failure", + .daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_NOT_FWDED, + .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, + .tbid = RT_TABLE_MAIN, }, + { .desc = "IPv6 TBID lookup success", + .daddr = IPV6_TBID_DST, .expected_ret = BPF_FIB_LKUP_RET_SUCCESS, + .lookup_flags = BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID, .tbid = 100, + .dmac = DMAC_INIT2, }, }; static int ifindex; @@ -53,6 +79,7 @@ static int setup_netns(void) SYS(fail, "ip link add veth1 type veth peer name veth2"); SYS(fail, "ip link set dev veth1 up"); + SYS(fail, "ip link set dev veth2 up"); err = write_sysctl("/proc/sys/net/ipv4/neigh/veth1/gc_stale_time", "900"); if (!ASSERT_OK(err, "write_sysctl(net.ipv4.neigh.veth1.gc_stale_time)")) @@ -70,6 +97,17 @@ static int setup_netns(void) SYS(fail, "ip neigh add %s dev veth1 nud failed", IPV4_NUD_FAILED_ADDR); SYS(fail, "ip neigh add %s dev veth1 lladdr %s nud stale", IPV4_NUD_STALE_ADDR, DMAC); + /* Setup for tbid lookup tests */ + SYS(fail, "ip addr add %s/24 dev veth2", IPV4_TBID_ADDR); + SYS(fail, "ip route del %s/24 dev veth2", IPV4_TBID_NET); + SYS(fail, "ip route add table 100 %s/24 dev veth2", IPV4_TBID_NET); + SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV4_TBID_DST, DMAC2); + + SYS(fail, "ip addr add %s/64 dev veth2", IPV6_TBID_ADDR); + SYS(fail, "ip -6 route del %s/64 dev veth2", IPV6_TBID_NET); + SYS(fail, "ip -6 route add table 100 %s/64 dev veth2", IPV6_TBID_NET); + SYS(fail, "ip neigh add %s dev veth2 lladdr %s nud stale", IPV6_TBID_DST, DMAC2); + err = write_sysctl("/proc/sys/net/ipv4/conf/veth1/forwarding", "1"); if (!ASSERT_OK(err, "write_sysctl(net.ipv4.conf.veth1.forwarding)")) goto fail; @@ -83,7 +121,7 @@ fail: return -1; } -static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr) +static int set_lookup_params(struct bpf_fib_lookup *params, const struct fib_lookup_test *test) { int ret; @@ -91,8 +129,9 @@ static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr) params->l4_protocol = IPPROTO_TCP; params->ifindex = ifindex; + params->tbid = test->tbid; - if (inet_pton(AF_INET6, daddr, params->ipv6_dst) == 1) { + 
if (inet_pton(AF_INET6, test->daddr, params->ipv6_dst) == 1) { params->family = AF_INET6; ret = inet_pton(AF_INET6, IPV6_IFACE_ADDR, params->ipv6_src); if (!ASSERT_EQ(ret, 1, "inet_pton(IPV6_IFACE_ADDR)")) @@ -100,7 +139,7 @@ static int set_lookup_params(struct bpf_fib_lookup *params, const char *daddr) return 0; } - ret = inet_pton(AF_INET, daddr, &params->ipv4_dst); + ret = inet_pton(AF_INET, test->daddr, &params->ipv4_dst); if (!ASSERT_EQ(ret, 1, "convert IP[46] address")) return -1; params->family = AF_INET; @@ -154,13 +193,12 @@ void test_fib_lookup(void) fib_params = &skel->bss->fib_params; for (i = 0; i < ARRAY_SIZE(tests); i++) { - printf("Testing %s\n", tests[i].desc); + printf("Testing %s ", tests[i].desc); - if (set_lookup_params(fib_params, tests[i].daddr)) + if (set_lookup_params(fib_params, &tests[i])) continue; skel->bss->fib_lookup_ret = -1; - skel->bss->lookup_flags = BPF_FIB_LOOKUP_OUTPUT | - tests[i].lookup_flags; + skel->bss->lookup_flags = tests[i].lookup_flags; err = bpf_prog_test_run_opts(prog_fd, &run_opts); if (!ASSERT_OK(err, "bpf_prog_test_run_opts")) @@ -175,7 +213,14 @@ mac_str(expected, tests[i].dmac); mac_str(actual, fib_params->dmac); - printf("dmac expected %s actual %s\n", expected, actual); + printf("dmac expected %s actual %s ", expected, actual); + } + + /* ensure tbid is zeroed out after fib lookup */ + if (tests[i].lookup_flags & BPF_FIB_LOOKUP_DIRECT) { + if (!ASSERT_EQ(skel->bss->fib_params.tbid, 0, + "expected fib_params.tbid to be zero")) + goto fail; + } } diff --git a/tools/testing/selftests/bpf/prog_tests/global_map_resize.c b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c new file mode 100644 index 000000000000..fd41425d2e5c --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/global_map_resize.c @@ -0,0 +1,227 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */ +#include <errno.h> +#include <sys/syscall.h> +#include <unistd.h> +#include "test_global_map_resize.skel.h" +#include "test_progs.h" + +static void run_prog_bss_array_sum(void) +{ + (void)syscall(__NR_getpid); +} + +static void run_prog_data_array_sum(void) +{ + (void)syscall(__NR_getuid); +} + +static void global_map_resize_bss_subtest(void) +{ + int err; + struct test_global_map_resize *skel; + struct bpf_map *map; + const __u32 desired_sz = sizeof(skel->bss->sum) + sysconf(_SC_PAGE_SIZE) * 2; + size_t array_len, actual_sz; + + skel = test_global_map_resize__open(); + if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) + goto teardown; + + /* set some initial value before resizing. + * it is expected this non-zero value will be preserved + * while resizing. + */
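bpf_map__set_value_size() is the libbpf API this test exercises: between skeleton open and load, a datasec-backed map such as .bss or .data can be grown, and libbpf rewrites the map definition (and, when the new size still lines up with the trailing array, its BTF) to match. A minimal sketch of the open-resize-load order the subtest follows, reusing the skeleton above:

#include <stddef.h>
#include "test_global_map_resize.skel.h"

static int resize_bss_map(size_t new_sz)
{
	struct test_global_map_resize *skel;
	size_t sz;
	int err;

	skel = test_global_map_resize__open();
	if (!skel)
		return -1;

	/* resizing is only valid between open and load */
	err = bpf_map__set_value_size(skel->maps.bss, new_sz);
	if (!err) {
		/* the mmap'ed region may move: re-fetch the bss pointer */
		skel->bss = bpf_map__initial_value(skel->maps.bss, &sz);
		err = skel->bss ? test_global_map_resize__load(skel) : -1;
	}

	test_global_map_resize__destroy(skel);
	return err;
}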
+ skel->bss->array[0] = 1; + + /* resize map value and verify the new size */ + map = skel->maps.bss; + err = bpf_map__set_value_size(map, desired_sz); + if (!ASSERT_OK(err, "bpf_map__set_value_size")) + goto teardown; + if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize")) + goto teardown; + + /* set the expected number of elements based on the resized array */ + array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->bss->array[0]); + if (!ASSERT_GT(array_len, 1, "array_len")) + goto teardown; + + skel->bss = bpf_map__initial_value(skel->maps.bss, &actual_sz); + if (!ASSERT_OK_PTR(skel->bss, "bpf_map__initial_value (ptr)")) + goto teardown; + if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)")) + goto teardown; + + /* fill the newly resized array with ones, + * skipping the first element which was previously set + */ + for (int i = 1; i < array_len; i++) + skel->bss->array[i] = 1; + + /* set global const values before loading */ + skel->rodata->pid = getpid(); + skel->rodata->bss_array_len = array_len; + skel->rodata->data_array_len = 1; + + err = test_global_map_resize__load(skel); + if (!ASSERT_OK(err, "test_global_map_resize__load")) + goto teardown; + err = test_global_map_resize__attach(skel); + if (!ASSERT_OK(err, "test_global_map_resize__attach")) + goto teardown; + + /* run the bpf program which will sum the contents of the array. + * since the array was filled with ones, verify the sum equals array_len + */ + run_prog_bss_array_sum(); + if (!ASSERT_EQ(skel->bss->sum, array_len, "sum")) + goto teardown; + +teardown: + test_global_map_resize__destroy(skel); +} + +static void global_map_resize_data_subtest(void) +{ + int err; + struct test_global_map_resize *skel; + struct bpf_map *map; + const __u32 desired_sz = sysconf(_SC_PAGE_SIZE) * 2; + size_t array_len, actual_sz; + + skel = test_global_map_resize__open(); + if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) + goto teardown; + + /* set some initial value before resizing. + * it is expected this non-zero value will be preserved + * while resizing. + */
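On the BPF side, the .data.custom map this subtest resizes comes from placing a global in a named datasec: any variable tagged with an explicit .data.* section attribute gets its own map, which the skeleton then exposes for resizing. A sketch of the BPF-side declaration (the variable name mirrors the test; the type is assumed):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

/* one array in a custom datasec gets its own map, exposed to
 * userspace as skel->maps.data_custom and skel->data_custom
 */
int my_array[1] SEC(".data.custom");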
+ skel->data_custom->my_array[0] = 1; + + /* resize map value and verify the new size */ + map = skel->maps.data_custom; + err = bpf_map__set_value_size(map, desired_sz); + if (!ASSERT_OK(err, "bpf_map__set_value_size")) + goto teardown; + if (!ASSERT_EQ(bpf_map__value_size(map), desired_sz, "resize")) + goto teardown; + + /* set the expected number of elements based on the resized array */ + array_len = (desired_sz - sizeof(skel->bss->sum)) / sizeof(skel->data_custom->my_array[0]); + if (!ASSERT_GT(array_len, 1, "array_len")) + goto teardown; + + skel->data_custom = bpf_map__initial_value(skel->maps.data_custom, &actual_sz); + if (!ASSERT_OK_PTR(skel->data_custom, "bpf_map__initial_value (ptr)")) + goto teardown; + if (!ASSERT_EQ(actual_sz, desired_sz, "bpf_map__initial_value (size)")) + goto teardown; + + /* fill the newly resized array with ones, + * skipping the first element which was previously set + */ + for (int i = 1; i < array_len; i++) + skel->data_custom->my_array[i] = 1; + + /* set global const values before loading */ + skel->rodata->pid = getpid(); + skel->rodata->bss_array_len = 1; + skel->rodata->data_array_len = array_len; + + err = test_global_map_resize__load(skel); + if (!ASSERT_OK(err, "test_global_map_resize__load")) + goto teardown; + err = test_global_map_resize__attach(skel); + if (!ASSERT_OK(err, "test_global_map_resize__attach")) + goto teardown; + + /* run the bpf program which will sum the contents of the array. + * since the array was filled with ones, verify the sum equals array_len + */ + run_prog_data_array_sum(); + if (!ASSERT_EQ(skel->bss->sum, array_len, "sum")) + goto teardown; + +teardown: + test_global_map_resize__destroy(skel); +} + +static void global_map_resize_invalid_subtest(void) +{ + int err; + struct test_global_map_resize *skel; + struct bpf_map *map; + __u32 element_sz, desired_sz; + + skel = test_global_map_resize__open(); + if (!ASSERT_OK_PTR(skel, "test_global_map_resize__open")) + return; + + /* attempt to resize a global datasec map to size + * which does NOT align with array + */ + map = skel->maps.data_custom; + if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.custom initial btf")) + goto teardown; + /* set desired size a fraction of element size beyond an aligned size */ + element_sz = sizeof(skel->data_custom->my_array[0]); + desired_sz = element_sz + element_sz / 2; + /* confirm desired size does NOT align with array */ + if (!ASSERT_NEQ(desired_sz % element_sz, 0, "my_array alignment")) + goto teardown; + err = bpf_map__set_value_size(map, desired_sz); + /* confirm resize is OK but BTF info is cleared */ + if (!ASSERT_OK(err, ".data.custom bpf_map__set_value_size") || + !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.custom clear btf key") || + !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.custom clear btf val")) + goto teardown; + + /* attempt to resize a global datasec map whose only var is NOT an array */ + map = skel->maps.data_non_array; + if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array initial btf")) + goto teardown; + /* set desired size to arbitrary value */ + desired_sz = 1024; + err = bpf_map__set_value_size(map, desired_sz); + /* confirm resize is OK but BTF info is cleared */ + if (!ASSERT_OK(err, ".data.non_array bpf_map__set_value_size") || + !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.non_array clear btf key") || + !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.non_array clear btf val")) + goto teardown; + + /* attempt to resize a global datasec map + * 
whose last var is NOT an array + */ + map = skel->maps.data_array_not_last; + if (!ASSERT_NEQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last initial btf")) + goto teardown; + /* set desired size to a multiple of element size */ + element_sz = sizeof(skel->data_array_not_last->my_array_first[0]); + desired_sz = element_sz * 8; + /* confirm desired size aligns with array */ + if (!ASSERT_EQ(desired_sz % element_sz, 0, "my_array_first alignment")) + goto teardown; + err = bpf_map__set_value_size(map, desired_sz); + /* confirm resize is OK but BTF info is cleared */ + if (!ASSERT_OK(err, ".data.array_not_last bpf_map__set_value_size") || + !ASSERT_EQ(bpf_map__btf_key_type_id(map), 0, ".data.array_not_last clear btf key") || + !ASSERT_EQ(bpf_map__btf_value_type_id(map), 0, ".data.array_not_last clear btf val")) + goto teardown; + +teardown: + test_global_map_resize__destroy(skel); +} + +void test_global_map_resize(void) +{ + if (test__start_subtest("global_map_resize_bss")) + global_map_resize_bss_subtest(); + + if (test__start_subtest("global_map_resize_data")) + global_map_resize_data_subtest(); + + if (test__start_subtest("global_map_resize_invalid")) + global_map_resize_invalid_subtest(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/module_attach.c b/tools/testing/selftests/bpf/prog_tests/module_attach.c index 7fc01ff490db..f53d658ed080 100644 --- a/tools/testing/selftests/bpf/prog_tests/module_attach.c +++ b/tools/testing/selftests/bpf/prog_tests/module_attach.c @@ -4,6 +4,7 @@ #include <test_progs.h> #include <stdbool.h> #include "test_module_attach.skel.h" +#include "testing_helpers.h" static int duration; @@ -32,11 +33,6 @@ static int trigger_module_test_writable(int *val) return 0; } -static int delete_module(const char *name, int flags) -{ - return syscall(__NR_delete_module, name, flags); -} - void test_module_attach(void) { const int READ_SZ = 456; @@ -93,21 +89,21 @@ void test_module_attach(void) if (!ASSERT_OK_PTR(link, "attach_fentry")) goto cleanup; - ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod"); bpf_link__destroy(link); link = bpf_program__attach(skel->progs.handle_fexit); if (!ASSERT_OK_PTR(link, "attach_fexit")) goto cleanup; - ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod"); bpf_link__destroy(link); link = bpf_program__attach(skel->progs.kprobe_multi); if (!ASSERT_OK_PTR(link, "attach_kprobe_multi")) goto cleanup; - ASSERT_ERR(delete_module("bpf_testmod", 0), "delete_module"); + ASSERT_ERR(unload_bpf_testmod(false), "unload_bpf_testmod"); bpf_link__destroy(link); cleanup: diff --git a/tools/testing/selftests/bpf/prog_tests/netcnt.c b/tools/testing/selftests/bpf/prog_tests/netcnt.c index d3915c58d0e1..c3333edd029f 100644 --- a/tools/testing/selftests/bpf/prog_tests/netcnt.c +++ b/tools/testing/selftests/bpf/prog_tests/netcnt.c @@ -67,12 +67,12 @@ void serial_test_netcnt(void) } /* No packets should be lost */ - ASSERT_EQ(packets, 10000, "packets"); + ASSERT_GE(packets, 10000, "packets"); /* Let's check that bytes counter matches the number of packets * multiplied by the size of ipv6 ICMP packet. 
*/ - ASSERT_EQ(bytes, packets * 104, "bytes"); + ASSERT_GE(bytes, packets * 104, "bytes"); err: if (cg_fd != -1) diff --git a/tools/testing/selftests/bpf/prog_tests/sock_destroy.c b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c new file mode 100644 index 000000000000..b0583309a94e --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/sock_destroy.c @@ -0,0 +1,221 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <test_progs.h> +#include <bpf/bpf_endian.h> + +#include "sock_destroy_prog.skel.h" +#include "sock_destroy_prog_fail.skel.h" +#include "network_helpers.h" + +#define TEST_NS "sock_destroy_netns" + +static void start_iter_sockets(struct bpf_program *prog) +{ + struct bpf_link *link; + char buf[50] = {}; + int iter_fd, len; + + link = bpf_program__attach_iter(prog, NULL); + if (!ASSERT_OK_PTR(link, "attach_iter")) + return; + + iter_fd = bpf_iter_create(bpf_link__fd(link)); + if (!ASSERT_GE(iter_fd, 0, "create_iter")) + goto free_link; + + while ((len = read(iter_fd, buf, sizeof(buf))) > 0) + ; + ASSERT_GE(len, 0, "read"); + + close(iter_fd); + +free_link: + bpf_link__destroy(link); +} + +static void test_tcp_client(struct sock_destroy_prog *skel) +{ + int serv = -1, clien = -1, accept_serv = -1, n; + + serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(serv, 0, "start_server")) + goto cleanup; + + clien = connect_to_fd(serv, 0); + if (!ASSERT_GE(clien, 0, "connect_to_fd")) + goto cleanup; + + accept_serv = accept(serv, NULL, NULL); + if (!ASSERT_GE(accept_serv, 0, "serv accept")) + goto cleanup; + + n = send(clien, "t", 1, 0); + if (!ASSERT_EQ(n, 1, "client send")) + goto cleanup; + + /* Run iterator program that destroys connected client sockets. */ + start_iter_sockets(skel->progs.iter_tcp6_client); + + n = send(clien, "t", 1, 0); + if (!ASSERT_LT(n, 0, "client_send on destroyed socket")) + goto cleanup; + ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket"); + +cleanup: + if (clien != -1) + close(clien); + if (accept_serv != -1) + close(accept_serv); + if (serv != -1) + close(serv); +} + +static void test_tcp_server(struct sock_destroy_prog *skel) +{ + int serv = -1, clien = -1, accept_serv = -1, n, serv_port; + + serv = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); + if (!ASSERT_GE(serv, 0, "start_server")) + goto cleanup; + serv_port = get_socket_local_port(serv); + if (!ASSERT_GE(serv_port, 0, "get_sock_local_port")) + goto cleanup; + skel->bss->serv_port = (__be16) serv_port; + + clien = connect_to_fd(serv, 0); + if (!ASSERT_GE(clien, 0, "connect_to_fd")) + goto cleanup; + + accept_serv = accept(serv, NULL, NULL); + if (!ASSERT_GE(accept_serv, 0, "serv accept")) + goto cleanup; + + n = send(clien, "t", 1, 0); + if (!ASSERT_EQ(n, 1, "client send")) + goto cleanup; + + /* Run iterator program that destroys server sockets. 
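*/
+	start_iter_sockets(skel->progs.iter_tcp6_server);

Each subtest drives destruction the same way: start_iter_sockets() attaches a socket iterator program and drains its FD, which runs the program over every matching socket in the namespace, and the program calls the bpf_sock_destroy() kfunc on sockets bound to the test port. A BPF-side sketch, not the exact test program (the kfunc extern and the port filter are assumptions):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

char _license[] SEC("license") = "GPL";

/* only callable from iterator context */
extern int bpf_sock_destroy(struct sock_common *sk) __ksym;

__be16 serv_port = 0;

SEC("iter/tcp")
int destroy_by_port(struct bpf_iter__tcp *ctx)
{
	struct sock_common *sk_common = ctx->sk_common;

	if (!sk_common)
		return 0;

	/* destroy only sockets bound to the server port under test */
	if (sk_common->skc_num == bpf_ntohs(serv_port))
		bpf_sock_destroy(sk_common);

	return 0;
}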
+ + n = send(clien, "t", 1, 0); + if (!ASSERT_LT(n, 0, "client_send on destroyed socket")) + goto cleanup; + ASSERT_EQ(errno, ECONNRESET, "error code on destroyed socket"); + +cleanup: + if (clien != -1) + close(clien); + if (accept_serv != -1) + close(accept_serv); + if (serv != -1) + close(serv); +} + +static void test_udp_client(struct sock_destroy_prog *skel) +{ + int serv = -1, clien = -1, n = 0; + + serv = start_server(AF_INET6, SOCK_DGRAM, NULL, 0, 0); + if (!ASSERT_GE(serv, 0, "start_server")) + goto cleanup; + + clien = connect_to_fd(serv, 0); + if (!ASSERT_GE(clien, 0, "connect_to_fd")) + goto cleanup; + + n = send(clien, "t", 1, 0); + if (!ASSERT_EQ(n, 1, "client send")) + goto cleanup; + + /* Run iterator program that destroys sockets. */ + start_iter_sockets(skel->progs.iter_udp6_client); + + n = send(clien, "t", 1, 0); + if (!ASSERT_LT(n, 0, "client_send on destroyed socket")) + goto cleanup; + /* UDP sockets have an overriding error code after they are disconnected, + * so we don't check for ECONNABORTED error code. + */ + +cleanup: + if (clien != -1) + close(clien); + if (serv != -1) + close(serv); +} + +static void test_udp_server(struct sock_destroy_prog *skel) +{ + int *listen_fds = NULL, n, i, serv_port; + unsigned int num_listens = 5; + char buf[1]; + + /* Start reuseport servers. */ + listen_fds = start_reuseport_server(AF_INET6, SOCK_DGRAM, + "::1", 0, 0, num_listens); + if (!ASSERT_OK_PTR(listen_fds, "start_reuseport_server")) + goto cleanup; + serv_port = get_socket_local_port(listen_fds[0]); + if (!ASSERT_GE(serv_port, 0, "get_sock_local_port")) + goto cleanup; + skel->bss->serv_port = (__be16) serv_port; + + /* Run iterator program that destroys server sockets. */ + start_iter_sockets(skel->progs.iter_udp6_server); + + for (i = 0; i < num_listens; ++i) { + n = read(listen_fds[i], buf, sizeof(buf)); + if (!ASSERT_EQ(n, -1, "read") || + !ASSERT_EQ(errno, ECONNABORTED, "error code on destroyed socket")) + break; + } + ASSERT_EQ(i, num_listens, "server socket"); + +cleanup: + free_fds(listen_fds, num_listens); +} + +void test_sock_destroy(void) +{ + struct sock_destroy_prog *skel; + struct nstoken *nstoken = NULL; + int cgroup_fd; + + skel = sock_destroy_prog__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel_open")) + return; + + cgroup_fd = test__join_cgroup("/sock_destroy"); + if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup")) + goto cleanup; + + skel->links.sock_connect = bpf_program__attach_cgroup( + skel->progs.sock_connect, cgroup_fd); + if (!ASSERT_OK_PTR(skel->links.sock_connect, "prog_attach")) + goto cleanup; + + SYS(cleanup, "ip netns add %s", TEST_NS); + SYS(cleanup, "ip -net %s link set dev lo up", TEST_NS); + + nstoken = open_netns(TEST_NS); + if (!ASSERT_OK_PTR(nstoken, "open_netns")) + goto cleanup; + + if (test__start_subtest("tcp_client")) + test_tcp_client(skel); + if (test__start_subtest("tcp_server")) + test_tcp_server(skel); + if (test__start_subtest("udp_client")) + test_udp_client(skel); + if (test__start_subtest("udp_server")) + test_udp_server(skel); + + RUN_TESTS(sock_destroy_prog_fail); + +cleanup: + if (nstoken) + close_netns(nstoken); + SYS_NOFAIL("ip netns del " TEST_NS " &> /dev/null"); + if (cgroup_fd >= 0) + close(cgroup_fd); + sock_destroy_prog__destroy(skel); +} diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c index aa4debf62fc6..9e6a5e3ed4de 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c
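The PAGE_SIZE fallback added at the top of this file is only a 4096-byte default; run_test() below rescales any optlen expressed relative to it against the kernel's real page size, so a ">PAGE_SIZE" case stays oversized on 16K- or 64K-page systems. The arithmetic, isolated into a helper (name assumed):

#include <unistd.h>

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

/* remap a 4096-based optlen onto the runtime page size,
 * preserving its offset from the page boundary
 */
static int scale_optlen(int optlen)
{
	int num_pages = optlen / PAGE_SIZE;
	int remainder = optlen % PAGE_SIZE;

	return num_pages * sysconf(_SC_PAGESIZE) + remainder;
}

With this, PAGE_SIZE + 1 in the test table becomes real-page-size + 1 at run time, which is exactly the boundary the kernel now refuses to expose to cgroup sockopt programs.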
@@ -5,10 +5,15 @@ static char bpf_log_buf[4096]; static bool verbose; +#ifndef PAGE_SIZE +#define PAGE_SIZE 4096 +#endif + enum sockopt_test_error { OK = 0, DENY_LOAD, DENY_ATTACH, + EOPNOTSUPP_GETSOCKOPT, EPERM_GETSOCKOPT, EFAULT_GETSOCKOPT, EPERM_SETSOCKOPT, @@ -273,10 +278,31 @@ static struct sockopt_test { .error = EFAULT_GETSOCKOPT, }, { - .descr = "getsockopt: deny arbitrary ctx->retval", + .descr = "getsockopt: ignore >PAGE_SIZE optlen", .insns = { - /* ctx->retval = 123 */ - BPF_MOV64_IMM(BPF_REG_0, 123), + /* write 0xFF to the first optval byte */ + + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r2 = ctx->optval */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + /* r6 = ctx->optval + 1 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + + /* r7 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), + /* ctx->optval[0] = 0xFF */ + BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xFF), + /* } */ + + /* retval changes are ignored */ + /* ctx->retval = 5 */ + BPF_MOV64_IMM(BPF_REG_0, 5), BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, offsetof(struct bpf_sockopt, retval)), @@ -287,9 +313,11 @@ static struct sockopt_test { .attach_type = BPF_CGROUP_GETSOCKOPT, .expected_attach_type = BPF_CGROUP_GETSOCKOPT, - .get_optlen = 64, - - .error = EFAULT_GETSOCKOPT, + .get_level = 1234, + .get_optname = 5678, + .get_optval = {}, /* the changes are ignored */ + .get_optlen = PAGE_SIZE + 1, + .error = EOPNOTSUPP_GETSOCKOPT, }, { .descr = "getsockopt: support smaller ctx->optlen", @@ -649,6 +677,45 @@ static struct sockopt_test { .error = EFAULT_SETSOCKOPT, }, { + .descr = "setsockopt: ignore >PAGE_SIZE optlen", + .insns = { + /* write 0xF0 to the first optval byte */ + + /* r6 = ctx->optval */ + BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, + offsetof(struct bpf_sockopt, optval)), + /* r2 = ctx->optval */ + BPF_MOV64_REG(BPF_REG_2, BPF_REG_6), + /* r6 = ctx->optval + 1 */ + BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1), + + /* r7 = ctx->optval_end */ + BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, + offsetof(struct bpf_sockopt, optval_end)), + + /* if (ctx->optval + 1 <= ctx->optval_end) { */ + BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1), + /* ctx->optval[0] = 0xF0 */ + BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0), + /* } */ + + BPF_MOV64_IMM(BPF_REG_0, 1), + BPF_EXIT_INSN(), + }, + .attach_type = BPF_CGROUP_SETSOCKOPT, + .expected_attach_type = BPF_CGROUP_SETSOCKOPT, + + .set_level = SOL_IP, + .set_optname = IP_TOS, + .set_optval = {}, + .set_optlen = PAGE_SIZE + 1, + + .get_level = SOL_IP, + .get_optname = IP_TOS, + .get_optval = {}, /* the changes are ignored */ + .get_optlen = 4, + }, + { .descr = "setsockopt: allow changing ctx->optlen within bounds", .insns = { /* r6 = ctx->optval */ @@ -906,6 +973,13 @@ static int run_test(int cgroup_fd, struct sockopt_test *test) } if (test->set_optlen) { + if (test->set_optlen >= PAGE_SIZE) { + int num_pages = test->set_optlen / PAGE_SIZE; + int remainder = test->set_optlen % PAGE_SIZE; + + test->set_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder; + } + err = setsockopt(sock_fd, test->set_level, test->set_optname, test->set_optval, test->set_optlen); if (err) { @@ -921,7 +995,15 @@ static int run_test(int cgroup_fd, struct sockopt_test *test) } if (test->get_optlen) { + if (test->get_optlen >= PAGE_SIZE) { + int num_pages = 
test->get_optlen / PAGE_SIZE; + int remainder = test->get_optlen % PAGE_SIZE; + + test->get_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder; + } + optval = malloc(test->get_optlen); + memset(optval, 0, test->get_optlen); socklen_t optlen = test->get_optlen; socklen_t expected_get_optlen = test->get_optlen_ret ?: test->get_optlen; @@ -929,6 +1011,8 @@ static int run_test(int cgroup_fd, struct sockopt_test *test) err = getsockopt(sock_fd, test->get_level, test->get_optname, optval, &optlen); if (err) { + if (errno == EOPNOTSUPP && test->error == EOPNOTSUPP_GETSOCKOPT) + goto free_optval; if (errno == EPERM && test->error == EPERM_GETSOCKOPT) goto free_optval; if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT) @@ -976,7 +1060,9 @@ void test_sockopt(void) return; for (i = 0; i < ARRAY_SIZE(tests); i++) { - test__start_subtest(tests[i].descr); + if (!test__start_subtest(tests[i].descr)) + continue; + ASSERT_OK(run_test(cgroup_fd, &tests[i]), tests[i].descr); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index 60c17a8e2789..917f486db826 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -2,6 +2,8 @@ #include <test_progs.h> #include "cgroup_helpers.h" +#include "sockopt_inherit.skel.h" + #define SOL_CUSTOM 0xdeadbeef #define CUSTOM_INHERIT1 0 #define CUSTOM_INHERIT2 1 @@ -132,58 +134,30 @@ static int start_server(void) return fd; } -static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, - const char *prog_name) -{ - enum bpf_attach_type attach_type; - enum bpf_prog_type prog_type; - struct bpf_program *prog; - int err; - - err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); - if (err) { - log_err("Failed to deduct types for %s BPF program", prog_name); - return -1; - } - - prog = bpf_object__find_program_by_name(obj, prog_name); - if (!prog) { - log_err("Failed to find %s BPF program", prog_name); - return -1; - } - - err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, - attach_type, 0); - if (err) { - log_err("Failed to attach %s BPF program", prog_name); - return -1; - } - - return 0; -} - static void run_test(int cgroup_fd) { + struct bpf_link *link_getsockopt = NULL; + struct bpf_link *link_setsockopt = NULL; int server_fd = -1, client_fd; - struct bpf_object *obj; + struct sockopt_inherit *obj; void *server_err; pthread_t tid; int err; - obj = bpf_object__open_file("sockopt_inherit.bpf.o", NULL); - if (!ASSERT_OK_PTR(obj, "obj_open")) + obj = sockopt_inherit__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) return; - err = bpf_object__load(obj); - if (!ASSERT_OK(err, "obj_load")) - goto close_bpf_object; + obj->bss->page_size = sysconf(_SC_PAGESIZE); - err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt", "_getsockopt"); - if (!ASSERT_OK(err, "prog_attach _getsockopt")) + link_getsockopt = bpf_program__attach_cgroup(obj->progs._getsockopt, + cgroup_fd); + if (!ASSERT_OK_PTR(link_getsockopt, "cg-attach-getsockopt")) goto close_bpf_object; - err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt", "_setsockopt"); - if (!ASSERT_OK(err, "prog_attach _setsockopt")) + link_setsockopt = bpf_program__attach_cgroup(obj->progs._setsockopt, + cgroup_fd); + if (!ASSERT_OK_PTR(link_setsockopt, "cg-attach-setsockopt")) goto close_bpf_object; server_fd = start_server(); @@ -217,7 +191,10 @@ static void run_test(int cgroup_fd) close_server_fd: close(server_fd); close_bpf_object: 
- bpf_object__close(obj); + bpf_link__destroy(link_getsockopt); + bpf_link__destroy(link_setsockopt); + + sockopt_inherit__destroy(obj); } void test_sockopt_inherit(void) diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c index 7f5659349011..759bbb6f8c5f 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c @@ -2,61 +2,13 @@ #include <test_progs.h> #include "cgroup_helpers.h" -static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name) -{ - enum bpf_attach_type attach_type; - enum bpf_prog_type prog_type; - struct bpf_program *prog; - int err; - - err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); - if (err) { - log_err("Failed to deduct types for %s BPF program", title); - return -1; - } - - prog = bpf_object__find_program_by_name(obj, name); - if (!prog) { - log_err("Failed to find %s BPF program", name); - return -1; - } - - err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, - attach_type, BPF_F_ALLOW_MULTI); - if (err) { - log_err("Failed to attach %s BPF program", name); - return -1; - } - - return 0; -} +#include "sockopt_multi.skel.h" -static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name) -{ - enum bpf_attach_type attach_type; - enum bpf_prog_type prog_type; - struct bpf_program *prog; - int err; - - err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); - if (err) - return -1; - - prog = bpf_object__find_program_by_name(obj, name); - if (!prog) - return -1; - - err = bpf_prog_detach2(bpf_program__fd(prog), cgroup_fd, - attach_type); - if (err) - return -1; - - return 0; -} - -static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, +static int run_getsockopt_test(struct sockopt_multi *obj, int cg_parent, int cg_child, int sock_fd) { + struct bpf_link *link_parent = NULL; + struct bpf_link *link_child = NULL; socklen_t optlen; __u8 buf; int err; @@ -89,8 +41,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, * - child: 0x80 -> 0x90 */ - err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); - if (err) + link_child = bpf_program__attach_cgroup(obj->progs._getsockopt_child, + cg_child); + if (!ASSERT_OK_PTR(link_child, "cg-attach-getsockopt_child")) goto detach; buf = 0x00; @@ -113,8 +66,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, * - parent: 0x90 -> 0xA0 */ - err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent"); - if (err) + link_parent = bpf_program__attach_cgroup(obj->progs._getsockopt_parent, + cg_parent); + if (!ASSERT_OK_PTR(link_parent, "cg-attach-getsockopt_parent")) goto detach; buf = 0x00; @@ -157,11 +111,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, * - parent: unexpected 0x40, EPERM */ - err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); - if (err) { - log_err("Failed to detach child program"); - goto detach; - } + bpf_link__destroy(link_child); + link_child = NULL; buf = 0x00; optlen = 1; @@ -198,15 +149,17 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent, } detach: - prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child"); - prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent"); + bpf_link__destroy(link_child); + bpf_link__destroy(link_parent); return err; } -static int run_setsockopt_test(struct 
bpf_object *obj, int cg_parent, +static int run_setsockopt_test(struct sockopt_multi *obj, int cg_parent, int cg_child, int sock_fd) { + struct bpf_link *link_parent = NULL; + struct bpf_link *link_child = NULL; socklen_t optlen; __u8 buf; int err; @@ -236,8 +189,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, /* Attach child program and make sure it adds 0x10. */ - err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt"); - if (err) + link_child = bpf_program__attach_cgroup(obj->progs._setsockopt, + cg_child); + if (!ASSERT_OK_PTR(link_child, "cg-attach-setsockopt_child")) goto detach; buf = 0x80; @@ -263,8 +217,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, /* Attach parent program and make sure it adds another 0x10. */ - err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt"); - if (err) + link_parent = bpf_program__attach_cgroup(obj->progs._setsockopt, + cg_parent); + if (!ASSERT_OK_PTR(link_parent, "cg-attach-setsockopt_parent")) goto detach; buf = 0x80; @@ -289,8 +244,8 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent, } detach: - prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt"); - prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt"); + bpf_link__destroy(link_child); + bpf_link__destroy(link_parent); return err; } @@ -298,9 +253,8 @@ detach: void test_sockopt_multi(void) { int cg_parent = -1, cg_child = -1; - struct bpf_object *obj = NULL; + struct sockopt_multi *obj = NULL; int sock_fd = -1; - int err = -1; cg_parent = test__join_cgroup("/parent"); if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent")) @@ -310,13 +264,11 @@ void test_sockopt_multi(void) if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child")) goto out; - obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL); - if (!ASSERT_OK_PTR(obj, "obj_load")) + obj = sockopt_multi__open_and_load(); + if (!ASSERT_OK_PTR(obj, "skel-load")) goto out; - err = bpf_object__load(obj); - if (!ASSERT_OK(err, "obj_load")) - goto out; + obj->bss->page_size = sysconf(_SC_PAGESIZE); sock_fd = socket(AF_INET, SOCK_STREAM, 0); if (!ASSERT_GE(sock_fd, 0, "socket")) @@ -327,7 +279,7 @@ void test_sockopt_multi(void) out: close(sock_fd); - bpf_object__close(obj); + sockopt_multi__destroy(obj); close(cg_child); close(cg_parent); } diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c index 6b53b3cb8dad..6b2d300e9fd4 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c @@ -42,6 +42,8 @@ void test_sockopt_qos_to_cc(void) if (!ASSERT_OK_PTR(skel, "skel")) goto done; + skel->bss->page_size = sysconf(_SC_PAGESIZE); + sock_fd = socket(AF_INET6, SOCK_STREAM, 0); if (!ASSERT_GE(sock_fd, 0, "v6 socket open")) goto done; diff --git a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c new file mode 100644 index 000000000000..4224727fb364 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Bytedance */ + +#include <sys/syscall.h> +#include <test_progs.h> +#include <cgroup_helpers.h> +#include "test_task_under_cgroup.skel.h" + +#define FOO "/foo" + +void test_task_under_cgroup(void) +{ + struct test_task_under_cgroup *skel; + int ret, foo; + pid_t pid; + + foo = test__join_cgroup(FOO); + if 
(!ASSERT_OK(foo < 0, "cgroup_join_foo")) + return; + + skel = test_task_under_cgroup__open(); + if (!ASSERT_OK_PTR(skel, "test_task_under_cgroup__open")) + goto cleanup; + + skel->rodata->local_pid = getpid(); + skel->bss->remote_pid = getpid(); + skel->rodata->cgid = get_cgroup_id(FOO); + + ret = test_task_under_cgroup__load(skel); + if (!ASSERT_OK(ret, "test_task_under_cgroup__load")) + goto cleanup; + + ret = test_task_under_cgroup__attach(skel); + if (!ASSERT_OK(ret, "test_task_under_cgroup__attach")) + goto cleanup; + + pid = fork(); + if (pid == 0) + exit(0); + + ret = (pid == -1); + if (ASSERT_OK(ret, "fork process")) + wait(NULL); + + test_task_under_cgroup__detach(skel); + + ASSERT_NEQ(skel->bss->remote_pid, skel->rodata->local_pid, + "test task_under_cgroup"); + +cleanup: + test_task_under_cgroup__destroy(skel); + close(foo); +} diff --git a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c index 8383a99f610f..0adf8d9475cb 100644 --- a/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c +++ b/tools/testing/selftests/bpf/prog_tests/unpriv_bpf_disabled.c @@ -171,7 +171,11 @@ static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *s prog_insns, prog_insn_cnt, &load_opts), -EPERM, "prog_load_fails"); - for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_BLOOM_FILTER; i++) + /* some map types require particular correct parameters which could be + * sanity-checked before enforcing -EPERM, so only validate that + * the simple ARRAY and HASH maps are failing with -EPERM + */ + for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++) ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL), -EPERM, "map_create_fails"); diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 2497716ee379..070a13833c3f 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -50,11 +50,13 @@ #include "verifier_regalloc.skel.h" #include "verifier_ringbuf.skel.h" #include "verifier_runtime_jit.skel.h" +#include "verifier_scalar_ids.skel.h" #include "verifier_search_pruning.skel.h" #include "verifier_sock.skel.h" #include "verifier_spill_fill.skel.h" #include "verifier_spin_lock.skel.h" #include "verifier_stack_ptr.skel.h" +#include "verifier_subprog_precision.skel.h" #include "verifier_subreg.skel.h" #include "verifier_uninit.skel.h" #include "verifier_unpriv.skel.h" @@ -149,11 +151,13 @@ void test_verifier_ref_tracking(void) { RUN(verifier_ref_tracking); } void test_verifier_regalloc(void) { RUN(verifier_regalloc); } void test_verifier_ringbuf(void) { RUN(verifier_ringbuf); } void test_verifier_runtime_jit(void) { RUN(verifier_runtime_jit); } +void test_verifier_scalar_ids(void) { RUN(verifier_scalar_ids); } void test_verifier_search_pruning(void) { RUN(verifier_search_pruning); } void test_verifier_sock(void) { RUN(verifier_sock); } void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); } void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); } void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); } +void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); } void test_verifier_subreg(void) { RUN(verifier_subreg); } void test_verifier_uninit(void) { RUN(verifier_uninit); } void test_verifier_unpriv(void) { RUN(verifier_unpriv); } diff --git a/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c 
b/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c new file mode 100644 index 000000000000..2a5e207edad6 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/vrf_socket_lookup.c @@ -0,0 +1,312 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause + +/* + * Topology: + * --------- + * NS0 namespace | NS1 namespace + * | + * +--------------+ | +--------------+ + * | veth01 |----------| veth10 | + * | 172.16.1.100 | | | 172.16.1.200 | + * | bpf | | +--------------+ + * +--------------+ | + * server(UDP/TCP) | + * +-------------------+ | + * | vrf1 | | + * | +--------------+ | | +--------------+ + * | | veth02 |----------| veth20 | + * | | 172.16.2.100 | | | | 172.16.2.200 | + * | | bpf | | | +--------------+ + * | +--------------+ | | + * | server(UDP/TCP) | | + * +-------------------+ | + * + * Test flow + * ----------- + * The tests verifies that socket lookup via TC is VRF aware: + * 1) Creates two veth pairs between NS0 and NS1: + * a) veth01 <-> veth10 outside the VRF + * b) veth02 <-> veth20 in the VRF + * 2) Attaches to veth01 and veth02 a program that calls: + * a) bpf_skc_lookup_tcp() with TCP and tcp_skc is true + * b) bpf_sk_lookup_tcp() with TCP and tcp_skc is false + * c) bpf_sk_lookup_udp() with UDP + * The program stores the lookup result in bss->lookup_status. + * 3) Creates a socket TCP/UDP server in/outside the VRF. + * 4) The test expects lookup_status to be: + * a) 0 from device in VRF to server outside VRF + * b) 0 from device outside VRF to server in VRF + * c) 1 from device in VRF to server in VRF + * d) 1 from device outside VRF to server outside VRF + */ + +#include <net/if.h> + +#include "test_progs.h" +#include "network_helpers.h" +#include "vrf_socket_lookup.skel.h" + +#define NS0 "vrf_socket_lookup_0" +#define NS1 "vrf_socket_lookup_1" + +#define IP4_ADDR_VETH01 "172.16.1.100" +#define IP4_ADDR_VETH10 "172.16.1.200" +#define IP4_ADDR_VETH02 "172.16.2.100" +#define IP4_ADDR_VETH20 "172.16.2.200" + +#define NON_VRF_PORT 5000 +#define IN_VRF_PORT 5001 + +#define TIMEOUT_MS 3000 + +static int make_socket(int sotype, const char *ip, int port, + struct sockaddr_storage *addr) +{ + int err, fd; + + err = make_sockaddr(AF_INET, ip, port, addr, NULL); + if (!ASSERT_OK(err, "make_address")) + return -1; + + fd = socket(AF_INET, sotype, 0); + if (!ASSERT_GE(fd, 0, "socket")) + return -1; + + if (!ASSERT_OK(settimeo(fd, TIMEOUT_MS), "settimeo")) + goto fail; + + return fd; +fail: + close(fd); + return -1; +} + +static int make_server(int sotype, const char *ip, int port, const char *ifname) +{ + int err, fd = -1; + + fd = start_server(AF_INET, sotype, ip, port, TIMEOUT_MS); + if (!ASSERT_GE(fd, 0, "start_server")) + return -1; + + if (ifname) { + err = setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, + ifname, strlen(ifname) + 1); + if (!ASSERT_OK(err, "setsockopt(SO_BINDTODEVICE)")) + goto fail; + } + + return fd; +fail: + close(fd); + return -1; +} + +static int attach_progs(char *ifname, int tc_prog_fd, int xdp_prog_fd) +{ + LIBBPF_OPTS(bpf_tc_hook, hook, .attach_point = BPF_TC_INGRESS); + LIBBPF_OPTS(bpf_tc_opts, opts, .handle = 1, .priority = 1, + .prog_fd = tc_prog_fd); + int ret, ifindex; + + ifindex = if_nametoindex(ifname); + if (!ASSERT_NEQ(ifindex, 0, "if_nametoindex")) + return -1; + hook.ifindex = ifindex; + + ret = bpf_tc_hook_create(&hook); + if (!ASSERT_OK(ret, "bpf_tc_hook_create")) + return ret; + + ret = bpf_tc_attach(&hook, &opts); + if (!ASSERT_OK(ret, "bpf_tc_attach")) { + bpf_tc_hook_destroy(&hook); + return ret; + } + ret = 
bpf_xdp_attach(ifindex, xdp_prog_fd, 0, NULL); + if (!ASSERT_OK(ret, "bpf_xdp_attach")) { + bpf_tc_hook_destroy(&hook); + return ret; + } + + return 0; +} + +static void cleanup(void) +{ + SYS_NOFAIL("test -f /var/run/netns/" NS0 " && ip netns delete " + NS0); + SYS_NOFAIL("test -f /var/run/netns/" NS1 " && ip netns delete " + NS1); +} + +static int setup(struct vrf_socket_lookup *skel) +{ + int tc_prog_fd, xdp_prog_fd, ret = 0; + struct nstoken *nstoken = NULL; + + SYS(fail, "ip netns add " NS0); + SYS(fail, "ip netns add " NS1); + + /* NS0 <-> NS1 [veth01 <-> veth10] */ + SYS(fail, "ip link add veth01 netns " NS0 " type veth peer name veth10" + " netns " NS1); + SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH01 "/24 dev veth01"); + SYS(fail, "ip -net " NS0 " link set dev veth01 up"); + SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH10 "/24 dev veth10"); + SYS(fail, "ip -net " NS1 " link set dev veth10 up"); + + /* NS0 <-> NS1 [veth02 <-> veth20] */ + SYS(fail, "ip link add veth02 netns " NS0 " type veth peer name veth20" + " netns " NS1); + SYS(fail, "ip -net " NS0 " addr add " IP4_ADDR_VETH02 "/24 dev veth02"); + SYS(fail, "ip -net " NS0 " link set dev veth02 up"); + SYS(fail, "ip -net " NS1 " addr add " IP4_ADDR_VETH20 "/24 dev veth20"); + SYS(fail, "ip -net " NS1 " link set dev veth20 up"); + + /* veth02 -> vrf1 */ + SYS(fail, "ip -net " NS0 " link add vrf1 type vrf table 11"); + SYS(fail, "ip -net " NS0 " route add vrf vrf1 unreachable default" + " metric 4278198272"); + SYS(fail, "ip -net " NS0 " link set vrf1 alias vrf"); + SYS(fail, "ip -net " NS0 " link set vrf1 up"); + SYS(fail, "ip -net " NS0 " link set veth02 master vrf1"); + + /* Attach TC and XDP progs to veth devices in NS0 */ + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns " NS0)) + goto fail; + tc_prog_fd = bpf_program__fd(skel->progs.tc_socket_lookup); + if (!ASSERT_GE(tc_prog_fd, 0, "bpf_program__tc_fd")) + goto fail; + xdp_prog_fd = bpf_program__fd(skel->progs.xdp_socket_lookup); + if (!ASSERT_GE(xdp_prog_fd, 0, "bpf_program__xdp_fd")) + goto fail; + + if (attach_progs("veth01", tc_prog_fd, xdp_prog_fd)) + goto fail; + + if (attach_progs("veth02", tc_prog_fd, xdp_prog_fd)) + goto fail; + + goto close; +fail: + ret = -1; +close: + if (nstoken) + close_netns(nstoken); + return ret; +} + +static int test_lookup(struct vrf_socket_lookup *skel, int sotype, + const char *ip, int port, bool test_xdp, bool tcp_skc, + int lookup_status_exp) +{ + static const char msg[] = "Hello Server"; + struct sockaddr_storage addr = {}; + int fd, ret = 0; + + fd = make_socket(sotype, ip, port, &addr); + if (fd < 0) + return -1; + + skel->bss->test_xdp = test_xdp; + skel->bss->tcp_skc = tcp_skc; + skel->bss->lookup_status = -1; + + if (sotype == SOCK_STREAM) + connect(fd, (void *)&addr, sizeof(struct sockaddr_in)); + else + sendto(fd, msg, sizeof(msg), 0, (void *)&addr, + sizeof(struct sockaddr_in)); + + if (!ASSERT_EQ(skel->bss->lookup_status, lookup_status_exp, + "lookup_status")) + goto fail; + + goto close; + +fail: + ret = -1; +close: + close(fd); + return ret; +} + +static void _test_vrf_socket_lookup(struct vrf_socket_lookup *skel, int sotype, + bool test_xdp, bool tcp_skc) +{ + int in_vrf_server = -1, non_vrf_server = -1; + struct nstoken *nstoken = NULL; + + nstoken = open_netns(NS0); + if (!ASSERT_OK_PTR(nstoken, "setns " NS0)) + goto done; + + /* Open sockets in and outside VRF */ + non_vrf_server = make_server(sotype, "0.0.0.0", NON_VRF_PORT, NULL); + if (!ASSERT_GE(non_vrf_server, 0, 
"make_server__outside_vrf_fd")) + goto done; + + in_vrf_server = make_server(sotype, "0.0.0.0", IN_VRF_PORT, "veth02"); + if (!ASSERT_GE(in_vrf_server, 0, "make_server__in_vrf_fd")) + goto done; + + /* Perform test from NS1 */ + close_netns(nstoken); + nstoken = open_netns(NS1); + if (!ASSERT_OK_PTR(nstoken, "setns " NS1)) + goto done; + + if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, NON_VRF_PORT, + test_xdp, tcp_skc, 0), "in_to_out")) + goto done; + if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH02, IN_VRF_PORT, + test_xdp, tcp_skc, 1), "in_to_in")) + goto done; + if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, NON_VRF_PORT, + test_xdp, tcp_skc, 1), "out_to_out")) + goto done; + if (!ASSERT_OK(test_lookup(skel, sotype, IP4_ADDR_VETH01, IN_VRF_PORT, + test_xdp, tcp_skc, 0), "out_to_in")) + goto done; + +done: + if (non_vrf_server >= 0) + close(non_vrf_server); + if (in_vrf_server >= 0) + close(in_vrf_server); + if (nstoken) + close_netns(nstoken); +} + +void test_vrf_socket_lookup(void) +{ + struct vrf_socket_lookup *skel; + + cleanup(); + + skel = vrf_socket_lookup__open_and_load(); + if (!ASSERT_OK_PTR(skel, "vrf_socket_lookup__open_and_load")) + return; + + if (!ASSERT_OK(setup(skel), "setup")) + goto done; + + if (test__start_subtest("tc_socket_lookup_tcp")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, false, false); + if (test__start_subtest("tc_socket_lookup_tcp_skc")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, false, false); + if (test__start_subtest("tc_socket_lookup_udp")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, false, false); + if (test__start_subtest("xdp_socket_lookup_tcp")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, true, false); + if (test__start_subtest("xdp_socket_lookup_tcp_skc")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, true, false); + if (test__start_subtest("xdp_socket_lookup_udp")) + _test_vrf_socket_lookup(skel, SOCK_STREAM, true, false); + +done: + vrf_socket_lookup__destroy(skel); + cleanup(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c index d19f79048ff6..c3b45745cbcc 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_bonding.c @@ -18,6 +18,7 @@ #include <linux/if_bonding.h> #include <linux/limits.h> #include <linux/udp.h> +#include <uapi/linux/netdev.h> #include "xdp_dummy.skel.h" #include "xdp_redirect_multi_kern.skel.h" @@ -492,6 +493,123 @@ out: system("ip link del bond_nest2"); } +static void test_xdp_bonding_features(struct skeletons *skeletons) +{ + LIBBPF_OPTS(bpf_xdp_query_opts, query_opts); + int bond_idx, veth1_idx, err; + struct bpf_link *link = NULL; + + if (!ASSERT_OK(system("ip link add bond type bond"), "add bond")) + goto out; + + bond_idx = if_nametoindex("bond"); + if (!ASSERT_GE(bond_idx, 0, "if_nametoindex bond")) + goto out; + + /* query default xdp-feature for bond device */ + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + if (!ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK, + "bond query_opts.feature_flags")) + goto out; + + if (!ASSERT_OK(system("ip link add veth0 type veth peer name veth1"), + "add veth{0,1} pair")) + goto out; + + if (!ASSERT_OK(system("ip link add veth2 type veth peer name veth3"), + "add veth{2,3} pair")) + goto out; + + if (!ASSERT_OK(system("ip link set veth0 master bond"), + "add veth0 to master bond")) + goto out; + + /* xdp-feature for bond 
device should be obtained from the single slave + * device (veth0) + */ + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + if (!ASSERT_EQ(query_opts.feature_flags, + NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG, + "bond query_opts.feature_flags")) + goto out; + + veth1_idx = if_nametoindex("veth1"); + if (!ASSERT_GE(veth1_idx, 0, "if_nametoindex veth1")) + goto out; + + link = bpf_program__attach_xdp(skeletons->xdp_dummy->progs.xdp_dummy_prog, + veth1_idx); + if (!ASSERT_OK_PTR(link, "attach program to veth1")) + goto out; + + /* xdp-features for veth0 are changed */ + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + if (!ASSERT_EQ(query_opts.feature_flags, + NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG, + "bond query_opts.feature_flags")) + goto out; + + if (!ASSERT_OK(system("ip link set veth2 master bond"), + "add veth2 to master bond")) + goto out; + + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + /* xdp-feature for bond device should be set to the most restrictive + * value obtained from attached slave devices (veth0 and veth2) + */ + if (!ASSERT_EQ(query_opts.feature_flags, + NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG, + "bond query_opts.feature_flags")) + goto out; + + if (!ASSERT_OK(system("ip link set veth2 nomaster"), + "del veth2 from master bond")) + goto out; + + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + if (!ASSERT_EQ(query_opts.feature_flags, + NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG | NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG, + "bond query_opts.feature_flags")) + goto out; + + if (!ASSERT_OK(system("ip link set veth0 nomaster"), + "del veth0 from master bond")) + goto out; + + err = bpf_xdp_query(bond_idx, XDP_FLAGS_DRV_MODE, &query_opts); + if (!ASSERT_OK(err, "bond bpf_xdp_query")) + goto out; + + ASSERT_EQ(query_opts.feature_flags, NETDEV_XDP_ACT_MASK, + "bond query_opts.feature_flags"); +out: + bpf_link__destroy(link); + system("ip link del veth0"); + system("ip link del veth2"); + system("ip link del bond"); +} + static int libbpf_debug_print(enum libbpf_print_level level, const char *format, va_list args) { @@ -546,6 +664,9 @@ void serial_test_xdp_bonding(void) if (test__start_subtest("xdp_bonding_nested")) test_xdp_bonding_nested(&skeletons); + if (test__start_subtest("xdp_bonding_features")) + test_xdp_bonding_features(&skeletons); + for (i = 0; i < ARRAY_SIZE(bond_test_cases); i++) { struct bond_test_case *test_case = &bond_test_cases[i]; diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index d3c1217ba79a..38a57a2e70db 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -86,6 +86,10 @@ #define POINTER_VALUE 0xcafe4all #define TEST_DATA_LEN 64 +#ifndef __used +#define __used __attribute__((used)) +#endif + #if defined(__TARGET_ARCH_x86) #define SYSCALL_WRAPPER 1 #define SYS_PREFIX "__x64_" diff --git a/tools/testing/selftests/bpf/progs/cb_refs.c b/tools/testing/selftests/bpf/progs/cb_refs.c index 50f95ec61165..76d661b20e87 100644 --- 
a/tools/testing/selftests/bpf/progs/cb_refs.c +++ b/tools/testing/selftests/bpf/progs/cb_refs.c @@ -2,6 +2,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct map_value { struct prog_test_ref_kfunc __kptr *ptr; @@ -14,9 +15,6 @@ struct { __uint(max_entries, 16); } array_map SEC(".maps"); -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; - static __noinline int cb1(void *map, void *key, void *value, void *ctx) { void *p = *(void **)ctx; diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c index b2a409e6382a..932b8ecd4ae3 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c +++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c @@ -12,6 +12,7 @@ __u32 invocations = 0; __u32 assertion_error = 0; __u32 retval_value = 0; __u32 ctx_retval_value = 0; +__u32 page_size = 0; SEC("cgroup/getsockopt") int get_retval(struct bpf_sockopt *ctx) @@ -20,6 +21,10 @@ int get_retval(struct bpf_sockopt *ctx) ctx_retval_value = ctx->retval; __sync_fetch_and_add(&invocations, 1); + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } @@ -31,6 +36,10 @@ int set_eisconn(struct bpf_sockopt *ctx) if (bpf_set_retval(-EISCONN)) assertion_error = 1; + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } @@ -41,5 +50,9 @@ int clear_retval(struct bpf_sockopt *ctx) ctx->retval = 0; + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c index d6e5903e06ba..b7fa8804e19d 100644 --- a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c +++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c @@ -11,6 +11,7 @@ __u32 invocations = 0; __u32 assertion_error = 0; __u32 retval_value = 0; +__u32 page_size = 0; SEC("cgroup/setsockopt") int get_retval(struct bpf_sockopt *ctx) @@ -18,6 +19,10 @@ int get_retval(struct bpf_sockopt *ctx) retval_value = bpf_get_retval(); __sync_fetch_and_add(&invocations, 1); + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } @@ -29,6 +34,10 @@ int set_eunatch(struct bpf_sockopt *ctx) if (bpf_set_retval(-EUNATCH)) assertion_error = 1; + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 0; } @@ -40,6 +49,10 @@ int set_eisconn(struct bpf_sockopt *ctx) if (bpf_set_retval(-EISCONN)) assertion_error = 1; + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 0; } @@ -48,5 +61,9 @@ int legacy_eperm(struct bpf_sockopt *ctx) { __sync_fetch_and_add(&invocations, 1); + /* optval larger than PAGE_SIZE use kernel's buffer. 
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 0; } diff --git a/tools/testing/selftests/bpf/progs/cpumask_common.h b/tools/testing/selftests/bpf/progs/cpumask_common.h index 0c5b785a93e4..b15c588ace15 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_common.h +++ b/tools/testing/selftests/bpf/progs/cpumask_common.h @@ -28,6 +28,8 @@ void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym; struct bpf_cpumask *bpf_cpumask_acquire(struct bpf_cpumask *cpumask) __ksym; u32 bpf_cpumask_first(const struct cpumask *cpumask) __ksym; u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; +u32 bpf_cpumask_first_and(const struct cpumask *src1, + const struct cpumask *src2) __ksym; void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; void bpf_cpumask_clear_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym; bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym; @@ -50,8 +52,8 @@ bool bpf_cpumask_subset(const struct cpumask *src1, const struct cpumask *src2) bool bpf_cpumask_empty(const struct cpumask *cpumask) __ksym; bool bpf_cpumask_full(const struct cpumask *cpumask) __ksym; void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym; -u32 bpf_cpumask_any(const struct cpumask *src) __ksym; -u32 bpf_cpumask_any_and(const struct cpumask *src1, const struct cpumask *src2) __ksym; +u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym; +u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym; void bpf_rcu_read_lock(void) __ksym; void bpf_rcu_read_unlock(void) __ksym; diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c index 2fcdd7f68ac7..674a63424dee 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_success.c +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -5,6 +5,7 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "bpf_misc.h" #include "cpumask_common.h" char _license[] SEC("license") = "GPL"; @@ -175,6 +176,38 @@ release_exit: } SEC("tp_btf/task_newtask") +int BPF_PROG(test_firstand_nocpu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask1, *mask2; + u32 first; + + if (!is_test_task()) + return 0; + + mask1 = create_cpumask(); + if (!mask1) + return 0; + + mask2 = create_cpumask(); + if (!mask2) + goto release_exit; + + bpf_cpumask_set_cpu(0, mask1); + bpf_cpumask_set_cpu(1, mask2); + + first = bpf_cpumask_first_and(cast(mask1), cast(mask2)); + if (first <= 1) + err = 3; + +release_exit: + if (mask1) + bpf_cpumask_release(mask1); + if (mask2) + bpf_cpumask_release(mask2); + return 0; +} + +SEC("tp_btf/task_newtask") int BPF_PROG(test_test_and_set_clear, struct task_struct *task, u64 clone_flags) { struct bpf_cpumask *cpumask; @@ -311,13 +344,13 @@ int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags) bpf_cpumask_set_cpu(1, mask2); bpf_cpumask_or(dst1, cast(mask1), cast(mask2)); - cpu = bpf_cpumask_any(cast(mask1)); + cpu = bpf_cpumask_any_distribute(cast(mask1)); if (cpu != 0) { err = 6; goto release_exit; } - cpu = bpf_cpumask_any(cast(dst2)); + cpu = bpf_cpumask_any_distribute(cast(dst2)); if (cpu < nr_cpus) { err = 7; goto release_exit; @@ -329,13 +362,13 @@ int BPF_PROG(test_copy_any_anyand, struct task_struct *task, u64 clone_flags) goto release_exit; } - cpu = bpf_cpumask_any(cast(dst2)); + cpu = bpf_cpumask_any_distribute(cast(dst2)); if (cpu > 1) { err = 9; goto release_exit; } - cpu = 
bpf_cpumask_any_and(cast(mask1), cast(mask2)); + cpu = bpf_cpumask_any_and_distribute(cast(mask1), cast(mask2)); if (cpu < nr_cpus) { err = 10; goto release_exit; @@ -426,3 +459,26 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags) return 0; } + +SEC("tp_btf/task_newtask") +__success +int BPF_PROG(test_refcount_null_tracking, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *mask1, *mask2; + + mask1 = bpf_cpumask_create(); + mask2 = bpf_cpumask_create(); + + if (!mask1 || !mask2) + goto free_masks_return; + + bpf_cpumask_test_cpu(0, (const struct cpumask *)mask1); + bpf_cpumask_test_cpu(0, (const struct cpumask *)mask2); + +free_masks_return: + if (mask1) + bpf_cpumask_release(mask1); + if (mask2) + bpf_cpumask_release(mask2); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 759eb5c245cd..7ce7e827d5f0 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -3,6 +3,7 @@ #include <errno.h> #include <string.h> +#include <stdbool.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include <linux/if_ether.h> @@ -1378,3 +1379,310 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb) return 0; } + +/* bpf_dynptr_adjust can only be called on initialized dynptrs */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int dynptr_adjust_invalid(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_adjust(&ptr, 1, 2); + + return 0; +} + +/* bpf_dynptr_is_null can only be called on initialized dynptrs */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int dynptr_is_null_invalid(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_is_null(&ptr); + + return 0; +} + +/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int dynptr_is_rdonly_invalid(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_is_rdonly(&ptr); + + return 0; +} + +/* bpf_dynptr_size can only be called on initialized dynptrs */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int dynptr_size_invalid(void *ctx) +{ + struct bpf_dynptr ptr; + + /* this should fail */ + bpf_dynptr_size(&ptr); + + return 0; +} + +/* Only initialized dynptrs can be cloned */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #1") +int clone_invalid1(void *ctx) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + + /* this should fail */ + bpf_dynptr_clone(&ptr1, &ptr2); + + return 0; +} + +/* Can't overwrite an existing dynptr when cloning */ +SEC("?xdp") +__failure __msg("cannot overwrite referenced dynptr") +int clone_invalid2(struct xdp_md *xdp) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr clone; + + bpf_dynptr_from_xdp(xdp, 0, &ptr1); + + bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone); + + /* this should fail */ + bpf_dynptr_clone(&ptr1, &clone); + + bpf_ringbuf_submit_dynptr(&clone, 0); + + return 0; +} + +/* Invalidating a dynptr should invalidate its clones */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") +int clone_invalidate1(void *ctx) +{ + struct bpf_dynptr clone; + struct bpf_dynptr ptr; + char read_data[64]; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + + bpf_ringbuf_submit_dynptr(&ptr, 
0); + + /* this should fail */ + bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0); + + return 0; +} + +/* Invalidating a dynptr should invalidate its parent */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") +int clone_invalidate2(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone; + char read_data[64]; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + + bpf_ringbuf_submit_dynptr(&clone, 0); + + /* this should fail */ + bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0); + + return 0; +} + +/* Invalidating a dynptr should invalidate its siblings */ +SEC("?raw_tp") +__failure __msg("Expected an initialized dynptr as arg #3") +int clone_invalidate3(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone1; + struct bpf_dynptr clone2; + char read_data[64]; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone1); + + bpf_dynptr_clone(&ptr, &clone2); + + bpf_ringbuf_submit_dynptr(&clone2, 0); + + /* this should fail */ + bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0); + + return 0; +} + +/* Invalidating a dynptr should invalidate any data slices + * of its clones + */ +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int clone_invalidate4(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone; + int *data; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + data = bpf_dynptr_data(&clone, 0, sizeof(val)); + if (!data) + return 0; + + bpf_ringbuf_submit_dynptr(&ptr, 0); + + /* this should fail */ + *data = 123; + + return 0; +} + +/* Invalidating a dynptr should invalidate any data slices + * of its parent + */ +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int clone_invalidate5(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone; + int *data; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + data = bpf_dynptr_data(&ptr, 0, sizeof(val)); + if (!data) + return 0; + + bpf_dynptr_clone(&ptr, &clone); + + bpf_ringbuf_submit_dynptr(&clone, 0); + + /* this should fail */ + *data = 123; + + return 0; +} + +/* Invalidating a dynptr should invalidate any data slices + * of its sibling + */ +SEC("?raw_tp") +__failure __msg("invalid mem access 'scalar'") +int clone_invalidate6(void *ctx) +{ + struct bpf_dynptr ptr; + struct bpf_dynptr clone1; + struct bpf_dynptr clone2; + int *data; + + bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone1); + + bpf_dynptr_clone(&ptr, &clone2); + + data = bpf_dynptr_data(&clone1, 0, sizeof(val)); + if (!data) + return 0; + + bpf_ringbuf_submit_dynptr(&clone2, 0); + + /* this should fail */ + *data = 123; + + return 0; +} + +/* A skb clone's data slices should be invalid anytime packet data changes */ +SEC("?tc") +__failure __msg("invalid mem access 'scalar'") +int clone_skb_packet_data(struct __sk_buff *skb) +{ + char buffer[sizeof(__u32)] = {}; + struct bpf_dynptr clone; + struct bpf_dynptr ptr; + __u32 *data; + + bpf_dynptr_from_skb(skb, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer)); + if (!data) + return XDP_DROP; + + if (bpf_skb_pull_data(skb, skb->len)) + return SK_DROP; + + /* this should fail */ + *data = 123; + + return 0; +} + +/* A xdp clone's data slices should be invalid anytime packet data changes */ +SEC("?xdp") +__failure __msg("invalid mem access 'scalar'") +int clone_xdp_packet_data(struct xdp_md 
*xdp) +{ + char buffer[sizeof(__u32)] = {}; + struct bpf_dynptr clone; + struct bpf_dynptr ptr; + struct ethhdr *hdr; + __u32 *data; + + bpf_dynptr_from_xdp(xdp, 0, &ptr); + + bpf_dynptr_clone(&ptr, &clone); + data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer)); + if (!data) + return XDP_DROP; + + if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr))) + return XDP_DROP; + + /* this should fail */ + *data = 123; + + return 0; +} + +/* Buffers that are provided must be sufficiently long */ +SEC("?cgroup_skb/egress") +__failure __msg("memory, len pair leads to invalid memory access") +int test_dynptr_skb_small_buff(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + char buffer[8] = {}; + __u64 *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This may return NULL. SKB may require a buffer */ + data = bpf_dynptr_slice(&ptr, 0, buffer, 9); + + return !!data; +} diff --git a/tools/testing/selftests/bpf/progs/dynptr_success.c b/tools/testing/selftests/bpf/progs/dynptr_success.c index b2fa6c47ecc0..5985920d162e 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_success.c +++ b/tools/testing/selftests/bpf/progs/dynptr_success.c @@ -2,6 +2,7 @@ /* Copyright (c) 2022 Facebook */ #include <string.h> +#include <stdbool.h> #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" @@ -207,3 +208,339 @@ int test_dynptr_skb_data(struct __sk_buff *skb) return 1; } + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_adjust(void *ctx) +{ + struct bpf_dynptr ptr; + __u32 bytes = 64; + __u32 off = 10; + __u32 trim = 15; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + err = bpf_ringbuf_reserve_dynptr(&ringbuf, bytes, 0, &ptr); + if (err) { + err = 1; + goto done; + } + + if (bpf_dynptr_size(&ptr) != bytes) { + err = 2; + goto done; + } + + /* Advance the dynptr by off */ + err = bpf_dynptr_adjust(&ptr, off, bpf_dynptr_size(&ptr)); + if (err) { + err = 3; + goto done; + } + + if (bpf_dynptr_size(&ptr) != bytes - off) { + err = 4; + goto done; + } + + /* Trim the dynptr */ + err = bpf_dynptr_adjust(&ptr, off, 15); + if (err) { + err = 5; + goto done; + } + + /* Check that the size was adjusted correctly */ + if (bpf_dynptr_size(&ptr) != trim - off) { + err = 6; + goto done; + } + +done: + bpf_ringbuf_discard_dynptr(&ptr, 0); + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_adjust_err(void *ctx) +{ + char write_data[45] = "hello there, world!!"; + struct bpf_dynptr ptr; + __u32 size = 64; + __u32 off = 20; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) { + err = 1; + goto done; + } + + /* Check that start can't be greater than end */ + if (bpf_dynptr_adjust(&ptr, 5, 1) != -EINVAL) { + err = 2; + goto done; + } + + /* Check that start can't be greater than size */ + if (bpf_dynptr_adjust(&ptr, size + 1, size + 1) != -ERANGE) { + err = 3; + goto done; + } + + /* Check that end can't be greater than size */ + if (bpf_dynptr_adjust(&ptr, 0, size + 1) != -ERANGE) { + err = 4; + goto done; + } + + if (bpf_dynptr_adjust(&ptr, off, size)) { + err = 5; + goto done; + } + + /* Check that you can't write more bytes than available into the dynptr + * after you've adjusted it + */ + if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) { + err = 6; + goto done; + } + + /* Check that even after adjusting, submitting/discarding + * a ringbuf dynptr works + */ + bpf_ringbuf_submit_dynptr(&ptr, 0); + return 0; + +done: + 
bpf_ringbuf_discard_dynptr(&ptr, 0); + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_zero_size_dynptr(void *ctx) +{ + char write_data = 'x', read_data; + struct bpf_dynptr ptr; + __u32 size = 64; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr)) { + err = 1; + goto done; + } + + /* After this, the dynptr has a size of 0 */ + if (bpf_dynptr_adjust(&ptr, size, size)) { + err = 2; + goto done; + } + + /* Test that reading + writing non-zero bytes is not ok */ + if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) { + err = 3; + goto done; + } + + if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) { + err = 4; + goto done; + } + + /* Test that reading + writing 0 bytes from a 0-size dynptr is ok */ + if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) { + err = 5; + goto done; + } + + if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) { + err = 6; + goto done; + } + + err = 0; + +done: + bpf_ringbuf_discard_dynptr(&ptr, 0); + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_dynptr_is_null(void *ctx) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + __u64 size = 4; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + /* Pass in invalid flags, get back an invalid dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) { + err = 1; + goto exit_early; + } + + /* Test that the invalid dynptr is null */ + if (!bpf_dynptr_is_null(&ptr1)) { + err = 2; + goto exit_early; + } + + /* Get a valid dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) { + err = 3; + goto exit; + } + + /* Test that the valid dynptr is not null */ + if (bpf_dynptr_is_null(&ptr2)) { + err = 4; + goto exit; + } + +exit: + bpf_ringbuf_discard_dynptr(&ptr2, 0); +exit_early: + bpf_ringbuf_discard_dynptr(&ptr1, 0); + return 0; +} + +SEC("cgroup_skb/egress") +int test_dynptr_is_rdonly(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + struct bpf_dynptr ptr3; + + /* Pass in invalid flags, get back an invalid dynptr */ + if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) { + err = 1; + return 0; + } + + /* Test that an invalid dynptr is_rdonly returns false */ + if (bpf_dynptr_is_rdonly(&ptr1)) { + err = 2; + return 0; + } + + /* Get a read-only dynptr */ + if (bpf_dynptr_from_skb(skb, 0, &ptr2)) { + err = 3; + return 0; + } + + /* Test that the dynptr is read-only */ + if (!bpf_dynptr_is_rdonly(&ptr2)) { + err = 4; + return 0; + } + + /* Get a read-writable dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) { + err = 5; + goto done; + } + + /* Test that the dynptr is not read-only */ + if (bpf_dynptr_is_rdonly(&ptr3)) { + err = 6; + goto done; + } + +done: + bpf_ringbuf_discard_dynptr(&ptr3, 0); + return 0; +} + +SEC("cgroup_skb/egress") +int test_dynptr_clone(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + __u32 off = 2, size; + + /* Get a dynptr */ + if (bpf_dynptr_from_skb(skb, 0, &ptr1)) { + err = 1; + return 0; + } + + if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) { + err = 2; + return 0; + } + + /* Clone the dynptr */ + if (bpf_dynptr_clone(&ptr1, &ptr2)) { + err = 3; + return 0; + } + + size = bpf_dynptr_size(&ptr1); + + /* Check that the clone has the same size and read-only flag */ + if (bpf_dynptr_size(&ptr2) != size) { + err = 4; + return 0; + } + + if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) { + err = 5; + return
0; + } + + /* Advance and trim the original dynptr */ + bpf_dynptr_adjust(&ptr1, 5, 5); + + /* Check that only the original dynptr was affected, and the clone wasn't */ + if (bpf_dynptr_size(&ptr2) != size) { + err = 6; + return 0; + } + + return 0; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_skb_no_buff(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + __u64 *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This may return NULL. SKB may require a buffer */ + data = bpf_dynptr_slice(&ptr, 0, NULL, 1); + + return !!data; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_skb_strcmp(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + char *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This may return NULL. SKB may require a buffer */ + data = bpf_dynptr_slice(&ptr, 0, NULL, 10); + if (data) { + bpf_strncmp(data, 10, "foo"); + return 1; + } + + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index be16143ae292..6b9b3c56f009 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -651,29 +651,25 @@ int iter_stack_array_loop(const void *ctx) return sum; } -#define ARR_SZ 16 - -static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul) +static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul) { - int *t; - __u64 i; + int *t, i; while ((t = bpf_iter_num_next(it))) { i = *t; - if (i >= ARR_SZ) + if (i >= n) break; arr[i] = i * mul; } } -static __noinline int sum(struct bpf_iter_num *it, int *arr) +static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n) { - int *t, sum = 0;; - __u64 i; + int *t, i, sum = 0; while ((t = bpf_iter_num_next(it))) { i = *t; - if (i >= ARR_SZ) + if (i >= n) break; sum += arr[i]; } @@ -685,7 +681,7 @@ SEC("raw_tp") __success int iter_pass_iter_ptr_to_subprog(const void *ctx) { - int arr1[ARR_SZ], arr2[ARR_SZ]; + int arr1[16], arr2[32]; struct bpf_iter_num it; int n, sum1, sum2; @@ -694,25 +690,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx) /* fill arr1 */ n = ARRAY_SIZE(arr1); bpf_iter_num_new(&it, 0, n); - fill(&it, arr1, 2); + fill(&it, arr1, n, 2); bpf_iter_num_destroy(&it); /* fill arr2 */ n = ARRAY_SIZE(arr2); bpf_iter_num_new(&it, 0, n); - fill(&it, arr2, 10); + fill(&it, arr2, n, 10); bpf_iter_num_destroy(&it); /* sum arr1 */ n = ARRAY_SIZE(arr1); bpf_iter_num_new(&it, 0, n); - sum1 = sum(&it, arr1); + sum1 = sum(&it, arr1, n); bpf_iter_num_destroy(&it); /* sum arr2 */ n = ARRAY_SIZE(arr2); bpf_iter_num_new(&it, 0, n); - sum2 = sum(&it, arr2); + sum2 = sum(&it, arr2, n); bpf_iter_num_destroy(&it); bpf_printk("sum1=%d, sum2=%d", sum1, sum2); diff --git a/tools/testing/selftests/bpf/progs/jit_probe_mem.c b/tools/testing/selftests/bpf/progs/jit_probe_mem.c index 13f00ca2ed0a..f9789e668297 100644 --- a/tools/testing/selftests/bpf/progs/jit_probe_mem.c +++ b/tools/testing/selftests/bpf/progs/jit_probe_mem.c @@ -3,13 +3,11 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "../bpf_testmod/bpf_testmod_kfunc.h" static struct prog_test_ref_kfunc __kptr *v; long total_sum = -1; -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; - SEC("tc") int test_jit_probe_mem(struct __sk_buff *ctx) { diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c
b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c index 767472bc5a97..7632d9ecb253 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_destructive.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> - -extern void bpf_kfunc_call_test_destructive(void) __ksym; +#include "../bpf_testmod/bpf_testmod_kfunc.h" SEC("tc") int kfunc_destructive_test(void) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c index b98313d391c6..4b0b7b79cdfb 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_fail.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_fail.c @@ -2,14 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> - -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; -extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; -extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; -extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; -extern int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; -extern void bpf_kfunc_call_int_mem_release(int *p) __ksym; +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct syscall_test_args { __u8 data[16]; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_race.c b/tools/testing/selftests/bpf/progs/kfunc_call_race.c index 4e8fed75a4e0..d532af07decf 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_race.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_race.c @@ -1,8 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 #include <vmlinux.h> #include <bpf/bpf_helpers.h> - -extern void bpf_testmod_test_mod_kfunc(int i) __ksym; +#include "../bpf_testmod/bpf_testmod_kfunc.h" SEC("tc") int kfunc_call_fail(struct __sk_buff *ctx) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index 7daa8f5720b9..cf68d1e48a0f 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -2,22 +2,7 @@ /* Copyright (c) 2021 Facebook */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> - -extern long bpf_kfunc_call_test4(signed char a, short b, int c, long d) __ksym; -extern int bpf_kfunc_call_test2(struct sock *sk, __u32 a, __u32 b) __ksym; -extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, - __u32 c, __u64 d) __ksym; - -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; -extern void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb) __ksym; -extern void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p) __ksym; -extern void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p) __ksym; -extern void bpf_kfunc_call_test_mem_len_pass1(void *mem, int len) __ksym; -extern void bpf_kfunc_call_test_mem_len_fail2(__u64 *mem, int len) __ksym; -extern int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p, const int rdwr_buf_size) __ksym; -extern int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p, const int rdonly_buf_size) __ksym; -extern u32 
bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused) __ksym; +#include "../bpf_testmod/bpf_testmod_kfunc.h" SEC("tc") int kfunc_call_test4(struct __sk_buff *skb) diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c index c1fdecabeabf..2380c75e74ce 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test_subprog.c @@ -1,13 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2021 Facebook */ -#include <linux/bpf.h> -#include <bpf/bpf_helpers.h> -#include "bpf_tcp_helpers.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" extern const int bpf_prog_active __ksym; -extern __u64 bpf_kfunc_call_test1(struct sock *sk, __u32 a, __u64 b, - __u32 c, __u64 d) __ksym; -extern struct sock *bpf_kfunc_call_test3(struct sock *sk) __ksym; int active_res = -1; int sk_state_res = -1; @@ -28,7 +23,7 @@ int __noinline f1(struct __sk_buff *skb) if (active) active_res = *active; - sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->sk_state; + sk_state_res = bpf_kfunc_call_test3((struct sock *)sk)->__sk_common.skc_state; return (__u32)bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4); } diff --git a/tools/testing/selftests/bpf/progs/local_kptr_stash.c b/tools/testing/selftests/bpf/progs/local_kptr_stash.c index 0ef286da092b..06838083079c 100644 --- a/tools/testing/selftests/bpf/progs/local_kptr_stash.c +++ b/tools/testing/selftests/bpf/progs/local_kptr_stash.c @@ -5,7 +5,8 @@ #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> -#include "bpf_experimental.h" +#include "../bpf_experimental.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct node_data { long key; @@ -32,8 +33,6 @@ struct map_value { */ struct node_data *just_here_because_btf_bug; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; - struct { __uint(type, BPF_MAP_TYPE_ARRAY); __type(key, int); diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c index d7150041e5d1..da30f0d59364 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr.c +++ b/tools/testing/selftests/bpf/progs/map_kptr.c @@ -2,6 +2,7 @@ #include <vmlinux.h> #include <bpf/bpf_tracing.h> #include <bpf/bpf_helpers.h> +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct map_value { struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr; @@ -114,10 +115,6 @@ DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps); DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps); DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps); -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; -void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p) __ksym; - #define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val)) static void test_kptr_unref(struct map_value *v) diff --git a/tools/testing/selftests/bpf/progs/map_kptr_fail.c b/tools/testing/selftests/bpf/progs/map_kptr_fail.c index da8c724f839b..450bb373b179 100644 --- a/tools/testing/selftests/bpf/progs/map_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/map_kptr_fail.c @@ -4,6 +4,7 @@ #include <bpf/bpf_helpers.h> #include <bpf/bpf_core_read.h> #include "bpf_misc.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" struct map_value { char buf[8]; @@ -19,9 
+20,6 @@ struct array_map { __uint(max_entries, 1); } array_map SEC(".maps"); -extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym; -extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; - SEC("?tc") __failure __msg("kptr access size must be BPF_DW") int size_not_bpf_dw(struct __sk_buff *ctx) diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c index 1d348a225140..a3da610b1e6b 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c @@ -375,6 +375,8 @@ long rbtree_refcounted_node_ref_escapes(void *ctx) bpf_rbtree_add(&aroot, &n->node, less_a); m = bpf_refcount_acquire(n); bpf_spin_unlock(&alock); + if (!m) + return 2; m->key = 2; bpf_obj_drop(m); diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c index efcb308f80ad..0b09e5c915b1 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -29,7 +29,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) } SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=21") +__failure __msg("Unreleased reference id=4 alloc_insn=21") long rbtree_refcounted_node_ref_escapes(void *ctx) { struct node_acquire *n, *m; @@ -43,6 +43,8 @@ long rbtree_refcounted_node_ref_escapes(void *ctx) /* m becomes an owning ref but is never drop'd or added to a tree */ m = bpf_refcount_acquire(n); bpf_spin_unlock(&glock); + if (!m) + return 2; m->key = 2; return 0; diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c new file mode 100644 index 000000000000..9e0bf7a54cec --- /dev/null +++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog.c @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> +#include <bpf/bpf_endian.h> + +#include "bpf_tracing_net.h" + +__be16 serv_port = 0; + +int bpf_sock_destroy(struct sock_common *sk) __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} tcp_conn_sockets SEC(".maps"); + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, __u32); + __type(value, __u64); +} udp_conn_sockets SEC(".maps"); + +SEC("cgroup/connect6") +int sock_connect(struct bpf_sock_addr *ctx) +{ + __u64 sock_cookie = 0; + int key = 0; + __u32 keyc = 0; + + if (ctx->family != AF_INET6 || ctx->user_family != AF_INET6) + return 1; + + sock_cookie = bpf_get_socket_cookie(ctx); + if (ctx->protocol == IPPROTO_TCP) + bpf_map_update_elem(&tcp_conn_sockets, &key, &sock_cookie, 0); + else if (ctx->protocol == IPPROTO_UDP) + bpf_map_update_elem(&udp_conn_sockets, &keyc, &sock_cookie, 0); + else + return 1; + + return 1; +} + +SEC("iter/tcp") +int iter_tcp6_client(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + __u64 sock_cookie = 0; + __u64 *val; + int key = 0; + + if (!sk_common) + return 0; + + if (sk_common->skc_family != AF_INET6) + return 0; + + sock_cookie = bpf_get_socket_cookie(sk_common); + val = bpf_map_lookup_elem(&tcp_conn_sockets, &key); + if (!val) + return 0; + /* Destroy connected client sockets. 
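As an editorial aside: nothing in these iterator programs runs on its own. Destruction is driven from userspace, which attaches the iterator and read()s it so that bpf_sock_destroy() gets invoked on each visited socket. A minimal, hypothetical driver sketch follows; the skeleton header name and program handle are assumptions inferred from the program above, not taken from the accompanying prog_tests file:

#include <unistd.h>
#include <bpf/libbpf.h>
#include "sock_destroy_prog.skel.h"	/* assumed skeleton header name */

static int run_destroy_iter(struct sock_destroy_prog *skel)
{
	struct bpf_link *link;
	char buf[16];
	int iter_fd, ret = 0;

	/* Attach the iter/tcp program; reading the iterator FD below is
	 * what actually walks the sockets.
	 */
	link = bpf_program__attach_iter(skel->progs.iter_tcp6_client, NULL);
	if (!link)
		return -1;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		ret = -1;
		goto out;
	}

	/* Draining the iterator visits every TCP6 socket once; matching
	 * ones are destroyed by the BPF program via bpf_sock_destroy().
	 */
	while (read(iter_fd, buf, sizeof(buf)) > 0)
		;

	close(iter_fd);
out:
	bpf_link__destroy(link);
	return ret;
}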
*/ + if (sock_cookie == *val) + bpf_sock_destroy(sk_common); + + return 0; +} + +SEC("iter/tcp") +int iter_tcp6_server(struct bpf_iter__tcp *ctx) +{ + struct sock_common *sk_common = ctx->sk_common; + const struct inet_connection_sock *icsk; + const struct inet_sock *inet; + struct tcp6_sock *tcp_sk; + __be16 srcp; + + if (!sk_common) + return 0; + + if (sk_common->skc_family != AF_INET6) + return 0; + + tcp_sk = bpf_skc_to_tcp6_sock(sk_common); + if (!tcp_sk) + return 0; + + icsk = &tcp_sk->tcp.inet_conn; + inet = &icsk->icsk_inet; + srcp = inet->inet_sport; + + /* Destroy server sockets. */ + if (srcp == serv_port) + bpf_sock_destroy(sk_common); + + return 0; +} + + +SEC("iter/udp") +int iter_udp6_client(struct bpf_iter__udp *ctx) +{ + struct udp_sock *udp_sk = ctx->udp_sk; + struct sock *sk = (struct sock *) udp_sk; + __u64 sock_cookie = 0, *val; + int key = 0; + + if (!sk) + return 0; + + sock_cookie = bpf_get_socket_cookie(sk); + val = bpf_map_lookup_elem(&udp_conn_sockets, &key); + if (!val) + return 0; + /* Destroy connected client sockets. */ + if (sock_cookie == *val) + bpf_sock_destroy((struct sock_common *)sk); + + return 0; +} + +SEC("iter/udp") +int iter_udp6_server(struct bpf_iter__udp *ctx) +{ + struct udp_sock *udp_sk = ctx->udp_sk; + struct sock *sk = (struct sock *) udp_sk; + struct inet_sock *inet; + __be16 srcp; + + if (!sk) + return 0; + + inet = &udp_sk->inet; + srcp = inet->inet_sport; + if (srcp == serv_port) + bpf_sock_destroy((struct sock_common *)sk); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c new file mode 100644 index 000000000000..dd6850b58e25 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/sock_destroy_prog_fail.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include "vmlinux.h" +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "bpf_misc.h" + +char _license[] SEC("license") = "GPL"; + +int bpf_sock_destroy(struct sock_common *sk) __ksym; + +SEC("tp_btf/tcp_destroy_sock") +__failure __msg("calling kernel function bpf_sock_destroy is not allowed") +int BPF_PROG(trace_tcp_destroy_sock, struct sock *sk) +{ + /* should not load */ + bpf_sock_destroy((struct sock_common *)sk); + + return 0; +} + diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c index 9fb241b97291..c8f59caa4639 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c @@ -9,6 +9,8 @@ char _license[] SEC("license") = "GPL"; #define CUSTOM_INHERIT2 1 #define CUSTOM_LISTENER 2 +__u32 page_size = 0; + struct sockopt_inherit { __u8 val; }; @@ -55,7 +57,7 @@ int _getsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_CUSTOM) - return 1; /* only interested in SOL_CUSTOM */ + goto out; /* only interested in SOL_CUSTOM */ if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -70,6 +72,12 @@ int _getsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. 
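The page_size global used here (and in the other sockopt programs touched by this series) is deliberately left at zero in the object file: a BPF program cannot portably know the kernel's PAGE_SIZE, so the loader is expected to fill it in between open and load. A sketch of that userspace side, assuming the generated skeleton name:

#include <unistd.h>
#include "sockopt_inherit.skel.h"	/* assumed skeleton header name */

static struct sockopt_inherit *open_load_sockopt_inherit(void)
{
	struct sockopt_inherit *skel;

	skel = sockopt_inherit__open();
	if (!skel)
		return NULL;

	/* Written before load so the program can recognize optval
	 * buffers larger than a page, which the kernel replaces with
	 * its own buffer.
	 */
	skel->bss->page_size = sysconf(_SC_PAGESIZE);

	if (sockopt_inherit__load(skel)) {
		sockopt_inherit__destroy(skel);
		return NULL;
	}
	return skel;
}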
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -80,7 +88,7 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_CUSTOM) - return 1; /* only interested in SOL_CUSTOM */ + goto out; /* only interested in SOL_CUSTOM */ if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -93,4 +101,10 @@ int _setsockopt(struct bpf_sockopt *ctx) ctx->optlen = -1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c index 177a59069dae..96f29fce050b 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_multi.c +++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c @@ -5,6 +5,8 @@ char _license[] SEC("license") = "GPL"; +__u32 page_size = 0; + SEC("cgroup/getsockopt") int _getsockopt_child(struct bpf_sockopt *ctx) { @@ -12,7 +14,7 @@ int _getsockopt_child(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -26,6 +28,12 @@ int _getsockopt_child(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/getsockopt") @@ -35,7 +43,7 @@ int _getsockopt_parent(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -49,6 +57,12 @@ int _getsockopt_parent(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -58,7 +72,7 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -67,4 +81,10 @@ int _setsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c index 1bce83b6e3a7..dbe235ede7f3 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c +++ b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c @@ -9,6 +9,8 @@ char _license[] SEC("license") = "GPL"; +__u32 page_size = 0; + SEC("cgroup/setsockopt") int sockopt_qos_to_cc(struct bpf_sockopt *ctx) { @@ -19,7 +21,7 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx) char cc_cubic[TCP_CA_NAME_MAX] = "cubic"; if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -36,4 +38,10 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx) return 0; } return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. 
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c index fe1df4cd206e..cb990a7d3d45 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -37,7 +37,7 @@ int _getsockopt(struct bpf_sockopt *ctx) /* Bypass AF_NETLINK. */ sk = ctx->sk; if (sk && sk->family == AF_NETLINK) - return 1; + goto out; /* Make sure bpf_get_netns_cookie is callable. */ @@ -52,8 +52,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - ctx->optlen = 0; /* bypass optval>PAGE_SIZE */ - return 1; + goto out; } if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) { @@ -61,7 +60,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - return 1; + goto out; } if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) { @@ -69,7 +68,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - return 1; + goto out; } if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) { @@ -85,7 +84,7 @@ int _getsockopt(struct bpf_sockopt *ctx) if (((struct tcp_zerocopy_receive *)optval)->address != 0) return 0; /* unexpected data */ - return 1; + goto out; } if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) { @@ -129,6 +128,12 @@ int _getsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -142,7 +147,7 @@ int _setsockopt(struct bpf_sockopt *ctx) /* Bypass AF_NETLINK. */ sk = ctx->sk; if (sk && sk->family == AF_NETLINK) - return 1; + goto out; /* Make sure bpf_get_netns_cookie is callable. */ @@ -224,4 +229,10 @@ int _setsockopt(struct bpf_sockopt *ctx) */ return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index b85fc8c423ba..17a9f59bf5f3 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -10,6 +10,8 @@ static __attribute__ ((noinline)) int f0(int var, struct __sk_buff *skb) { + asm volatile (""); + return skb->len; } diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c new file mode 100644 index 000000000000..2588f2384246 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c @@ -0,0 +1,58 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
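The new test_global_map_resize.c program that follows relies on the loader growing global-data sections before load: libbpf models .bss/.data as array maps, and a datasec whose last variable is an array can be resized with bpf_map__set_value_size(). A hedged loader-side sketch; the skeleton header, map handle, and sizing arithmetic are assumptions here, not taken from the accompanying prog_tests file:

#include <bpf/libbpf.h>
#include "test_global_map_resize.skel.h"	/* assumed skeleton header name */

static int resize_bss_array(size_t new_len)
{
	struct test_global_map_resize *skel;
	size_t sz;
	int err = -1;

	skel = test_global_map_resize__open();
	if (!skel)
		return -1;

	/* .bss here holds 'int sum' followed by 'int array[1]'; growing
	 * the map's value size effectively extends the trailing array.
	 */
	sz = sizeof(int) + new_len * sizeof(int);
	if (bpf_map__set_value_size(skel->maps.bss, sz))
		goto out;

	/* Tell the program how many elements it may safely sum. */
	skel->rodata->bss_array_len = new_len;

	err = test_global_map_resize__load(skel);
out:
	test_global_map_resize__destroy(skel);
	return err;
}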
*/ + +#include "vmlinux.h" +#include <bpf/bpf_helpers.h> + +char _license[] SEC("license") = "GPL"; + +/* rodata section */ +const volatile pid_t pid; +const volatile size_t bss_array_len; +const volatile size_t data_array_len; + +/* bss section */ +int sum = 0; +int array[1]; + +/* custom data section */ +int my_array[1] SEC(".data.custom"); + +/* custom data section which should NOT be resizable, + * since it contains a single var which is not an array + */ +int my_int SEC(".data.non_array"); + +/* custom data section which should NOT be resizable, + * since its last var is not an array + */ +int my_array_first[1] SEC(".data.array_not_last"); +int my_int_last SEC(".data.array_not_last"); + +SEC("tp/syscalls/sys_enter_getpid") +int bss_array_sum(void *ctx) +{ + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + sum = 0; + + for (size_t i = 0; i < bss_array_len; ++i) + sum += array[i]; + + return 0; +} + +SEC("tp/syscalls/sys_enter_getuid") +int data_array_sum(void *ctx) +{ + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + sum = 0; + + for (size_t i = 0; i < data_array_len; ++i) + sum += my_array[i]; + + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/test_sock_fields.c b/tools/testing/selftests/bpf/progs/test_sock_fields.c index bbad3c2d9aa5..f75e531bf36f 100644 --- a/tools/testing/selftests/bpf/progs/test_sock_fields.c +++ b/tools/testing/selftests/bpf/progs/test_sock_fields.c @@ -265,7 +265,10 @@ static __noinline bool sk_dst_port__load_word(struct bpf_sock *sk) static __noinline bool sk_dst_port__load_half(struct bpf_sock *sk) { - __u16 *half = (__u16 *)&sk->dst_port; + __u16 *half; + + asm volatile (""); + half = (__u16 *)&sk->dst_port; return half[0] == bpf_htons(0xcafe); } diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c new file mode 100644 index 000000000000..56cdc0a553f0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Bytedance */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "bpf_misc.h" + +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; + +const volatile int local_pid; +const volatile __u64 cgid; +int remote_pid; + +SEC("tp_btf/task_newtask") +int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags) +{ + struct cgroup *cgrp = NULL; + struct task_struct *acquired; + + if (local_pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + acquired = bpf_task_acquire(task); + if (!acquired) + return 0; + + if (local_pid == acquired->tgid) + goto out; + + cgrp = bpf_cgroup_from_id(cgid); + if (!cgrp) + goto out; + + if (bpf_task_under_cgroup(acquired, cgrp)) + remote_pid = acquired->tgid; + +out: + if (cgrp) + bpf_cgroup_release(cgrp); + bpf_task_release(acquired); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c index 25ee4a22e48d..78c368e71797 100644 --- a/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c +++ b/tools/testing/selftests/bpf/progs/test_xdp_dynptr.c @@ -2,6 +2,7 @@ /* Copyright (c)
2022 Meta */ #include <stddef.h> #include <string.h> +#include <stdbool.h> #include <linux/bpf.h> #include <linux/if_ether.h> #include <linux/if_packet.h> diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c new file mode 100644 index 000000000000..13b29a7faa71 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c @@ -0,0 +1,659 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +/* Check that precision marks propagate through scalar IDs. + * Registers r{0,1,2} have the same scalar ID at the moment when r0 is + * marked to be precise, this mark is immediately propagated to r{1,2}. + */ +SEC("socket") +__success __log_level(2) +__msg("frame0: regs=r0,r1,r2 stack= before 4: (bf) r3 = r10") +__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_same_state(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r2.id */ + "r1 = r0;" + "r2 = r0;" + /* force r0 to be precise, this immediately marks r1 and r2 as + * precise as well because of shared IDs + */ + "r3 = r10;" + "r3 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Same as precision_same_state, but mark propagates through state / + * parent state boundary. + */ +SEC("socket") +__success __log_level(2) +__msg("frame0: last_idx 6 first_idx 5 subseq_idx -1") +__msg("frame0: regs=r0,r1,r2 stack= before 5: (bf) r3 = r10") +__msg("frame0: parent state regs=r0,r1,r2 stack=:") +__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0") +__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("frame0: parent state regs=r0 stack=:") +__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_cross_state(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r2.id */ + "r1 = r0;" + "r2 = r0;" + /* force checkpoint */ + "goto +0;" + /* force r0 to be precise, this immediately marks r1 and r2 as + * precise as well because of shared IDs + */ + "r3 = r10;" + "r3 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Same as precision_same_state, but break one of the + * links, note that r1 is absent from regs=... in __msg below. 
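Before the remaining variants, it may help to see the C-level shape of what shared scalar IDs model: an assignment gives both variables one scalar ID, so a bound learned through either copy must mark the other precise as well. A purely illustrative sketch, not part of this file; the compiler is free to place values differently, and bpf_get_prandom_u32() merely stands in for any unbound scalar:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("socket")
int scalar_id_sketch(void *ctx)
{
	char buf[16] = {};
	__u64 unknown = bpf_get_prandom_u32() & 0xff;	/* unbound scalar */
	__u64 copy = unknown;		/* copy shares unknown's scalar ID */

	if (copy < sizeof(buf))		/* range learned via 'copy'... */
		return buf[unknown];	/* ...must also cover 'unknown' */
	return 0;
}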
+ */ +SEC("socket") +__success __log_level(2) +__msg("frame0: regs=r0,r2 stack= before 5: (bf) r3 = r10") +__msg("frame0: regs=r0,r2 stack= before 4: (b7) r1 = 0") +__msg("frame0: regs=r0,r2 stack= before 3: (bf) r2 = r0") +__msg("frame0: regs=r0 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_same_state_broken_link(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r2.id */ + "r1 = r0;" + "r2 = r0;" + /* break the link for r1; this is the only line that differs + * compared to the previous test + */ + "r1 = 0;" + /* force r0 to be precise, this immediately marks r1 and r2 as + * precise as well because of shared IDs + */ + "r3 = r10;" + "r3 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Same as precision_same_state_broken_link, but with state / + * parent state boundary. + */ +SEC("socket") +__success __log_level(2) +__msg("frame0: regs=r0,r2 stack= before 6: (bf) r3 = r10") +__msg("frame0: regs=r0,r2 stack= before 5: (b7) r1 = 0") +__msg("frame0: parent state regs=r0,r2 stack=:") +__msg("frame0: regs=r0,r1,r2 stack= before 4: (05) goto pc+0") +__msg("frame0: regs=r0,r1,r2 stack= before 3: (bf) r2 = r0") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__msg("frame0: parent state regs=r0 stack=:") +__msg("frame0: regs=r0 stack= before 0: (85) call bpf_ktime_get_ns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_cross_state_broken_link(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r2.id */ + "r1 = r0;" + "r2 = r0;" + /* force checkpoint; although the link between r1 and r{0,2} is + * broken by the next statement, the current precision tracking + * algorithm can't react to it and propagates the mark for r1 to + * the parent state. + */ + "goto +0;" + /* break the link for r1; this is the only line that differs + * compared to precision_cross_state() + */ + "r1 = 0;" + /* force r0 to be precise, this immediately marks r1 and r2 as + * precise as well because of shared IDs + */ + "r3 = r10;" + "r3 += r0;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Check that precision marks propagate through scalar IDs. + * Use the same scalar ID in multiple stack frames and check that + * precision information is propagated up the call stack.
+ */ +SEC("socket") +__success __log_level(2) +__msg("11: (0f) r2 += r1") +/* Current state */ +__msg("frame2: last_idx 11 first_idx 10 subseq_idx -1") +__msg("frame2: regs=r1 stack= before 10: (bf) r2 = r10") +__msg("frame2: parent state regs=r1 stack=") +/* frame1.r{6,7} are marked because mark_precise_scalar_ids() + * looks for all registers with frame2.r1.id in the current state + */ +__msg("frame1: parent state regs=r6,r7 stack=") +__msg("frame0: parent state regs=r6 stack=") +/* Parent state */ +__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10") +__msg("frame2: regs=r1 stack= before 8: (85) call pc+1") +/* frame1.r1 is marked because of backtracking of call instruction */ +__msg("frame1: parent state regs=r1,r6,r7 stack=") +__msg("frame0: parent state regs=r6 stack=") +/* Parent state */ +__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8") +__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1") +__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1") +__msg("frame1: parent state regs=r1 stack=") +__msg("frame0: parent state regs=r6 stack=") +/* Parent state */ +__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6") +__msg("frame1: regs=r1 stack= before 4: (85) call pc+1") +__msg("frame0: parent state regs=r1,r6 stack=") +/* Parent state */ +__msg("frame0: last_idx 3 first_idx 1 subseq_idx 4") +__msg("frame0: regs=r0,r1,r6 stack= before 3: (bf) r6 = r0") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_many_frames(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == r6.id */ + "r1 = r0;" + "r6 = r0;" + "call precision_many_frames__foo;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +static __naked __noinline __used +void precision_many_frames__foo(void) +{ + asm volatile ( + /* conflate one of the register numbers (r6) with outer frame, + * to verify that those are tracked independently + */ + "r6 = r1;" + "r7 = r1;" + "call precision_many_frames__bar;" + "exit" + ::: __clobber_all); +} + +static __naked __noinline __used +void precision_many_frames__bar(void) +{ + asm volatile ( + /* force r1 to be precise, this immediately marks: + * - bar frame r1 + * - foo frame r{1,6,7} + * - main frame r{1,6} + */ + "r2 = r10;" + "r2 += r1;" + "r0 = 0;" + "exit;" + ::: __clobber_all); +} + +/* Check that scalars with the same IDs are marked precise on stack as + * well as in registers. 
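The stack case matters because a spilled scalar keeps its ID, so precision has to follow fills as well as register copies. Roughly, at the C level (illustrative only; 'volatile' is used here just to force a spill/fill through the stack, and the compiler may still arrange things differently):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("socket")
int stack_id_sketch(void *ctx)
{
	char buf[16] = {};
	__u64 v = bpf_get_prandom_u32() & 0xff;
	volatile __u64 spilled = v;	/* spill slot shares v's scalar ID */

	if (spilled < sizeof(buf))	/* bound learned through the fill... */
		return buf[v];		/* ...must propagate back to v */
	return 0;
}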
+ */ +SEC("socket") +__success __log_level(2) +/* foo frame */ +__msg("frame1: regs=r1 stack=-8,-16 before 9: (bf) r2 = r10") +__msg("frame1: regs=r1 stack=-8,-16 before 8: (7b) *(u64 *)(r10 -16) = r1") +__msg("frame1: regs=r1 stack=-8 before 7: (7b) *(u64 *)(r10 -8) = r1") +__msg("frame1: regs=r1 stack= before 4: (85) call pc+2") +/* main frame */ +__msg("frame0: regs=r0,r1 stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r1") +__msg("frame0: regs=r0,r1 stack= before 2: (bf) r1 = r0") +__msg("frame0: regs=r0 stack= before 1: (57) r0 &= 255") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_stack(void) +{ + asm volatile ( + /* r0 = random number up to 0xff */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* tie r0.id == r1.id == fp[-8].id */ + "r1 = r0;" + "*(u64*)(r10 - 8) = r1;" + "call precision_stack__foo;" + "r0 = 0;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +static __naked __noinline __used +void precision_stack__foo(void) +{ + asm volatile ( + /* conflate one of the stack slots (fp[-8]) with the outer frame, + * to verify that those are tracked independently + */ + "*(u64*)(r10 - 8) = r1;" + "*(u64*)(r10 - 16) = r1;" + /* force r1 to be precise, this immediately marks: + * - foo frame r1,fp{-8,-16} + * - main frame r1,fp{-8} + */ + "r2 = r10;" + "r2 += r1;" + "exit" + ::: __clobber_all); +} + +/* Use two separate scalar IDs to check that these are propagated + * independently. + */ +SEC("socket") +__success __log_level(2) +/* r{6,7} */ +__msg("11: (0f) r3 += r7") +__msg("frame0: regs=r6,r7 stack= before 10: (bf) r3 = r10") +/* ... skip some insns ... */ +__msg("frame0: regs=r6,r7 stack= before 3: (bf) r7 = r0") +__msg("frame0: regs=r0,r6 stack= before 2: (bf) r6 = r0") +/* r{8,9} */ +__msg("12: (0f) r3 += r9") +__msg("frame0: regs=r8,r9 stack= before 11: (0f) r3 += r7") +/* ... skip some insns ... */ +__msg("frame0: regs=r8,r9 stack= before 7: (bf) r9 = r0") +__msg("frame0: regs=r0,r8 stack= before 6: (bf) r8 = r0") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void precision_two_ids(void) +{ + asm volatile ( + /* r6 = random number up to 0xff + * r6.id == r7.id + */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + "r6 = r0;" + "r7 = r0;" + /* same, but for r{8,9} */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + "r8 = r0;" + "r9 = r0;" + /* clear r0 id */ + "r0 = 0;" + /* force checkpoint */ + "goto +0;" + "r3 = r10;" + /* force r7 to be precise, this also marks r6 */ + "r3 += r7;" + /* force r9 to be precise, this also marks r8 */ + "r3 += r9;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Verify that check_ids() is used by regsafe() for scalars. + * + * r9 = ... some pointer with range X ... + * r6 = ... unbound scalar ID=a ... + * r7 = ... unbound scalar ID=b ... + * if (r6 > r7) goto +1 + * r7 = r6 + * if (r7 > X) goto exit + * r9 += r6 + * ... access memory using r9 ... + * + * The memory access is safe only if r6 is bounded, which is true + * for one branch (where "r7 = r6" ties the IDs) and not true for another.
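The same hazard reads naturally in C; a sketch like the one below should be rejected for exactly the reason the comment describes, since on the path where the IDs are never tied the index stays unbounded (illustrative only, not part of the test file):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";

SEC("socket")
int check_ids_sketch(void *ctx)
{
	char buf[8] = {};
	__u64 a = bpf_get_prandom_u32();
	__u64 b = bpf_get_prandom_u32();

	if (a <= b)
		b = a;		/* ties a's and b's scalar IDs on this path */
	if (b > 4)
		return 0;
	/* only the tied path bounds 'a'; on the other path just b is
	 * bounded while 'a' may exceed the buffer, so this access must
	 * fail verification
	 */
	return buf[a];
}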
+ */
+SEC("socket")
+__failure __msg("register with unbounded min value")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_in_regsafe(void)
+{
+	asm volatile (
+	/* Bump allocated stack */
+	"r1 = 0;"
+	"*(u64*)(r10 - 8) = r1;"
+	/* r9 = pointer to stack */
+	"r9 = r10;"
+	"r9 += -8;"
+	/* r7 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r7 = r0;"
+	/* r6 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r6 = r0;"
+	/* the "if r6 > r7" jump is unpredictable */
+	"if r6 > r7 goto l1_%=;"
+	"r7 = r6;"
+"l1_%=:"
+	/* "if r7 > 4" transfers the range to r6 on one execution path
+	 * but does not transfer it on the other
+	 */
+	"if r7 > 4 goto l2_%=;"
+	/* Access memory at r9[r6], r6 is not always bounded */
+	"r9 += r6;"
+	"r0 = *(u8*)(r9 + 0);"
+"l2_%=:"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Similar to check_ids_in_regsafe.
+ * Label l0 could be reached in two states:
+ *
+ *   (1) r6{.id=A}, r7{.id=A}, r8{.id=B}
+ *   (2) r6{.id=B}, r7{.id=A}, r8{.id=B}
+ *
+ * Where (2) is not safe, as the "r7 > 4" check does not propagate
+ * a range to r6 in that state. Without the changes that make
+ * mark_chain_precision() track scalar values with equal IDs, this
+ * example would be (incorrectly) considered safe.
+ */
+SEC("socket")
+__failure __msg("register with unbounded min value")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void check_ids_in_regsafe_2(void)
+{
+	asm volatile (
+	/* Bump allocated stack */
+	"r1 = 0;"
+	"*(u64*)(r10 - 8) = r1;"
+	/* r9 = pointer to stack */
+	"r9 = r10;"
+	"r9 += -8;"
+	/* r8 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r8 = r0;"
+	/* r7 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r7 = r0;"
+	/* r6 = ktime_get_ns() */
+	"call %[bpf_ktime_get_ns];"
+	"r6 = r0;"
+	/* clear the .id from r0 */
+	"r0 = 0;"
+	/* the "if r6 > r7" jump is unpredictable */
+	"if r6 > r7 goto l1_%=;"
+	/* tie r6 and r7 .id */
+	"r6 = r7;"
+"l0_%=:"
+	/* if r7 > 4 exit(0) */
+	"if r7 > 4 goto l2_%=;"
+	/* Access memory at r9[r6] */
+	"r9 += r6;"
+	"r0 = *(u8*)(r9 + 0);"
+"l2_%=:"
+	"r0 = 0;"
+	"exit;"
+"l1_%=:"
+	/* tie r6 and r8 .id */
+	"r6 = r8;"
+	"goto l0_%=;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Check that scalar IDs *are not* generated on register to register
+ * assignments if the source register is a constant.
+ *
+ * If such IDs *were* generated, the label 'l1' below would be reached
+ * in two states:
+ *
+ *   (1) r3{.id=A}, r4{.id=A}
+ *   (2) r3{.id=B}, r4{.id=C}
+ *
+ * Thus forcing 'if r3 == r4' verification twice.
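+ *
+ * Since no IDs are assigned for constants, both paths reach 'l1' with
+ * plain unlinked scalars, the states compare equal and the jump is
+ * verified only once (note the single "11: safe" and the "processed
+ * 15 insns" matches below).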
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (1d) if r3 == r4 goto pc+0")
+__msg("frame 0: propagating r3,r4")
+__msg("11: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void no_scalar_id_for_const(void)
+{
+	asm volatile (
+	"call %[bpf_ktime_get_ns];"
+	/* unpredictable jump */
+	"if r0 > 7 goto l0_%=;"
+	/* possibly generate same scalar ids for r3 and r4 */
+	"r1 = 0;"
+	"r1 = r1;"
+	"r3 = r1;"
+	"r4 = r1;"
+	"goto l1_%=;"
+"l0_%=:"
+	/* possibly generate different scalar ids for r3 and r4 */
+	"r1 = 0;"
+	"r2 = 0;"
+	"r3 = r1;"
+	"r4 = r2;"
+"l1_%=:"
+	/* predictable jump, marks r3 and r4 precise */
+	"if r3 == r4 goto +0;"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Same as no_scalar_id_for_const() but for 32-bit values */
+SEC("socket")
+__success __log_level(2)
+__msg("11: (1e) if w3 == w4 goto pc+0")
+__msg("frame 0: propagating r3,r4")
+__msg("11: safe")
+__msg("processed 15 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void no_scalar_id_for_const32(void)
+{
+	asm volatile (
+	"call %[bpf_ktime_get_ns];"
+	/* unpredictable jump */
+	"if r0 > 7 goto l0_%=;"
+	/* possibly generate same scalar ids for r3 and r4 */
+	"w1 = 0;"
+	"w1 = w1;"
+	"w3 = w1;"
+	"w4 = w1;"
+	"goto l1_%=;"
+"l0_%=:"
+	/* possibly generate different scalar ids for r3 and r4 */
+	"w1 = 0;"
+	"w2 = 0;"
+	"w3 = w1;"
+	"w4 = w2;"
+"l1_%=:"
+	/* predictable jump, marks r3 and r4 precise */
+	"if w3 == w4 goto +0;"
+	"r0 = 0;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Check that unique scalar IDs are ignored when new verifier state is
+ * compared to cached verifier state. For this test:
+ * - cached state has no id on r1
+ * - new state has a unique id on r1
+ */
+SEC("socket")
+__success __log_level(2)
+__msg("6: (25) if r6 > 0x7 goto pc+1")
+__msg("7: (57) r1 &= 255")
+__msg("8: (bf) r2 = r10")
+__msg("from 6 to 8: safe")
+__msg("processed 12 insns")
+__flag(BPF_F_TEST_STATE_FREQ)
+__naked void ignore_unique_scalar_ids_cur(void)
+{
+	asm volatile (
+	"call %[bpf_ktime_get_ns];"
+	"r6 = r0;"
+	"call %[bpf_ktime_get_ns];"
+	"r0 &= 0xff;"
+	/* r1.id == r0.id */
+	"r1 = r0;"
+	/* make r1.id unique */
+	"r0 = 0;"
+	"if r6 > 7 goto l0_%=;"
+	/* clear r1 id, but keep the range compatible */
+	"r1 &= 0xff;"
+"l0_%=:"
+	/* get here in two states:
+	 * - first: r1 has no id (cached state)
+	 * - second: r1 has a unique id (should be considered equivalent)
+	 */
+	"r2 = r10;"
+	"r2 += r1;"
+	"exit;"
+	:
+	: __imm(bpf_ktime_get_ns)
+	: __clobber_all);
+}
+
+/* Check that unique scalar IDs are ignored when new verifier state is
+ * compared to cached verifier state.
For this test: + * - cached state has a unique id on r1 + * - new state has no id on r1 + */ +SEC("socket") +__success __log_level(2) +__msg("6: (25) if r6 > 0x7 goto pc+1") +__msg("7: (05) goto pc+1") +__msg("9: (bf) r2 = r10") +__msg("9: safe") +__msg("processed 13 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void ignore_unique_scalar_ids_old(void) +{ + asm volatile ( + "call %[bpf_ktime_get_ns];" + "r6 = r0;" + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + /* r1.id == r0.id */ + "r1 = r0;" + /* make r1.id unique */ + "r0 = 0;" + "if r6 > 7 goto l1_%=;" + "goto l0_%=;" +"l1_%=:" + /* clear r1 id, but keep the range compatible */ + "r1 &= 0xff;" +"l0_%=:" + /* get here in two states: + * - first: r1 has a unique id (cached state) + * - second: r1 has no id (should be considered equivalent) + */ + "r2 = r10;" + "r2 += r1;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +/* Check that two different scalar IDs in a verified state can't be + * mapped to the same scalar ID in current state. + */ +SEC("socket") +__success __log_level(2) +/* The exit instruction should be reachable from two states, + * use two matches and "processed .. insns" to ensure this. + */ +__msg("13: (95) exit") +__msg("13: (95) exit") +__msg("processed 18 insns") +__flag(BPF_F_TEST_STATE_FREQ) +__naked void two_old_ids_one_cur_id(void) +{ + asm volatile ( + /* Give unique scalar IDs to r{6,7} */ + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + "r6 = r0;" + "call %[bpf_ktime_get_ns];" + "r0 &= 0xff;" + "r7 = r0;" + "r0 = 0;" + /* Maybe make r{6,7} IDs identical */ + "if r6 > r7 goto l0_%=;" + "goto l1_%=;" +"l0_%=:" + "r6 = r7;" +"l1_%=:" + /* Mark r{6,7} precise. + * Get here in two states: + * - first: r6{.id=A}, r7{.id=B} (cached state) + * - second: r6{.id=A}, r7{.id=A} + * Currently we don't want to consider such states equivalent. + * Thus "exit;" would be verified twice. + */ + "r2 = r10;" + "r2 += r6;" + "r2 += r7;" + "exit;" + : + : __imm(bpf_ktime_get_ns) + : __clobber_all); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c new file mode 100644 index 000000000000..db6b3143338b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <errno.h> +#include <string.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) + +int vals[] SEC(".data.vals") = {1, 2, 3, 4}; + +__naked __noinline __used +static unsigned long identity_subprog() +{ + /* the simplest *static* 64-bit identity function */ + asm volatile ( + "r0 = r1;" + "exit;" + ); +} + +__noinline __used +unsigned long global_identity_subprog(__u64 x) +{ + /* the simplest *global* 64-bit identity function */ + return x; +} + +__naked __noinline __used +static unsigned long callback_subprog() +{ + /* the simplest callback function */ + asm volatile ( + "r0 = 0;" + "exit;" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r0") +__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4") +__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit") +__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1") +__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int subprog_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + /* pass r6 through r1 into subprog to get it back as r0; + * this whole chain will have to be marked as precise later + */ + "r1 = r6;" + "call identity_subprog;" + /* now use subprog's returned value (which is a + * r6 -> r1 -> r0 chain), as index into vals array, forcing + * all of that to be known precisely + */ + "r0 *= 4;" + "r1 = %[vals];" + /* here r0->r1->r6 chain is forced to be precise and has to be + * propagated back to the beginning, including through the + * subprog call + */ + "r1 += r0;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r0") +__msg("mark_precise: frame0: last_idx 9 first_idx 0") +__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4") +__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1") +__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7") +__naked int global_subprog_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + /* pass r6 through r1 into subprog to get it back as r0; + * given global_identity_subprog is global, precision won't + * propagate all the way back to r6 + */ + "r1 = r6;" + "call global_identity_subprog;" + /* now use subprog's returned value (which is unknown now, so + * we need to clamp it), as index into vals array, forcing r0 + * to be marked precise (with no effect on r6, though) + */ + "if r0 < %[vals_arr_sz] goto 1f;" + "r0 = %[vals_arr_sz] - 1;" + "1:" + "r0 *= 4;" + "r1 = %[vals];" + /* here r0 is forced to be precise and has to be + * propagated back to the global subprog call, but it + * shouldn't go all the way to mark r6 as precise + */ + "r1 += r0;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_const(vals_arr_sz, ARRAY_SIZE(vals)) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("14: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 14 first_idx 10") +__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") 
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4") +__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0") +__msg("mark_precise: frame0: parent state regs=r0 stack=:") +__msg("mark_precise: frame0: last_idx 18 first_idx 0") +__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit") +__naked int callback_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and use result; r0 shouldn't propagate back to + * callback_subprog + */ + "r1 = r6;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + "r6 = r0;" + "if r6 > 3 goto 1f;" + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the bpf_loop() call, but not beyond + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "1:" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 7 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1") +__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call identity_subprog;" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 7 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise_global(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call global_identity_subprog;" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("12: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 12 first_idx 10") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4") +__msg("mark_precise: frame0: parent state regs=r6 stack=:") +__msg("mark_precise: frame0: last_idx 16 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) 
exit") +__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0") +__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise_with_callback(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 1;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) callback call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 9 first_idx 6") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 13 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1") +__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6") +__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_stack_slot_precise(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call identity_subprog;" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 9 first_idx 6") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 5 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6") +__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int 
parent_stack_slot_precise_global(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call global_identity_subprog;" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("14: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 14 first_idx 11") +__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 18 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0") +__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181") +__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8") +__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6") +__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +__naked int parent_stack_slot_precise_with_callback(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* ensure we have callback frame in jump history */ + "r1 = r6;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +__noinline __used +static __u64 subprog_with_precise_arg(__u64 x) +{ + return vals[x]; /* x is forced to be precise */ +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("8: (0f) r2 += r1") +__msg("mark_precise: frame1: last_idx 8 first_idx 0") +__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ") +__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2") +__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2") +__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3") +__naked int subprog_arg_precise(void) +{ + asm volatile ( + "r6 = 3;" + "r1 = r6;" + /* subprog_with_precise_arg expects its argument to be + * precise, so r1->r6 will be marked precise from inside the + * subprog + */ + "call subprog_with_precise_arg;" + "r0 += r6;" + "exit;" + : + : + : 
__clobber_common, "r6"
+	);
+}
+
+/* r1 is a pointer to a stack slot;
+ * r2 is a register to spill into that slot;
+ * the subprog also spills r2 into its own stack slot
+ */
+__naked __noinline __used
+static __u64 subprog_spill_reg_precise(void)
+{
+	asm volatile (
+	/* spill to parent stack */
+	"*(u64 *)(r1 + 0) = r2;"
+	/* spill to subprog stack (we use -16 offset to avoid
+	 * accidental confusion with parent's -8 stack slot in
+	 * verifier log output)
+	 */
+	"*(u64 *)(r10 - 16) = r2;"
+	/* use both spills as return result to propagate precision everywhere */
+	"r0 = *(u64 *)(r10 - 16);"
+	"r2 = *(u64 *)(r1 + 0);"
+	"r0 += r2;"
+	"exit;"
+	);
+}
+
+SEC("?raw_tp")
+__success __log_level(2)
+/* precision backtracking can't currently handle stack access not through r10,
+ * so we won't be able to mark stack slot fp-8 as precise, and so will
+ * fall back to forcing all scalars precise
+ */
+__msg("mark_precise: frame0: falling back to forcing all scalars precise")
+__naked int subprog_spill_into_parent_stack_slot_precise(void)
+{
+	asm volatile (
+	"r6 = 1;"
+
+	/* pass pointer to stack slot and r6 to subprog;
+	 * r6 will be marked precise and spilled into fp-8 slot, which
+	 * also should be marked precise
+	 */
+	"r1 = r10;"
+	"r1 += -8;"
+	"r2 = r6;"
+	"call subprog_spill_reg_precise;"
+
+	/* restore reg from stack; in this case we'll be carrying
+	 * stack mask when going back into subprog through jump
+	 * history
+	 */
+	"r7 = *(u64 *)(r10 - 8);"
+
+	"r7 *= 4;"
+	"r1 = %[vals];"
+	/* here r7 is forced to be precise and has to be propagated
+	 * back to the beginning, handling subprog call and logic
+	 */
+	"r1 += r7;"
+	"r0 = *(u32 *)(r1 + 0);"
+	"exit;"
+	:
+	: __imm_ptr(vals)
+	: __clobber_common, "r6", "r7"
+	);
+}
+
+__naked __noinline __used
+static __u64 subprog_with_checkpoint(void)
+{
+	asm volatile (
+	"r0 = 0;"
+	/* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */
+	"goto +0;"
+	"exit;"
+	);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c b/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c
new file mode 100644
index 000000000000..bcfb6feb38c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/vrf_socket_lookup.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_endian.h>
+
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_ether.h>
+#include <linux/pkt_cls.h>
+#include <stdbool.h>
+
+int lookup_status;
+bool test_xdp;
+bool tcp_skc;
+
+#define CUR_NS BPF_F_CURRENT_NETNS
+
+static void socket_lookup(void *ctx, void *data_end, void *data)
+{
+	struct ethhdr *eth = data;
+	struct bpf_sock_tuple *tp;
+	struct bpf_sock *sk;
+	struct iphdr *iph;
+	int tplen;
+
+	if (eth + 1 > data_end)
+		return;
+
+	if (eth->h_proto != bpf_htons(ETH_P_IP))
+		return;
+
+	iph = (struct iphdr *)(eth + 1);
+	if (iph + 1 > data_end)
+		return;
+
+	tp = (struct bpf_sock_tuple *)&iph->saddr;
+	tplen = sizeof(tp->ipv4);
+	if ((void *)tp + tplen > data_end)
+		return;
+
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+		if (tcp_skc)
+			sk = bpf_skc_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
+		else
+			sk = bpf_sk_lookup_tcp(ctx, tp, tplen, CUR_NS, 0);
+		break;
+	case IPPROTO_UDP:
+		sk = bpf_sk_lookup_udp(ctx, tp, tplen, CUR_NS, 0);
+		break;
+	default:
+		return;
+	}
+
+	lookup_status = 0;
+
+	if (sk) {
+		bpf_sk_release(sk);
+		lookup_status = 1;
+	}
+}
+
+SEC("tc")
+int tc_socket_lookup(struct __sk_buff *skb)
+{
+	void *data_end = (void *)(long)skb->data_end;
+	void 
*data = (void *)(long)skb->data; + + if (test_xdp) + return TC_ACT_UNSPEC; + + socket_lookup(skb, data_end, data); + return TC_ACT_UNSPEC; +} + +SEC("xdp") +int xdp_socket_lookup(struct xdp_md *xdp) +{ + void *data_end = (void *)(long)xdp->data_end; + void *data = (void *)(long)xdp->data; + + if (!test_xdp) + return XDP_PASS; + + socket_lookup(xdp, data_end, data); + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c index e1c787815e44..b2dfd7066c6e 100644 --- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c @@ -77,7 +77,9 @@ int rx(struct xdp_md *ctx) } err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp); - if (err) + if (!err) + meta->xdp_timestamp = bpf_ktime_get_tai_ns(); + else meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */ err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index ea82921110da..4d582cac2c09 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -11,7 +11,6 @@ #include <signal.h> #include <string.h> #include <execinfo.h> /* backtrace */ -#include <linux/membarrier.h> #include <sys/sysinfo.h> /* get_nprocs */ #include <netinet/in.h> #include <sys/select.h> @@ -629,68 +628,6 @@ out: return err; } -static int finit_module(int fd, const char *param_values, int flags) -{ - return syscall(__NR_finit_module, fd, param_values, flags); -} - -static int delete_module(const char *name, int flags) -{ - return syscall(__NR_delete_module, name, flags); -} - -/* - * Trigger synchronize_rcu() in kernel. 
- */ -int kern_sync_rcu(void) -{ - return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0); -} - -static void unload_bpf_testmod(void) -{ - if (kern_sync_rcu()) - fprintf(env.stderr, "Failed to trigger kernel-side RCU sync!\n"); - if (delete_module("bpf_testmod", 0)) { - if (errno == ENOENT) { - if (verbose()) - fprintf(stdout, "bpf_testmod.ko is already unloaded.\n"); - return; - } - fprintf(env.stderr, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno); - return; - } - if (verbose()) - fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n"); -} - -static int load_bpf_testmod(void) -{ - int fd; - - /* ensure previous instance of the module is unloaded */ - unload_bpf_testmod(); - - if (verbose()) - fprintf(stdout, "Loading bpf_testmod.ko...\n"); - - fd = open("bpf_testmod.ko", O_RDONLY); - if (fd < 0) { - fprintf(env.stderr, "Can't find bpf_testmod.ko kernel module: %d\n", -errno); - return -ENOENT; - } - if (finit_module(fd, "", 0)) { - fprintf(env.stderr, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno); - close(fd); - return -EINVAL; - } - close(fd); - - if (verbose()) - fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n"); - return 0; -} - /* extern declarations for test funcs */ #define DEFINE_TEST(name) \ extern void test_##name(void) __weak; \ @@ -714,7 +651,13 @@ static struct test_state test_states[ARRAY_SIZE(prog_test_defs)]; const char *argp_program_version = "test_progs 0.1"; const char *argp_program_bug_address = "<bpf@vger.kernel.org>"; -static const char argp_program_doc[] = "BPF selftests test runner"; +static const char argp_program_doc[] = +"BPF selftests test runner\v" +"Options accepting the NAMES parameter take either a comma-separated list\n" +"of test names, or a filename prefixed with @. The file contains one name\n" +"(or wildcard pattern) per line, and comments beginning with # are ignored.\n" +"\n" +"These options can be passed repeatedly to read multiple files.\n"; enum ARG_KEYS { ARG_TEST_NUM = 'n', @@ -797,6 +740,7 @@ extern int extra_prog_load_log_flags; static error_t parse_arg(int key, char *arg, struct argp_state *state) { struct test_env *env = state->input; + int err = 0; switch (key) { case ARG_TEST_NUM: { @@ -821,18 +765,28 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) } case ARG_TEST_NAME_GLOB_ALLOWLIST: case ARG_TEST_NAME: { - if (parse_test_list(arg, - &env->test_selector.whitelist, - key == ARG_TEST_NAME_GLOB_ALLOWLIST)) - return -ENOMEM; + if (arg[0] == '@') + err = parse_test_list_file(arg + 1, + &env->test_selector.whitelist, + key == ARG_TEST_NAME_GLOB_ALLOWLIST); + else + err = parse_test_list(arg, + &env->test_selector.whitelist, + key == ARG_TEST_NAME_GLOB_ALLOWLIST); + break; } case ARG_TEST_NAME_GLOB_DENYLIST: case ARG_TEST_NAME_BLACKLIST: { - if (parse_test_list(arg, - &env->test_selector.blacklist, - key == ARG_TEST_NAME_GLOB_DENYLIST)) - return -ENOMEM; + if (arg[0] == '@') + err = parse_test_list_file(arg + 1, + &env->test_selector.blacklist, + key == ARG_TEST_NAME_GLOB_DENYLIST); + else + err = parse_test_list(arg, + &env->test_selector.blacklist, + key == ARG_TEST_NAME_GLOB_DENYLIST); + break; } case ARG_VERIFIER_STATS: @@ -900,7 +854,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) default: return ARGP_ERR_UNKNOWN; } - return 0; + return err; } /* @@ -1703,9 +1657,14 @@ int main(int argc, char **argv) env.stderr = stderr; env.has_testmod = true; - if (!env.list_test_names && load_bpf_testmod()) { - fprintf(env.stderr, "WARNING! 
Selftests relying on bpf_testmod.ko will be skipped.\n");
-		env.has_testmod = false;
+	if (!env.list_test_names) {
+		/* ensure previous instance of the module is unloaded */
+		unload_bpf_testmod(verbose());
+
+		if (load_bpf_testmod(verbose())) {
+			fprintf(env.stderr, "WARNING! Selftests relying on bpf_testmod.ko will be skipped.\n");
+			env.has_testmod = false;
+		}
 	}
 
 	/* initializing tests */
@@ -1802,7 +1761,7 @@ int main(int argc, char **argv)
 		close(env.saved_netns_fd);
 out:
 	if (!env.list_test_names && env.has_testmod)
-		unload_bpf_testmod();
+		unload_bpf_testmod(verbose());
 
 	free_test_selector(&env.test_selector);
 	free_test_selector(&env.subtest_selector);
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index 0ed3134333d4..77bd492c6024 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -405,7 +405,6 @@ static inline void *u64_to_ptr(__u64 ptr)
 int bpf_find_map(const char *test, struct bpf_object *obj, const char *name);
 int compare_map_keys(int map1_fd, int map2_fd);
 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len);
-int kern_sync_rcu(void);
 int trigger_module_test_read(int read_sz);
 int trigger_module_test_write(int write_sz);
 int write_sysctl(const char *sysctl, const char *value);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index e4657c5bc3f1..31f1c935cd07 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -40,6 +40,7 @@
 #include "bpf_util.h"
 #include "test_btf.h"
 #include "../../../include/linux/filter.h"
+#include "testing_helpers.h"
 
 #ifndef ENOTSUPP
 #define ENOTSUPP 524
@@ -873,8 +874,140 @@ static int create_map_kptr(void)
 	return fd;
 }
 
+static void set_root(bool set)
+{
+	__u64 caps;
+
+	if (set) {
+		if (cap_enable_effective(1ULL << CAP_SYS_ADMIN, &caps))
+			perror("cap_enable_effective(CAP_SYS_ADMIN)");
+	} else {
+		if (cap_disable_effective(1ULL << CAP_SYS_ADMIN, &caps))
+			perror("cap_disable_effective(CAP_SYS_ADMIN)");
+	}
+}
+
+static __u64 ptr_to_u64(const void *ptr)
+{
+	return (uintptr_t) ptr;
+}
+
+static struct btf *btf__load_testmod_btf(struct btf *vmlinux)
+{
+	struct bpf_btf_info info;
+	__u32 len = sizeof(info);
+	struct btf *btf = NULL;
+	char name[64];
+	__u32 id = 0;
+	int err, fd;
+
+	/* Iterate all loaded BTF objects and find bpf_testmod,
+	 * we need SYS_ADMIN cap for that.
+	 */
+	set_root(true);
+
+	while (true) {
+		err = bpf_btf_get_next_id(id, &id);
+		if (err) {
+			if (errno == ENOENT)
+				break;
+			perror("bpf_btf_get_next_id failed");
+			break;
+		}
+
+		fd = bpf_btf_get_fd_by_id(id);
+		if (fd < 0) {
+			if (errno == ENOENT)
+				continue;
+			perror("bpf_btf_get_fd_by_id failed");
+			break;
+		}
+
+		memset(&info, 0, sizeof(info));
+		info.name_len = sizeof(name);
+		info.name = ptr_to_u64(name);
+		len = sizeof(info);
+
+		err = bpf_obj_get_info_by_fd(fd, &info, &len);
+		if (err) {
+			close(fd);
+			perror("bpf_obj_get_info_by_fd failed");
+			break;
+		}
+
+		if (strcmp("bpf_testmod", name)) {
+			close(fd);
+			continue;
+		}
+
+		btf = btf__load_from_kernel_by_id_split(id, vmlinux);
+		if (!btf) {
+			close(fd);
+			break;
+		}
+
+		/* We need the fd to stay open so it can be used in fd_array.
+		 * The final cleanup call to btf__free will free the btf
+		 * object and close the file descriptor.
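+		 * (The fd is intentionally not closed here; kfuncs_cleanup()
+		 * performs that final btf__free() call once the test run is
+		 * done.)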
+ */ + btf__set_fd(btf, fd); + break; + } + + set_root(false); + return btf; +} + +static struct btf *testmod_btf; +static struct btf *vmlinux_btf; + +static void kfuncs_cleanup(void) +{ + btf__free(testmod_btf); + btf__free(vmlinux_btf); +} + +static void fixup_prog_kfuncs(struct bpf_insn *prog, int *fd_array, + struct kfunc_btf_id_pair *fixup_kfunc_btf_id) +{ + /* Patch in kfunc BTF IDs */ + while (fixup_kfunc_btf_id->kfunc) { + int btf_id = 0; + + /* try to find kfunc in kernel BTF */ + vmlinux_btf = vmlinux_btf ?: btf__load_vmlinux_btf(); + if (vmlinux_btf) { + btf_id = btf__find_by_name_kind(vmlinux_btf, + fixup_kfunc_btf_id->kfunc, + BTF_KIND_FUNC); + btf_id = btf_id < 0 ? 0 : btf_id; + } + + /* kfunc not found in kernel BTF, try bpf_testmod BTF */ + if (!btf_id) { + testmod_btf = testmod_btf ?: btf__load_testmod_btf(vmlinux_btf); + if (testmod_btf) { + btf_id = btf__find_by_name_kind(testmod_btf, + fixup_kfunc_btf_id->kfunc, + BTF_KIND_FUNC); + btf_id = btf_id < 0 ? 0 : btf_id; + if (btf_id) { + /* We put bpf_testmod module fd into fd_array + * and its index 1 into instruction 'off'. + */ + *fd_array = btf__fd(testmod_btf); + prog[fixup_kfunc_btf_id->insn_idx].off = 1; + } + } + } + + prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id; + fixup_kfunc_btf_id++; + } +} + static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, - struct bpf_insn *prog, int *map_fds) + struct bpf_insn *prog, int *map_fds, int *fd_array) { int *fixup_map_hash_8b = test->fixup_map_hash_8b; int *fixup_map_hash_48b = test->fixup_map_hash_48b; @@ -899,7 +1032,6 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, int *fixup_map_ringbuf = test->fixup_map_ringbuf; int *fixup_map_timer = test->fixup_map_timer; int *fixup_map_kptr = test->fixup_map_kptr; - struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id; if (test->fill_helper) { test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn)); @@ -1100,25 +1232,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, } while (*fixup_map_kptr); } - /* Patch in kfunc BTF IDs */ - if (fixup_kfunc_btf_id->kfunc) { - struct btf *btf; - int btf_id; - - do { - btf_id = 0; - btf = btf__load_vmlinux_btf(); - if (btf) { - btf_id = btf__find_by_name_kind(btf, - fixup_kfunc_btf_id->kfunc, - BTF_KIND_FUNC); - btf_id = btf_id < 0 ? 
0 : btf_id; - } - btf__free(btf); - prog[fixup_kfunc_btf_id->insn_idx].imm = btf_id; - fixup_kfunc_btf_id++; - } while (fixup_kfunc_btf_id->kfunc); - } + fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id); } struct libcap { @@ -1227,45 +1341,46 @@ static bool cmp_str_seq(const char *log, const char *exp) return true; } -static int get_xlated_program(int fd_prog, struct bpf_insn **buf, int *cnt) +static struct bpf_insn *get_xlated_program(int fd_prog, int *cnt) { + __u32 buf_element_size = sizeof(struct bpf_insn); struct bpf_prog_info info = {}; __u32 info_len = sizeof(info); __u32 xlated_prog_len; - __u32 buf_element_size = sizeof(struct bpf_insn); + struct bpf_insn *buf; if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { perror("bpf_prog_get_info_by_fd failed"); - return -1; + return NULL; } xlated_prog_len = info.xlated_prog_len; if (xlated_prog_len % buf_element_size) { printf("Program length %d is not multiple of %d\n", xlated_prog_len, buf_element_size); - return -1; + return NULL; } *cnt = xlated_prog_len / buf_element_size; - *buf = calloc(*cnt, buf_element_size); + buf = calloc(*cnt, buf_element_size); if (!buf) { perror("can't allocate xlated program buffer"); - return -ENOMEM; + return NULL; } bzero(&info, sizeof(info)); info.xlated_prog_len = xlated_prog_len; - info.xlated_prog_insns = (__u64)(unsigned long)*buf; + info.xlated_prog_insns = (__u64)(unsigned long)buf; if (bpf_prog_get_info_by_fd(fd_prog, &info, &info_len)) { perror("second bpf_prog_get_info_by_fd failed"); goto out_free_buf; } - return 0; + return buf; out_free_buf: - free(*buf); - return -1; + free(buf); + return NULL; } static bool is_null_insn(struct bpf_insn *insn) @@ -1398,7 +1513,8 @@ static bool check_xlated_program(struct bpf_test *test, int fd_prog) if (!check_expected && !check_unexpected) goto out; - if (get_xlated_program(fd_prog, &buf, &cnt)) { + buf = get_xlated_program(fd_prog, &cnt); + if (!buf) { printf("FAIL: can't get xlated program\n"); result = false; goto out; @@ -1445,6 +1561,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, int run_errs, run_successes; int map_fds[MAX_NR_MAPS]; const char *expected_err; + int fd_array[2] = { -1, -1 }; int saved_errno; int fixup_skips; __u32 pflags; @@ -1458,7 +1575,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, if (!prog_type) prog_type = BPF_PROG_TYPE_SOCKET_FILTER; fixup_skips = skips; - do_test_fixup(test, prog_type, prog, map_fds); + do_test_fixup(test, prog_type, prog, map_fds, &fd_array[1]); if (test->fill_insns) { prog = test->fill_insns; prog_len = test->prog_len; @@ -1492,6 +1609,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv, else opts.log_level = DEFAULT_LIBBPF_LOG_LEVEL; opts.prog_flags = pflags; + if (fd_array[1] != -1) + opts.fd_array = &fd_array[0]; if ((prog_type == BPF_PROG_TYPE_TRACING || prog_type == BPF_PROG_TYPE_LSM) && test->kfunc) { @@ -1684,6 +1803,12 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to) { int i, passes = 0, errors = 0; + /* ensure previous instance of the module is unloaded */ + unload_bpf_testmod(verbose); + + if (load_bpf_testmod(verbose)) + return EXIT_FAILURE; + for (i = from; i < to; i++) { struct bpf_test *test = &tests[i]; @@ -1711,6 +1836,9 @@ static int do_test(bool unpriv, unsigned int from, unsigned int to) } } + unload_bpf_testmod(verbose); + kfuncs_cleanup(); + printf("Summary: %d PASSED, %d SKIPPED, %d FAILED\n", passes, skips, errors); return errors ? 
EXIT_FAILURE : EXIT_SUCCESS; diff --git a/tools/testing/selftests/bpf/test_xsk.sh b/tools/testing/selftests/bpf/test_xsk.sh index 377fb157a57c..c2ad50f26b63 100755 --- a/tools/testing/selftests/bpf/test_xsk.sh +++ b/tools/testing/selftests/bpf/test_xsk.sh @@ -68,9 +68,6 @@ # Run with verbose output: # sudo ./test_xsk.sh -v # -# Run and dump packet contents: -# sudo ./test_xsk.sh -D -# # Set up veth interfaces and leave them up so xskxceiver can be launched in a debugger: # sudo ./test_xsk.sh -d # @@ -81,11 +78,10 @@ ETH="" -while getopts "vDi:d" flag +while getopts "vi:d" flag do case "${flag}" in v) verbose=1;; - D) dump_pkts=1;; d) debug=1;; i) ETH=${OPTARG};; esac @@ -157,10 +153,6 @@ if [[ $verbose -eq 1 ]]; then ARGS+="-v " fi -if [[ $dump_pkts -eq 1 ]]; then - ARGS="-D " -fi - retval=$? test_status $retval "${TEST_NAME}" diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index 0b5e0829e5be..8d994884c7b4 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (C) 2019 Netronome Systems, Inc. */ /* Copyright (C) 2020 Facebook, Inc. */ +#include <ctype.h> #include <stdlib.h> #include <string.h> #include <errno.h> @@ -8,6 +9,7 @@ #include <bpf/libbpf.h> #include "test_progs.h" #include "testing_helpers.h" +#include <linux/membarrier.h> int parse_num_list(const char *s, bool **num_set, int *num_set_len) { @@ -70,92 +72,168 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len) return 0; } -int parse_test_list(const char *s, - struct test_filter_set *set, - bool is_glob_pattern) +static int do_insert_test(struct test_filter_set *set, + char *test_str, + char *subtest_str) { - char *input, *state = NULL, *next; - struct test_filter *tmp, *tests = NULL; - int i, j, cnt = 0; + struct test_filter *tmp, *test; + char **ctmp; + int i; - input = strdup(s); - if (!input) + for (i = 0; i < set->cnt; i++) { + test = &set->tests[i]; + + if (strcmp(test_str, test->name) == 0) { + free(test_str); + goto subtest; + } + } + + tmp = realloc(set->tests, sizeof(*test) * (set->cnt + 1)); + if (!tmp) return -ENOMEM; - while ((next = strtok_r(state ? 
NULL : input, ",", &state))) { - char *subtest_str = strchr(next, '/'); - char *pattern = NULL; - int glob_chars = 0; + set->tests = tmp; + test = &set->tests[set->cnt]; - tmp = realloc(tests, sizeof(*tests) * (cnt + 1)); - if (!tmp) - goto err; - tests = tmp; + test->name = test_str; + test->subtests = NULL; + test->subtest_cnt = 0; - tests[cnt].subtest_cnt = 0; - tests[cnt].subtests = NULL; + set->cnt++; - if (is_glob_pattern) { - pattern = "%s"; - } else { - pattern = "*%s*"; - glob_chars = 2; - } +subtest: + if (!subtest_str) + return 0; - if (subtest_str) { - char **tmp_subtests = NULL; - int subtest_cnt = tests[cnt].subtest_cnt; - - *subtest_str = '\0'; - subtest_str += 1; - tmp_subtests = realloc(tests[cnt].subtests, - sizeof(*tmp_subtests) * - (subtest_cnt + 1)); - if (!tmp_subtests) - goto err; - tests[cnt].subtests = tmp_subtests; - - tests[cnt].subtests[subtest_cnt] = - malloc(strlen(subtest_str) + glob_chars + 1); - if (!tests[cnt].subtests[subtest_cnt]) - goto err; - sprintf(tests[cnt].subtests[subtest_cnt], - pattern, - subtest_str); - - tests[cnt].subtest_cnt++; + for (i = 0; i < test->subtest_cnt; i++) { + if (strcmp(subtest_str, test->subtests[i]) == 0) { + free(subtest_str); + return 0; } + } - tests[cnt].name = malloc(strlen(next) + glob_chars + 1); - if (!tests[cnt].name) - goto err; - sprintf(tests[cnt].name, pattern, next); + ctmp = realloc(test->subtests, + sizeof(*test->subtests) * (test->subtest_cnt + 1)); + if (!ctmp) + return -ENOMEM; - cnt++; + test->subtests = ctmp; + test->subtests[test->subtest_cnt] = subtest_str; + + test->subtest_cnt++; + + return 0; +} + +static int insert_test(struct test_filter_set *set, + char *test_spec, + bool is_glob_pattern) +{ + char *pattern, *subtest_str, *ext_test_str, *ext_subtest_str = NULL; + int glob_chars = 0; + + if (is_glob_pattern) { + pattern = "%s"; + } else { + pattern = "*%s*"; + glob_chars = 2; } - tmp = realloc(set->tests, sizeof(*tests) * (cnt + set->cnt)); - if (!tmp) + subtest_str = strchr(test_spec, '/'); + if (subtest_str) { + *subtest_str = '\0'; + subtest_str += 1; + } + + ext_test_str = malloc(strlen(test_spec) + glob_chars + 1); + if (!ext_test_str) goto err; - memcpy(tmp + set->cnt, tests, sizeof(*tests) * cnt); - set->tests = tmp; - set->cnt += cnt; + sprintf(ext_test_str, pattern, test_spec); - free(tests); - free(input); - return 0; + if (subtest_str) { + ext_subtest_str = malloc(strlen(subtest_str) + glob_chars + 1); + if (!ext_subtest_str) + goto err; + + sprintf(ext_subtest_str, pattern, subtest_str); + } + + return do_insert_test(set, ext_test_str, ext_subtest_str); err: - for (i = 0; i < cnt; i++) { - for (j = 0; j < tests[i].subtest_cnt; j++) - free(tests[i].subtests[j]); + free(ext_test_str); + free(ext_subtest_str); + + return -ENOMEM; +} + +int parse_test_list_file(const char *path, + struct test_filter_set *set, + bool is_glob_pattern) +{ + char *buf = NULL, *capture_start, *capture_end, *scan_end; + size_t buflen = 0; + int err = 0; + FILE *f; + + f = fopen(path, "r"); + if (!f) { + err = -errno; + fprintf(stderr, "Failed to open '%s': %d\n", path, err); + return err; + } + + while (getline(&buf, &buflen, f) != -1) { + capture_start = buf; + + while (isspace(*capture_start)) + ++capture_start; + + capture_end = capture_start; + scan_end = capture_start; + + while (*scan_end && *scan_end != '#') { + if (!isspace(*scan_end)) + capture_end = scan_end; + + ++scan_end; + } + + if (capture_end == capture_start) + continue; + + *(++capture_end) = '\0'; + + err = insert_test(set, capture_start, 
is_glob_pattern); + if (err) + break; + } + + fclose(f); + return err; +} + +int parse_test_list(const char *s, + struct test_filter_set *set, + bool is_glob_pattern) +{ + char *input, *state = NULL, *test_spec; + int err = 0; + + input = strdup(s); + if (!input) + return -ENOMEM; - free(tests[i].name); + while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) { + err = insert_test(set, test_spec, is_glob_pattern); + if (err) + break; } - free(tests); + free(input); - return -ENOMEM; + return err; } __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info) @@ -249,3 +327,63 @@ __u64 read_perf_max_sample_freq(void) fclose(f); return sample_freq; } + +static int finit_module(int fd, const char *param_values, int flags) +{ + return syscall(__NR_finit_module, fd, param_values, flags); +} + +static int delete_module(const char *name, int flags) +{ + return syscall(__NR_delete_module, name, flags); +} + +int unload_bpf_testmod(bool verbose) +{ + if (kern_sync_rcu()) + fprintf(stdout, "Failed to trigger kernel-side RCU sync!\n"); + if (delete_module("bpf_testmod", 0)) { + if (errno == ENOENT) { + if (verbose) + fprintf(stdout, "bpf_testmod.ko is already unloaded.\n"); + return -1; + } + fprintf(stdout, "Failed to unload bpf_testmod.ko from kernel: %d\n", -errno); + return -1; + } + if (verbose) + fprintf(stdout, "Successfully unloaded bpf_testmod.ko.\n"); + return 0; +} + +int load_bpf_testmod(bool verbose) +{ + int fd; + + if (verbose) + fprintf(stdout, "Loading bpf_testmod.ko...\n"); + + fd = open("bpf_testmod.ko", O_RDONLY); + if (fd < 0) { + fprintf(stdout, "Can't find bpf_testmod.ko kernel module: %d\n", -errno); + return -ENOENT; + } + if (finit_module(fd, "", 0)) { + fprintf(stdout, "Failed to load bpf_testmod.ko into the kernel: %d\n", -errno); + close(fd); + return -EINVAL; + } + close(fd); + + if (verbose) + fprintf(stdout, "Successfully loaded bpf_testmod.ko.\n"); + return 0; +} + +/* + * Trigger synchronize_rcu() in kernel. + */ +int kern_sync_rcu(void) +{ + return syscall(__NR_membarrier, MEMBARRIER_CMD_SHARED, 0, 0); +} diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h index eb8790f928e4..5312323881b6 100644 --- a/tools/testing/selftests/bpf/testing_helpers.h +++ b/tools/testing/selftests/bpf/testing_helpers.h @@ -1,5 +1,9 @@ /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ /* Copyright (C) 2020 Facebook, Inc. 
*/ + +#ifndef __TESTING_HELPERS_H +#define __TESTING_HELPERS_H + #include <stdbool.h> #include <bpf/bpf.h> #include <bpf/libbpf.h> @@ -20,5 +24,13 @@ struct test_filter_set; int parse_test_list(const char *s, struct test_filter_set *test_set, bool is_glob_pattern); +int parse_test_list_file(const char *path, + struct test_filter_set *test_set, + bool is_glob_pattern); __u64 read_perf_max_sample_freq(void); +int load_bpf_testmod(bool verbose); +int unload_bpf_testmod(bool verbose); +int kern_sync_rcu(void); + +#endif /* __TESTING_HELPERS_H */ diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c index 6c03a7d805f9..99272bb890da 100644 --- a/tools/testing/selftests/bpf/verifier/precise.c +++ b/tools/testing/selftests/bpf/verifier/precise.c @@ -38,25 +38,24 @@ .fixup_map_array_48b = { 1 }, .result = VERBOSE_ACCEPT, .errstr = - "26: (85) call bpf_probe_read_kernel#113\ - last_idx 26 first_idx 20\ - regs=4 stack=0 before 25\ - regs=4 stack=0 before 24\ - regs=4 stack=0 before 23\ - regs=4 stack=0 before 22\ - regs=4 stack=0 before 20\ - parent didn't have regs=4 stack=0 marks\ - last_idx 19 first_idx 10\ - regs=4 stack=0 before 19\ - regs=200 stack=0 before 18\ - regs=300 stack=0 before 17\ - regs=201 stack=0 before 15\ - regs=201 stack=0 before 14\ - regs=200 stack=0 before 13\ - regs=200 stack=0 before 12\ - regs=200 stack=0 before 11\ - regs=200 stack=0 before 10\ - parent already had regs=0 stack=0 marks", + "mark_precise: frame0: last_idx 26 first_idx 20\ + mark_precise: frame0: regs=r2 stack= before 25\ + mark_precise: frame0: regs=r2 stack= before 24\ + mark_precise: frame0: regs=r2 stack= before 23\ + mark_precise: frame0: regs=r2 stack= before 22\ + mark_precise: frame0: regs=r2 stack= before 20\ + mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: last_idx 19 first_idx 10\ + mark_precise: frame0: regs=r2,r9 stack= before 19\ + mark_precise: frame0: regs=r9 stack= before 18\ + mark_precise: frame0: regs=r8,r9 stack= before 17\ + mark_precise: frame0: regs=r0,r9 stack= before 15\ + mark_precise: frame0: regs=r0,r9 stack= before 14\ + mark_precise: frame0: regs=r9 stack= before 13\ + mark_precise: frame0: regs=r9 stack= before 12\ + mark_precise: frame0: regs=r9 stack= before 11\ + mark_precise: frame0: regs=r9 stack= before 10\ + mark_precise: frame0: parent state regs= stack=:", }, { "precise: test 2", @@ -100,20 +99,20 @@ .flags = BPF_F_TEST_STATE_FREQ, .errstr = "26: (85) call bpf_probe_read_kernel#113\ - last_idx 26 first_idx 22\ - regs=4 stack=0 before 25\ - regs=4 stack=0 before 24\ - regs=4 stack=0 before 23\ - regs=4 stack=0 before 22\ - parent didn't have regs=4 stack=0 marks\ - last_idx 20 first_idx 20\ - regs=4 stack=0 before 20\ - parent didn't have regs=4 stack=0 marks\ - last_idx 19 first_idx 17\ - regs=4 stack=0 before 19\ - regs=200 stack=0 before 18\ - regs=300 stack=0 before 17\ - parent already had regs=0 stack=0 marks", + mark_precise: frame0: last_idx 26 first_idx 22\ + mark_precise: frame0: regs=r2 stack= before 25\ + mark_precise: frame0: regs=r2 stack= before 24\ + mark_precise: frame0: regs=r2 stack= before 23\ + mark_precise: frame0: regs=r2 stack= before 22\ + mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: last_idx 20 first_idx 20\ + mark_precise: frame0: regs=r2,r9 stack= before 20\ + mark_precise: frame0: parent state regs=r2,r9 stack=:\ + mark_precise: frame0: last_idx 19 first_idx 17\ + mark_precise: frame0: regs=r2,r9 stack= before 19\ + 
mark_precise: frame0: regs=r9 stack= before 18\ + mark_precise: frame0: regs=r8,r9 stack= before 17\ + mark_precise: frame0: parent state regs= stack=:", }, { "precise: cross frame pruning", @@ -153,15 +152,16 @@ }, .prog_type = BPF_PROG_TYPE_XDP, .flags = BPF_F_TEST_STATE_FREQ, - .errstr = "5: (2d) if r4 > r0 goto pc+0\ - last_idx 5 first_idx 5\ - parent didn't have regs=10 stack=0 marks\ - last_idx 4 first_idx 2\ - regs=10 stack=0 before 4\ - regs=10 stack=0 before 3\ - regs=0 stack=1 before 2\ - last_idx 5 first_idx 5\ - parent didn't have regs=1 stack=0 marks", + .errstr = "mark_precise: frame0: last_idx 5 first_idx 5\ + mark_precise: frame0: parent state regs=r4 stack=:\ + mark_precise: frame0: last_idx 4 first_idx 2\ + mark_precise: frame0: regs=r4 stack= before 4\ + mark_precise: frame0: regs=r4 stack= before 3\ + mark_precise: frame0: regs= stack=-8 before 2\ + mark_precise: frame0: falling back to forcing all scalars precise\ + force_precise: frame0: forcing r0 to be precise\ + mark_precise: frame0: last_idx 5 first_idx 5\ + mark_precise: frame0: parent state regs= stack=:", .result = VERBOSE_ACCEPT, .retval = -1, }, @@ -179,16 +179,19 @@ }, .prog_type = BPF_PROG_TYPE_XDP, .flags = BPF_F_TEST_STATE_FREQ, - .errstr = "last_idx 6 first_idx 6\ - parent didn't have regs=10 stack=0 marks\ - last_idx 5 first_idx 3\ - regs=10 stack=0 before 5\ - regs=10 stack=0 before 4\ - regs=0 stack=1 before 3\ - last_idx 6 first_idx 6\ - parent didn't have regs=1 stack=0 marks\ - last_idx 5 first_idx 3\ - regs=1 stack=0 before 5", + .errstr = "mark_precise: frame0: last_idx 6 first_idx 6\ + mark_precise: frame0: parent state regs=r4 stack=:\ + mark_precise: frame0: last_idx 5 first_idx 3\ + mark_precise: frame0: regs=r4 stack= before 5\ + mark_precise: frame0: regs=r4 stack= before 4\ + mark_precise: frame0: regs= stack=-8 before 3\ + mark_precise: frame0: falling back to forcing all scalars precise\ + force_precise: frame0: forcing r0 to be precise\ + force_precise: frame0: forcing r0 to be precise\ + force_precise: frame0: forcing r0 to be precise\ + force_precise: frame0: forcing r0 to be precise\ + mark_precise: frame0: last_idx 6 first_idx 6\ + mark_precise: frame0: parent state regs= stack=:", .result = VERBOSE_ACCEPT, .retval = -1, }, @@ -217,3 +220,39 @@ .errstr = "invalid access to memory, mem_size=1 off=42 size=8", .result = REJECT, }, +{ + "precise: program doesn't prematurely prune branches", + .insns = { + BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000), + BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2), + BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0), + BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), + BPF_LD_MAP_FD(BPF_REG_4, 0), + BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192), + BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + 
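+	/* the load above dereferences map_value + r6; the verifier must
+	 * keep the earlier branches distinct to notice that r6's minimum
+	 * value is unbounded on this path (see .errstr below)
+	 */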
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 13 }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = REJECT, + .errstr = "register with unbounded min value is not allowed", +}, diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 1db7185181da..655095810d4a 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -141,6 +141,7 @@ static struct env { bool verbose; bool debug; bool quiet; + bool force_checkpoints; enum resfmt out_fmt; bool show_version; bool comparison_mode; @@ -209,6 +210,8 @@ static const struct argp_option opts[] = { { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" }, { "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" }, { "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" }, + { "test-states", 't', NULL, 0, + "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" }, { "quiet", 'q', NULL, 0, "Quiet mode" }, { "emit", 'e', "SPEC", 0, "Specify stats to be emitted" }, { "sort", 's', "SPEC", 0, "Specify sort order" }, @@ -284,6 +287,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) argp_usage(state); } break; + case 't': + env.force_checkpoints = true; + break; case 'C': env.comparison_mode = true; break; @@ -989,6 +995,9 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf /* increase chances of successful BPF object loading */ fixup_obj(obj, prog, base_filename); + if (env.force_checkpoints) + bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ); + err = bpf_object__load(obj); env.progs_processed++; diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c index 987cf0db5ebc..613321eb84c1 100644 --- a/tools/testing/selftests/bpf/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c @@ -27,6 +27,7 @@ #include <sys/mman.h> #include <net/if.h> #include <poll.h> +#include <time.h> #include "xdp_metadata.h" @@ -134,18 +135,52 @@ static void refill_rx(struct xsk *xsk, __u64 addr) } } -static void verify_xdp_metadata(void *data) +#define NANOSEC_PER_SEC 1000000000 /* 10^9 */ +static __u64 gettime(clockid_t clock_id) +{ + struct timespec t; + int res; + + /* See man clock_gettime(2) for type of clock_id's */ + res = clock_gettime(clock_id, &t); + + if (res < 0) + error(res, errno, "Error with clock_gettime()"); + + return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec; +} + +static void verify_xdp_metadata(void *data, clockid_t clock_id) { struct xdp_meta *meta; meta = data - sizeof(*meta); - printf("rx_timestamp: %llu\n", meta->rx_timestamp); if (meta->rx_hash_err < 0) printf("No rx_hash err=%d\n", meta->rx_hash_err); else printf("rx_hash: 0x%X with RSS type:0x%X\n", meta->rx_hash, meta->rx_hash_type); + + printf("rx_timestamp: %llu (sec:%0.4f)\n", meta->rx_timestamp, + (double)meta->rx_timestamp / NANOSEC_PER_SEC); + if (meta->rx_timestamp) { + __u64 usr_clock = gettime(clock_id); + __u64 xdp_clock = meta->xdp_timestamp; + __s64 delta_X = xdp_clock - meta->rx_timestamp; + __s64 delta_X2U = usr_clock - xdp_clock; + + printf("XDP RX-time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n", + xdp_clock, (double)xdp_clock / NANOSEC_PER_SEC, + (double)delta_X / NANOSEC_PER_SEC, + (double)delta_X / 1000); + + printf("AF_XDP time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f 
usec)\n", + usr_clock, (double)usr_clock / NANOSEC_PER_SEC, + (double)delta_X2U / NANOSEC_PER_SEC, + (double)delta_X2U / 1000); + } + } static void verify_skb_metadata(int fd) @@ -193,7 +228,7 @@ static void verify_skb_metadata(int fd) printf("skb hwtstamp is not found!\n"); } -static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd) +static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t clock_id) { const struct xdp_desc *rx_desc; struct pollfd fds[rxq + 1]; @@ -243,7 +278,8 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd) addr = xsk_umem__add_offset_to_addr(rx_desc->addr); printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n", xsk, idx, rx_desc->addr, addr, comp_addr); - verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr)); + verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr), + clock_id); xsk_ring_cons__release(&xsk->rx, 1); refill_rx(xsk, comp_addr); } @@ -370,6 +406,7 @@ static void timestamping_enable(int fd, int val) int main(int argc, char *argv[]) { + clockid_t clock_id = CLOCK_TAI; int server_fd = -1; int ret; int i; @@ -443,7 +480,7 @@ int main(int argc, char *argv[]) error(1, -ret, "bpf_xdp_attach"); signal(SIGINT, handle_signal); - ret = verify_metadata(rx_xsk, rxq, server_fd); + ret = verify_metadata(rx_xsk, rxq, server_fd, clock_id); close(server_fd); cleanup(); if (ret) diff --git a/tools/testing/selftests/bpf/xdp_metadata.h b/tools/testing/selftests/bpf/xdp_metadata.h index 0c4624dc6f2f..938a729bd307 100644 --- a/tools/testing/selftests/bpf/xdp_metadata.h +++ b/tools/testing/selftests/bpf/xdp_metadata.h @@ -11,6 +11,7 @@ struct xdp_meta { __u64 rx_timestamp; + __u64 xdp_timestamp; __u32 rx_hash; union { __u32 rx_hash_type; diff --git a/tools/testing/selftests/bpf/xsk.h b/tools/testing/selftests/bpf/xsk.h index 04ed8b544712..8da8d557768b 100644 --- a/tools/testing/selftests/bpf/xsk.h +++ b/tools/testing/selftests/bpf/xsk.h @@ -134,6 +134,11 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb) __atomic_store_n(prod->producer, *prod->producer + nb, __ATOMIC_RELEASE); } +static inline void xsk_ring_prod__cancel(struct xsk_ring_prod *prod, __u32 nb) +{ + prod->cached_prod -= nb; +} + static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx) { __u32 entries = xsk_cons_nb_avail(cons, nb); diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index f144d0604ddf..218d7f694e5c 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -76,16 +76,13 @@ #include <asm/barrier.h> #include <linux/if_link.h> #include <linux/if_ether.h> -#include <linux/ip.h> #include <linux/mman.h> -#include <linux/udp.h> #include <arpa/inet.h> #include <net/if.h> #include <locale.h> #include <poll.h> #include <pthread.h> #include <signal.h> -#include <stdbool.h> #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -94,10 +91,8 @@ #include <sys/socket.h> #include <sys/time.h> #include <sys/types.h> -#include <sys/queue.h> #include <time.h> #include <unistd.h> -#include <stdatomic.h> #include "xsk_xdp_progs.skel.h" #include "xsk.h" @@ -109,10 +104,6 @@ static const char *MAC1 = "\x00\x0A\x56\x9E\xEE\x62"; static const char *MAC2 = "\x00\x0A\x56\x9E\xEE\x61"; -static const char *IP1 = "192.168.100.162"; -static const char *IP2 = "192.168.100.161"; -static const u16 UDP_PORT1 = 2020; -static const u16 UDP_PORT2 = 2121; static void __exit_with_error(int error, 
const char *file, const char *func, int line) { @@ -147,112 +138,25 @@ static void report_failure(struct test_spec *test) test->fail = true; } -static void memset32_htonl(void *dest, u32 val, u32 size) -{ - u32 *ptr = (u32 *)dest; - int i; - - val = htonl(val); - - for (i = 0; i < (size & (~0x3)); i += 4) - ptr[i >> 2] = val; -} - -/* - * Fold a partial checksum - * This function code has been taken from - * Linux kernel include/asm-generic/checksum.h - */ -static __u16 csum_fold(__u32 csum) -{ - u32 sum = (__force u32)csum; - - sum = (sum & 0xffff) + (sum >> 16); - sum = (sum & 0xffff) + (sum >> 16); - return (__force __u16)~sum; -} - -/* - * This function code has been taken from - * Linux kernel lib/checksum.c - */ -static u32 from64to32(u64 x) -{ - /* add up 32-bit and 32-bit for 32+c bit */ - x = (x & 0xffffffff) + (x >> 32); - /* add up carry.. */ - x = (x & 0xffffffff) + (x >> 32); - return (u32)x; -} - -/* - * This function code has been taken from - * Linux kernel lib/checksum.c - */ -static __u32 csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum) -{ - unsigned long long s = (__force u32)sum; - - s += (__force u32)saddr; - s += (__force u32)daddr; -#ifdef __BIG_ENDIAN__ - s += proto + len; -#else - s += (proto + len) << 8; -#endif - return (__force __u32)from64to32(s); } - -/* - * This function has been taken from - * Linux kernel include/asm-generic/checksum.h +/* The payload is a word consisting of a packet sequence number in the upper + * 16 bits and an intra-packet data sequence number in the lower 16 bits. So the 3rd packet's + * 5th word of data will contain the number (2<<16) | 4 as they are numbered from 0. */ -static __u16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len, __u8 proto, __u32 sum) -{ - return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); -} - -static u16 udp_csum(u32 saddr, u32 daddr, u32 len, u8 proto, u16 *udp_pkt) +static void write_payload(void *dest, u32 pkt_nb, u32 start, u32 size) { - u32 csum = 0; - u32 cnt = 0; - - /* udp hdr and data */ - for (; cnt < len; cnt += 2) - csum += udp_pkt[cnt >> 1]; + u32 *ptr = (u32 *)dest, i; - return csum_tcpudp_magic(saddr, daddr, len, proto, csum); + start /= sizeof(*ptr); + size /= sizeof(*ptr); + for (i = 0; i < size; i++) + ptr[i] = htonl(pkt_nb << 16 | (i + start)); } static void gen_eth_hdr(struct ifobject *ifobject, struct ethhdr *eth_hdr) { memcpy(eth_hdr->h_dest, ifobject->dst_mac, ETH_ALEN); memcpy(eth_hdr->h_source, ifobject->src_mac, ETH_ALEN); - eth_hdr->h_proto = htons(ETH_P_IP); -} - -static void gen_ip_hdr(struct ifobject *ifobject, struct iphdr *ip_hdr) -{ - ip_hdr->version = IP_PKT_VER; - ip_hdr->ihl = 0x5; - ip_hdr->tos = IP_PKT_TOS; - ip_hdr->tot_len = htons(IP_PKT_SIZE); - ip_hdr->id = 0; - ip_hdr->frag_off = 0; - ip_hdr->ttl = IPDEFTTL; - ip_hdr->protocol = IPPROTO_UDP; - ip_hdr->saddr = ifobject->src_ip; - ip_hdr->daddr = ifobject->dst_ip; - ip_hdr->check = 0; -} - -static void gen_udp_hdr(u32 payload, void *pkt, struct ifobject *ifobject, - struct udphdr *udp_hdr) -{ - udp_hdr->source = htons(ifobject->src_port); - udp_hdr->dest = htons(ifobject->dst_port); - udp_hdr->len = htons(UDP_PKT_SIZE); - memset32_htonl(pkt + PKT_HDR_SIZE, payload, UDP_PKT_DATA_SIZE); + eth_hdr->h_proto = htons(ETH_P_LOOPBACK); } static bool is_umem_valid(struct ifobject *ifobj) @@ -260,19 +164,18 @@ static bool is_umem_valid(struct ifobject *ifobj) return !!ifobj->umem->umem; } -static void gen_udp_csum(struct udphdr *udp_hdr, struct iphdr *ip_hdr) +static u32 
mode_to_xdp_flags(enum test_mode mode) { - udp_hdr->check = 0; - udp_hdr->check = - udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr); + return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; } -static u32 mode_to_xdp_flags(enum test_mode mode) +static u64 umem_size(struct xsk_umem_info *umem) { - return (mode == TEST_MODE_SKB) ? XDP_FLAGS_SKB_MODE : XDP_FLAGS_DRV_MODE; + return umem->num_frames * umem->frame_size; } -static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size) +static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, + u64 size) { struct xsk_umem_config cfg = { .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, @@ -292,9 +195,31 @@ static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size return ret; umem->buffer = buffer; + if (ifobj->shared_umem && ifobj->rx_on) { + umem->base_addr = umem_size(umem); + umem->next_buffer = umem_size(umem); + } + return 0; } +static u64 umem_alloc_buffer(struct xsk_umem_info *umem) +{ + u64 addr; + + addr = umem->next_buffer; + umem->next_buffer += umem->frame_size; + if (umem->next_buffer >= umem->base_addr + umem_size(umem)) + umem->next_buffer = umem->base_addr; + + return addr; +} + +static void umem_reset_alloc(struct xsk_umem_info *umem) +{ + umem->next_buffer = 0; +} + static void enable_busy_poll(struct xsk_socket_info *xsk) { int sock_opt; @@ -354,7 +279,7 @@ static bool ifobj_zc_avail(struct ifobject *ifobject) exit_with_error(ENOMEM); } umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; - ret = xsk_configure_umem(umem, bufs, umem_sz); + ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz); if (ret) exit_with_error(-ret); @@ -380,7 +305,6 @@ out: static struct option long_options[] = { {"interface", required_argument, 0, 'i'}, {"busy-poll", no_argument, 0, 'b'}, - {"dump-pkts", no_argument, 0, 'D'}, {"verbose", no_argument, 0, 'v'}, {0, 0, 0, 0} }; @@ -391,7 +315,6 @@ static void usage(const char *prog) " Usage: %s [OPTIONS]\n" " Options:\n" " -i, --interface Use interface\n" - " -D, --dump-pkts Dump packets L2 - L5\n" " -v, --verbose Verbose output\n" " -b, --busy-poll Enable busy poll\n"; @@ -415,7 +338,7 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj opterr = 0; for (;;) { - c = getopt_long(argc, argv, "i:Dvb", long_options, &option_index); + c = getopt_long(argc, argv, "i:vb", long_options, &option_index); if (c == -1) break; @@ -437,9 +360,6 @@ static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj interface_nb++; break; - case 'D': - opt_pkt_dump = true; - break; case 'v': opt_verbose = true; break; @@ -482,9 +402,6 @@ static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, memset(ifobj->umem, 0, sizeof(*ifobj->umem)); ifobj->umem->num_frames = DEFAULT_UMEM_BUFFERS; ifobj->umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; - if (ifobj->shared_umem && ifobj->rx_on) - ifobj->umem->base_addr = DEFAULT_UMEM_BUFFERS * - XSK_UMEM__DEFAULT_FRAME_SIZE; for (j = 0; j < MAX_SOCKETS; j++) { memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); @@ -554,24 +471,24 @@ static void test_spec_set_xdp_prog(struct test_spec *test, struct bpf_program *x static void pkt_stream_reset(struct pkt_stream *pkt_stream) { if (pkt_stream) - pkt_stream->rx_pkt_nb = 0; + pkt_stream->current_pkt_nb = 0; } -static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb) +static struct pkt *pkt_stream_get_next_tx_pkt(struct 
pkt_stream *pkt_stream) { - if (pkt_nb >= pkt_stream->nb_pkts) + if (pkt_stream->current_pkt_nb >= pkt_stream->nb_pkts) return NULL; - return &pkt_stream->pkts[pkt_nb]; + return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; } static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream, u32 *pkts_sent) { - while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) { + while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { (*pkts_sent)++; - if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid) - return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++]; - pkt_stream->rx_pkt_nb++; + if (pkt_stream->pkts[pkt_stream->current_pkt_nb].valid) + return &pkt_stream->pkts[pkt_stream->current_pkt_nb++]; + pkt_stream->current_pkt_nb++; } return NULL; } @@ -616,9 +533,21 @@ static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) return pkt_stream; } -static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 len) +static u32 ceil_u32(u32 a, u32 b) +{ + return (a + b - 1) / b; +} + +static u32 pkt_nb_frags(u32 frame_size, struct pkt *pkt) +{ + if (!pkt || !pkt->valid) + return 1; + return ceil_u32(pkt->len, frame_size); +} + +static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, int offset, u32 len) { - pkt->addr = addr + umem->base_addr; + pkt->offset = offset; pkt->len = len; if (len > umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 2 - umem->frame_headroom) pkt->valid = false; @@ -626,6 +555,11 @@ static void pkt_set(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr, u32 l pkt->valid = true; } +static u32 pkt_get_buffer_len(struct xsk_umem_info *umem, u32 len) +{ + return ceil_u32(len, umem->frame_size) * umem->frame_size; +} + static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) { struct pkt_stream *pkt_stream; @@ -635,10 +569,13 @@ static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb if (!pkt_stream) exit_with_error(ENOMEM); + pkt_stream->nb_pkts = nb_pkts; + pkt_stream->max_pkt_len = pkt_len; for (i = 0; i < nb_pkts; i++) { - pkt_set(umem, &pkt_stream->pkts[i], (i % umem->num_frames) * umem->frame_size, - pkt_len); - pkt_stream->pkts[i].payload = i; + struct pkt *pkt = &pkt_stream->pkts[i]; + + pkt_set(umem, pkt, 0, pkt_len); + pkt->pkt_nb = i; } return pkt_stream; @@ -669,8 +606,7 @@ static void __pkt_stream_replace_half(struct ifobject *ifobj, u32 pkt_len, pkt_stream = pkt_stream_clone(umem, ifobj->pkt_stream); for (i = 1; i < ifobj->pkt_stream->nb_pkts; i += 2) - pkt_set(umem, &pkt_stream->pkts[i], - (i % umem->num_frames) * umem->frame_size + offset, pkt_len); + pkt_set(umem, &pkt_stream->pkts[i], offset, pkt_len); ifobj->pkt_stream = pkt_stream; } @@ -694,30 +630,31 @@ static void pkt_stream_receive_half(struct test_spec *test) pkt_stream->pkts[i].valid = false; } -static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) +static u64 pkt_get_addr(struct pkt *pkt, struct xsk_umem_info *umem) { - struct pkt *pkt = pkt_stream_get_pkt(ifobject->pkt_stream, pkt_nb); - struct udphdr *udp_hdr; - struct ethhdr *eth_hdr; - struct iphdr *ip_hdr; - void *data; + if (!pkt->valid) + return pkt->offset; + return pkt->offset + umem_alloc_buffer(umem); +} - if (!pkt) - return NULL; - if (!pkt->valid || pkt->len < MIN_PKT_SIZE) - return pkt; +static void pkt_generate(struct ifobject *ifobject, u64 addr, u32 len, u32 pkt_nb, + u32 bytes_written) +{ + void *data = xsk_umem__get_data(ifobject->umem->buffer, addr); - data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr); 
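/* [Illustrative sketch, not part of the patch.] The surrounding pkt_generate()
 * hunk drops the UDP/IP builders in favour of write_payload(), which fills the
 * body with 32-bit words encoding (pkt_nb << 16) | word_index in network byte
 * order. A minimal standalone model of that encoding; encode_payload_word() is
 * a hypothetical helper name used only here.
 */
#include <arpa/inet.h>
#include <assert.h>
#include <stdint.h>

static uint32_t encode_payload_word(uint32_t pkt_nb, uint32_t word_idx)
{
	/* Upper 16 bits: packet number; lower 16 bits: word index. */
	return htonl(pkt_nb << 16 | word_idx);
}

int main(void)
{
	/* 3rd packet (pkt_nb == 2), 5th word (index == 4): (2 << 16) | 4. */
	uint32_t host = ntohl(encode_payload_word(2, 4));

	assert((host >> 16) == 2 && (host & 0xffff) == 4);
	return 0;
}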
- udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr)); - ip_hdr = (struct iphdr *)(data + sizeof(struct ethhdr)); - eth_hdr = (struct ethhdr *)data; + if (len < MIN_PKT_SIZE) + return; - gen_udp_hdr(pkt_nb, data, ifobject, udp_hdr); - gen_ip_hdr(ifobject, ip_hdr); - gen_udp_csum(udp_hdr, ip_hdr); - gen_eth_hdr(ifobject, eth_hdr); + if (!bytes_written) { + gen_eth_hdr(ifobject, data); - return pkt; + len -= PKT_HDR_SIZE; + data += PKT_HDR_SIZE; + } else { + bytes_written -= PKT_HDR_SIZE; + } + + write_payload(data, pkt_nb, bytes_written, len); } static void __pkt_stream_generate_custom(struct ifobject *ifobj, @@ -731,10 +668,14 @@ static void __pkt_stream_generate_custom(struct ifobject *ifobj, exit_with_error(ENOMEM); for (i = 0; i < nb_pkts; i++) { - pkt_stream->pkts[i].addr = pkts[i].addr + ifobj->umem->base_addr; - pkt_stream->pkts[i].len = pkts[i].len; - pkt_stream->pkts[i].payload = i; - pkt_stream->pkts[i].valid = pkts[i].valid; + struct pkt *pkt = &pkt_stream->pkts[i]; + + pkt->offset = pkts[i].offset; + pkt->len = pkts[i].len; + pkt->pkt_nb = i; + pkt->valid = pkts[i].valid; + if (pkt->len > pkt_stream->max_pkt_len) + pkt_stream->max_pkt_len = pkt->len; } ifobj->pkt_stream = pkt_stream; @@ -746,53 +687,62 @@ static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, __pkt_stream_generate_custom(test->ifobj_rx, pkts, nb_pkts); } -static void pkt_dump(void *pkt, u32 len) -{ - char s[INET_ADDRSTRLEN]; - struct ethhdr *ethhdr; - struct udphdr *udphdr; - struct iphdr *iphdr; - u32 payload, i; - - ethhdr = pkt; - iphdr = pkt + sizeof(*ethhdr); - udphdr = pkt + sizeof(*ethhdr) + sizeof(*iphdr); - - /*extract L2 frame */ - fprintf(stdout, "DEBUG>> L2: dst mac: "); - for (i = 0; i < ETH_ALEN; i++) - fprintf(stdout, "%02X", ethhdr->h_dest[i]); - - fprintf(stdout, "\nDEBUG>> L2: src mac: "); - for (i = 0; i < ETH_ALEN; i++) - fprintf(stdout, "%02X", ethhdr->h_source[i]); - - /*extract L3 frame */ - fprintf(stdout, "\nDEBUG>> L3: ip_hdr->ihl: %02X\n", iphdr->ihl); - fprintf(stdout, "DEBUG>> L3: ip_hdr->saddr: %s\n", - inet_ntop(AF_INET, &iphdr->saddr, s, sizeof(s))); - fprintf(stdout, "DEBUG>> L3: ip_hdr->daddr: %s\n", - inet_ntop(AF_INET, &iphdr->daddr, s, sizeof(s))); - /*extract L4 frame */ - fprintf(stdout, "DEBUG>> L4: udp_hdr->src: %d\n", ntohs(udphdr->source)); - fprintf(stdout, "DEBUG>> L4: udp_hdr->dst: %d\n", ntohs(udphdr->dest)); - /*extract L5 frame */ - payload = ntohl(*((u32 *)(pkt + PKT_HDR_SIZE))); +static void pkt_print_data(u32 *data, u32 cnt) +{ + u32 i; + + for (i = 0; i < cnt; i++) { + u32 seqnum, pkt_nb; - fprintf(stdout, "DEBUG>> L5: payload: %d\n", payload); - fprintf(stdout, "---------------------------------------\n"); + seqnum = ntohl(*data) & 0xffff; + pkt_nb = ntohl(*data) >> 16; + fprintf(stdout, "%u:%u ", pkt_nb, seqnum); + data++; + } } -static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, u64 addr, - u64 pkt_stream_addr) +static void pkt_dump(void *pkt, u32 len, bool eth_header) +{ + struct ethhdr *ethhdr = pkt; + u32 i, *data; + + if (eth_header) { + /*extract L2 frame */ + fprintf(stdout, "DEBUG>> L2: dst mac: "); + for (i = 0; i < ETH_ALEN; i++) + fprintf(stdout, "%02X", ethhdr->h_dest[i]); + + fprintf(stdout, "\nDEBUG>> L2: src mac: "); + for (i = 0; i < ETH_ALEN; i++) + fprintf(stdout, "%02X", ethhdr->h_source[i]); + + data = pkt + PKT_HDR_SIZE; + } else { + data = pkt; + } + + /*extract L5 frame */ + fprintf(stdout, "\nDEBUG>> L5: seqnum: "); + pkt_print_data(data, 
PKT_DUMP_NB_TO_PRINT); + fprintf(stdout, "...."); + if (len > PKT_DUMP_NB_TO_PRINT * sizeof(u32)) { + fprintf(stdout, "\n.... "); + pkt_print_data(data + len / sizeof(u32) - PKT_DUMP_NB_TO_PRINT, + PKT_DUMP_NB_TO_PRINT); + } + fprintf(stdout, "\n---------------------------------------\n"); +} + +static bool is_offset_correct(struct xsk_umem_info *umem, struct pkt *pkt, u64 addr) { u32 headroom = umem->unaligned_mode ? 0 : umem->frame_headroom; - u32 offset = addr % umem->frame_size, expected_offset = 0; + u32 offset = addr % umem->frame_size, expected_offset; + int pkt_offset = pkt->valid ? pkt->offset : 0; - if (!pkt_stream->use_addr_for_fill) - pkt_stream_addr = 0; + if (!umem->unaligned_mode) + pkt_offset = 0; - expected_offset += (pkt_stream_addr + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; + expected_offset = (pkt_offset + headroom + XDP_PACKET_HEADROOM) % umem->frame_size; if (offset == expected_offset) return true; @@ -806,9 +756,9 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) void *data = xsk_umem__get_data(buffer, addr); struct xdp_info *meta = data - sizeof(struct xdp_info); - if (meta->count != pkt->payload) { + if (meta->count != pkt->pkt_nb) { ksft_print_msg("[%s] expected meta_count [%d], got meta_count [%d]\n", - __func__, pkt->payload, meta->count); + __func__, pkt->pkt_nb, meta->count); return false; } @@ -818,11 +768,11 @@ static bool is_metadata_correct(struct pkt *pkt, void *buffer, u64 addr) static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) { void *data = xsk_umem__get_data(buffer, addr); - struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr)); + u32 seqnum, pkt_data; if (!pkt) { ksft_print_msg("[%s] too many packets received\n", __func__); - return false; + goto error; } if (len < MIN_PKT_SIZE || pkt->len < MIN_PKT_SIZE) { @@ -833,28 +783,23 @@ static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) if (pkt->len != len) { ksft_print_msg("[%s] expected length [%d], got length [%d]\n", __func__, pkt->len, len); - return false; + goto error; } - if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) { - u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); - - if (opt_pkt_dump) - pkt_dump(data, PKT_SIZE); + pkt_data = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); + seqnum = pkt_data >> 16; - if (pkt->payload != seqnum) { - ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", - __func__, pkt->payload, seqnum); - return false; - } - } else { - ksft_print_msg("Invalid frame received: "); - ksft_print_msg("[IP_PKT_VER: %02X], [IP_PKT_TOS: %02X]\n", iphdr->version, - iphdr->tos); - return false; + if (pkt->pkt_nb != seqnum) { + ksft_print_msg("[%s] expected seqnum [%d], got seqnum [%d]\n", + __func__, pkt->pkt_nb, seqnum); + goto error; } return true; + +error: + pkt_dump(data, len, true); + return false; } static void kick_tx(struct xsk_socket_info *xsk) @@ -976,7 +921,7 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds) addr = xsk_umem__add_offset_to_addr(addr); if (!is_pkt_valid(pkt, umem->buffer, addr, desc->len) || - !is_offset_correct(umem, pkt_stream, addr, pkt->addr) || + !is_offset_correct(umem, pkt, addr) || (ifobj->use_metadata && !is_metadata_correct(pkt, umem->buffer, addr))) return TEST_FAILURE; @@ -992,8 +937,6 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds) pthread_mutex_lock(&pacing_mutex); pkts_in_flight -= pkts_sent; - if (pkts_in_flight < umem->num_frames) - pthread_cond_signal(&pacing_cond); 
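/* [Illustrative sketch, not part of the patch.] The pacing_cond signalling
 * removed here is replaced by a lock-free back-off in the __send_pkts() hunk
 * that follows: when another batch could exhaust the umem, the sender kicks
 * Tx and returns TEST_CONTINUE instead of sleeping on a condition variable.
 * tx_should_back_off() and its parameters are made-up names for illustration.
 */
#include <stdbool.h>
#include <stdint.h>

#define BATCH_SIZE 64

static bool tx_should_back_off(int pkts_in_flight, uint64_t umem_bytes,
			       uint32_t buffer_len)
{
	/* Keep one batch worth of buffers in reserve; pkts_in_flight can go
	 * negative when many invalid (never-completed) packets are queued,
	 * hence the signed comparison.
	 */
	return pkts_in_flight >=
	       (int)((umem_bytes - BATCH_SIZE * buffer_len) / buffer_len);
}

int main(void)
{
	/* 1024 frames of 2048B with 32 packets in flight: keep sending. */
	return tx_should_back_off(32, 1024ULL * 2048, 2048) ? 1 : 0;
}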
pthread_mutex_unlock(&pacing_mutex); pkts_sent = 0; } @@ -1001,14 +944,21 @@ static int receive_pkts(struct test_spec *test, struct pollfd *fds) return TEST_PASS; } -static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fds, - bool timeout) +static int __send_pkts(struct ifobject *ifobject, struct pollfd *fds, bool timeout) { struct xsk_socket_info *xsk = ifobject->xsk; + struct xsk_umem_info *umem = ifobject->umem; + u32 i, idx = 0, valid_pkts = 0, buffer_len; bool use_poll = ifobject->use_poll; - u32 i, idx = 0, valid_pkts = 0; int ret; + buffer_len = pkt_get_buffer_len(umem, ifobject->pkt_stream->max_pkt_len); + /* pkts_in_flight might be negative if many invalid packets are sent */ + if (pkts_in_flight >= (int)((umem_size(umem) - BATCH_SIZE * buffer_len) / buffer_len)) { + kick_tx(xsk); + return TEST_CONTINUE; + } + while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) { if (use_poll) { ret = poll(fds, 1, POLL_TMOUT); @@ -1034,25 +984,21 @@ static int __send_pkts(struct ifobject *ifobject, u32 *pkt_nb, struct pollfd *fd for (i = 0; i < BATCH_SIZE; i++) { struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx, idx + i); - struct pkt *pkt = pkt_generate(ifobject, *pkt_nb); + struct pkt *pkt = pkt_stream_get_next_tx_pkt(ifobject->pkt_stream); if (!pkt) break; - tx_desc->addr = pkt->addr; + tx_desc->addr = pkt_get_addr(pkt, umem); tx_desc->len = pkt->len; - (*pkt_nb)++; - if (pkt->valid) + if (pkt->valid) { valid_pkts++; + pkt_generate(ifobject, tx_desc->addr, tx_desc->len, pkt->pkt_nb, 0); + } } pthread_mutex_lock(&pacing_mutex); pkts_in_flight += valid_pkts; - /* pkts_in_flight might be negative if many invalid packets are sent */ - if (pkts_in_flight >= (int)(ifobject->umem->num_frames - BATCH_SIZE)) { - kick_tx(xsk); - pthread_cond_wait(&pacing_cond, &pacing_mutex); - } pthread_mutex_unlock(&pacing_mutex); xsk_ring_prod__submit(&xsk->tx, i); @@ -1088,18 +1034,21 @@ static void wait_for_tx_completion(struct xsk_socket_info *xsk) static int send_pkts(struct test_spec *test, struct ifobject *ifobject) { + struct pkt_stream *pkt_stream = ifobject->pkt_stream; bool timeout = !is_umem_valid(test->ifobj_rx); struct pollfd fds = { }; - u32 pkt_cnt = 0, ret; + u32 ret; fds.fd = xsk_socket__fd(ifobject->xsk->xsk); fds.events = POLLOUT; - while (pkt_cnt < ifobject->pkt_stream->nb_pkts) { - ret = __send_pkts(ifobject, &pkt_cnt, &fds, timeout); + while (pkt_stream->current_pkt_nb < pkt_stream->nb_pkts) { + ret = __send_pkts(ifobject, &fds, timeout); + if (ret == TEST_CONTINUE && !test->fail) + continue; if ((ret || test->fail) && !timeout) return TEST_FAILURE; - else if (ret == TEST_PASS && timeout) + if (ret == TEST_PASS && timeout) return ret; } @@ -1249,11 +1198,14 @@ static void thread_common_ops_tx(struct test_spec *test, struct ifobject *ifobje ifobject->xsk = &ifobject->xsk_arr[0]; ifobject->xskmap = test->ifobj_rx->xskmap; memcpy(ifobject->umem, test->ifobj_rx->umem, sizeof(struct xsk_umem_info)); + ifobject->umem->base_addr = 0; } -static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) +static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream, + bool fill_up) { - u32 idx = 0, i, buffers_to_fill; + u32 rx_frame_size = umem->frame_size - XDP_PACKET_HEADROOM; + u32 idx = 0, filled = 0, buffers_to_fill, nb_pkts; int ret; if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) @@ -1264,22 +1216,33 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct 
pkt_stream ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); if (ret != buffers_to_fill) exit_with_error(ENOSPC); - for (i = 0; i < buffers_to_fill; i++) { - u64 addr; - if (pkt_stream->use_addr_for_fill) { - struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); + while (filled < buffers_to_fill) { + struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream, &nb_pkts); + u64 addr; + u32 i; + + for (i = 0; i < pkt_nb_frags(rx_frame_size, pkt); i++) { + if (!pkt) { + if (!fill_up) + break; + addr = filled * umem->frame_size + umem->base_addr; + } else if (pkt->offset >= 0) { + addr = pkt->offset % umem->frame_size + umem_alloc_buffer(umem); + } else { + addr = pkt->offset + umem_alloc_buffer(umem); + } - if (!pkt) + *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; + if (++filled >= buffers_to_fill) break; - addr = pkt->addr; - } else { - addr = i * umem->frame_size; } - - *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; } - xsk_ring_prod__submit(&umem->fq, i); + xsk_ring_prod__submit(&umem->fq, filled); + xsk_ring_prod__cancel(&umem->fq, buffers_to_fill - filled); + + pkt_stream_reset(pkt_stream); + umem_reset_alloc(umem); } static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) @@ -1300,12 +1263,10 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) if (bufs == MAP_FAILED) exit_with_error(errno); - ret = xsk_configure_umem(ifobject->umem, bufs, umem_sz); + ret = xsk_configure_umem(ifobject, ifobject->umem, bufs, umem_sz); if (ret) exit_with_error(-ret); - xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); - xsk_configure_socket(test, ifobject, ifobject->umem, false); ifobject->xsk = &ifobject->xsk_arr[0]; @@ -1313,6 +1274,8 @@ static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) if (!ifobject->rx_on) return; + xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream, ifobject->use_fill_ring); + ret = xsk_update_xskmap(ifobject->xskmap, ifobject->xsk->xsk); if (ret) exit_with_error(errno); @@ -1370,12 +1333,8 @@ static void *worker_testapp_validate_rx(void *arg) if (!err && ifobject->validation_func) err = ifobject->validation_func(ifobject); - if (err) { + if (err) report_failure(test); - pthread_mutex_lock(&pacing_mutex); - pthread_cond_signal(&pacing_cond); - pthread_mutex_unlock(&pacing_mutex); - } pthread_exit(NULL); } @@ -1402,11 +1361,20 @@ static void handler(int signum) pthread_exit(NULL); } -static bool xdp_prog_changed(struct test_spec *test, struct ifobject *ifobj) +static bool xdp_prog_changed_rx(struct test_spec *test) { + struct ifobject *ifobj = test->ifobj_rx; + return ifobj->xdp_prog != test->xdp_prog_rx || ifobj->mode != test->mode; } +static bool xdp_prog_changed_tx(struct test_spec *test) +{ + struct ifobject *ifobj = test->ifobj_tx; + + return ifobj->xdp_prog != test->xdp_prog_tx || ifobj->mode != test->mode; +} + static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_prog, struct bpf_map *xskmap, enum test_mode mode) { @@ -1433,13 +1401,13 @@ static void xsk_reattach_xdp(struct ifobject *ifobj, struct bpf_program *xdp_pro static void xsk_attach_xdp_progs(struct test_spec *test, struct ifobject *ifobj_rx, struct ifobject *ifobj_tx) { - if (xdp_prog_changed(test, ifobj_rx)) + if (xdp_prog_changed_rx(test)) xsk_reattach_xdp(ifobj_rx, test->xdp_prog_rx, test->xskmap_rx, test->mode); if (!ifobj_tx || ifobj_tx->shared_umem) return; - if (xdp_prog_changed(test, ifobj_tx)) + if (xdp_prog_changed_tx(test)) xsk_reattach_xdp(ifobj_tx, 
test->xdp_prog_tx, test->xskmap_tx, test->mode); } @@ -1448,9 +1416,11 @@ static int __testapp_validate_traffic(struct test_spec *test, struct ifobject *i { pthread_t t0, t1; - if (ifobj2) + if (ifobj2) { if (pthread_barrier_init(&barr, NULL, 2)) exit_with_error(errno); + pkt_stream_reset(ifobj2->pkt_stream); + } test->current_step++; pkt_stream_reset(ifobj1->pkt_stream); @@ -1493,6 +1463,12 @@ static int testapp_validate_traffic(struct test_spec *test) struct ifobject *ifobj_rx = test->ifobj_rx; struct ifobject *ifobj_tx = test->ifobj_tx; + if ((ifobj_rx->umem->unaligned_mode && !ifobj_rx->unaligned_supp) || + (ifobj_tx->umem->unaligned_mode && !ifobj_tx->unaligned_supp)) { + ksft_test_result_skip("No huge pages present.\n"); + return TEST_SKIP; + } + xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx); return __testapp_validate_traffic(test, ifobj_rx, ifobj_tx); } @@ -1502,16 +1478,18 @@ static int testapp_validate_traffic_single_thread(struct test_spec *test, struct return __testapp_validate_traffic(test, ifobj, NULL); } -static void testapp_teardown(struct test_spec *test) +static int testapp_teardown(struct test_spec *test) { int i; test_spec_set_name(test, "TEARDOWN"); for (i = 0; i < MAX_TEARDOWN_ITER; i++) { if (testapp_validate_traffic(test)) - return; + return TEST_FAILURE; test_spec_reset(test); } + + return TEST_PASS; } static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) @@ -1526,20 +1504,23 @@ static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) *ifobj2 = tmp_ifobj; } -static void testapp_bidi(struct test_spec *test) +static int testapp_bidi(struct test_spec *test) { + int res; + test_spec_set_name(test, "BIDIRECTIONAL"); test->ifobj_tx->rx_on = true; test->ifobj_rx->tx_on = true; test->total_steps = 2; if (testapp_validate_traffic(test)) - return; + return TEST_FAILURE; print_verbose("Switching Tx/Rx vectors\n"); swap_directions(&test->ifobj_rx, &test->ifobj_tx); - __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx); + res = __testapp_validate_traffic(test, test->ifobj_rx, test->ifobj_tx); swap_directions(&test->ifobj_rx, &test->ifobj_tx); + return res; } static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx) @@ -1556,160 +1537,139 @@ static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj exit_with_error(errno); } -static void testapp_bpf_res(struct test_spec *test) +static int testapp_bpf_res(struct test_spec *test) { test_spec_set_name(test, "BPF_RES"); test->total_steps = 2; test->nb_sockets = 2; if (testapp_validate_traffic(test)) - return; + return TEST_FAILURE; swap_xsk_resources(test->ifobj_tx, test->ifobj_rx); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_headroom(struct test_spec *test) +static int testapp_headroom(struct test_spec *test) { test_spec_set_name(test, "UMEM_HEADROOM"); test->ifobj_rx->umem->frame_headroom = UMEM_HEADROOM_TEST_SIZE; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_stats_rx_dropped(struct test_spec *test) +static int testapp_stats_rx_dropped(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_DROPPED"); + if (test->mode == TEST_MODE_ZC) { + ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); + return TEST_SKIP; + } + pkt_stream_replace_half(test, MIN_PKT_SIZE * 4, 0); test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - XDP_PACKET_HEADROOM - MIN_PKT_SIZE * 3; 
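/* [Illustrative sketch, not part of the patch.] The hunks around this point
 * convert every testapp_*() from void to int so that run_pkt_test() can report
 * PASS/SKIP/FAILURE from one place instead of each test printing its own
 * verdict. The convention in miniature; run_one() and dummy_test() are
 * hypothetical.
 */
#include <stdio.h>

#define TEST_PASS     0
#define TEST_FAILURE -1
#define TEST_SKIP     2

static int run_one(int (*test_fn)(void), const char *name)
{
	int ret = test_fn();

	/* Only TEST_PASS prints a pass line; skip paths emit their own
	 * ksft skip message and failures are reported where they occur.
	 */
	if (ret == TEST_PASS)
		printf("PASS: %s\n", name);
	return ret;
}

static int dummy_test(void)
{
	return TEST_PASS;
}

int main(void)
{
	return run_one(dummy_test, "DUMMY") == TEST_PASS ? 0 : 1;
}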
pkt_stream_receive_half(test); test->ifobj_rx->validation_func = validate_rx_dropped; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_stats_tx_invalid_descs(struct test_spec *test) +static int testapp_stats_tx_invalid_descs(struct test_spec *test) { test_spec_set_name(test, "STAT_TX_INVALID"); pkt_stream_replace_half(test, XSK_UMEM__INVALID_FRAME_SIZE, 0); test->ifobj_tx->validation_func = validate_tx_invalid_descs; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_stats_rx_full(struct test_spec *test) +static int testapp_stats_rx_full(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_FULL"); - pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE); + pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, - DEFAULT_UMEM_BUFFERS, PKT_SIZE); - if (!test->ifobj_rx->pkt_stream) - exit_with_error(ENOMEM); + DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); test->ifobj_rx->xsk->rxqsize = DEFAULT_UMEM_BUFFERS; test->ifobj_rx->release_rx = false; test->ifobj_rx->validation_func = validate_rx_full; - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_stats_fill_empty(struct test_spec *test) +static int testapp_stats_fill_empty(struct test_spec *test) { test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); - pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, PKT_SIZE); + pkt_stream_replace(test, DEFAULT_UMEM_BUFFERS + DEFAULT_UMEM_BUFFERS / 2, MIN_PKT_SIZE); test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, - DEFAULT_UMEM_BUFFERS, PKT_SIZE); - if (!test->ifobj_rx->pkt_stream) - exit_with_error(ENOMEM); + DEFAULT_UMEM_BUFFERS, MIN_PKT_SIZE); test->ifobj_rx->use_fill_ring = false; test->ifobj_rx->validation_func = validate_fill_empty; - testapp_validate_traffic(test); -} - -/* Simple test */ -static bool hugepages_present(struct ifobject *ifobject) -{ - size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size; - void *bufs; - - bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, - MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_HUGE_2MB, -1, 0); - if (bufs == MAP_FAILED) - return false; - - mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; - munmap(bufs, mmap_sz); - return true; + return testapp_validate_traffic(test); } -static bool testapp_unaligned(struct test_spec *test) +static int testapp_unaligned(struct test_spec *test) { - if (!hugepages_present(test->ifobj_tx)) { - ksft_test_result_skip("No 2M huge pages present.\n"); - return false; - } - test_spec_set_name(test, "UNALIGNED_MODE"); test->ifobj_tx->umem->unaligned_mode = true; test->ifobj_rx->umem->unaligned_mode = true; - /* Let half of the packets straddle a buffer boundrary */ - pkt_stream_replace_half(test, PKT_SIZE, -PKT_SIZE / 2); - test->ifobj_rx->pkt_stream->use_addr_for_fill = true; - testapp_validate_traffic(test); + /* Let half of the packets straddle a 4K buffer boundary */ + pkt_stream_replace_half(test, MIN_PKT_SIZE, -MIN_PKT_SIZE / 2); - return true; + return testapp_validate_traffic(test); } -static void testapp_single_pkt(struct test_spec *test) +static int testapp_single_pkt(struct test_spec *test) { - struct pkt pkts[] = {{0x1000, PKT_SIZE, 0, true}}; + struct pkt pkts[] = {{0, MIN_PKT_SIZE, 0, true}}; pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); - 
testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_invalid_desc(struct test_spec *test) +static int testapp_invalid_desc(struct test_spec *test) { - u64 umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; + struct xsk_umem_info *umem = test->ifobj_tx->umem; + u64 umem_size = umem->num_frames * umem->frame_size; struct pkt pkts[] = { /* Zero packet address allowed */ - {0, PKT_SIZE, 0, true}, + {0, MIN_PKT_SIZE, 0, true}, /* Allowed packet */ - {0x1000, PKT_SIZE, 0, true}, + {0, MIN_PKT_SIZE, 0, true}, /* Straddling the start of umem */ - {-2, PKT_SIZE, 0, false}, + {-2, MIN_PKT_SIZE, 0, false}, /* Packet too large */ - {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, + {0, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, /* Up to end of umem allowed */ - {umem_size - PKT_SIZE, PKT_SIZE, 0, true}, + {umem_size - MIN_PKT_SIZE - 2 * umem->frame_size, MIN_PKT_SIZE, 0, true}, /* After umem ends */ - {umem_size, PKT_SIZE, 0, false}, + {umem_size, MIN_PKT_SIZE, 0, false}, /* Straddle the end of umem */ - {umem_size - PKT_SIZE / 2, PKT_SIZE, 0, false}, - /* Straddle a page boundrary */ - {0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false}, - /* Straddle a 2K boundrary */ - {0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true}, + {umem_size - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, + /* Straddle a 4K boundary */ + {0x1000 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, false}, + /* Straddle a 2K boundary */ + {0x800 - MIN_PKT_SIZE / 2, MIN_PKT_SIZE, 0, true}, /* Valid packet for synch so that something is received */ - {0x4000, PKT_SIZE, 0, true}}; + {0, MIN_PKT_SIZE, 0, true}}; - if (test->ifobj_tx->umem->unaligned_mode) { - /* Crossing a page boundrary allowed */ + if (umem->unaligned_mode) { + /* Crossing a page boundary allowed */ pkts[7].valid = true; } - if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { - /* Crossing a 2K frame size boundrary not allowed */ + if (umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { + /* Crossing a 2K frame size boundary not allowed */ pkts[8].valid = false; } if (test->ifobj_tx->shared_umem) { - pkts[4].addr += umem_size; - pkts[5].addr += umem_size; - pkts[6].addr += umem_size; + pkts[4].offset += umem_size; + pkts[5].offset += umem_size; + pkts[6].offset += umem_size; } pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_xdp_drop(struct test_spec *test) +static int testapp_xdp_drop(struct test_spec *test) { struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; @@ -1719,10 +1679,10 @@ static void testapp_xdp_drop(struct test_spec *test) skel_rx->maps.xsk, skel_tx->maps.xsk); pkt_stream_receive_half(test); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_xdp_metadata_count(struct test_spec *test) +static int testapp_xdp_metadata_count(struct test_spec *test) { struct xsk_xdp_progs *skel_rx = test->ifobj_rx->xdp_progs; struct xsk_xdp_progs *skel_tx = test->ifobj_tx->xdp_progs; @@ -1743,10 +1703,10 @@ static void testapp_xdp_metadata_count(struct test_spec *test) if (bpf_map_update_elem(bpf_map__fd(data_map), &key, &count, BPF_ANY)) exit_with_error(errno); - testapp_validate_traffic(test); + return testapp_validate_traffic(test); } -static void testapp_poll_txq_tmout(struct test_spec *test) +static int testapp_poll_txq_tmout(struct test_spec *test) { test_spec_set_name(test, 
"POLL_TXQ_FULL"); @@ -1754,14 +1714,14 @@ static void testapp_poll_txq_tmout(struct test_spec *test) /* create invalid frame by set umem frame_size and pkt length equal to 2048 */ test->ifobj_tx->umem->frame_size = 2048; pkt_stream_replace(test, 2 * DEFAULT_PKT_CNT, 2048); - testapp_validate_traffic_single_thread(test, test->ifobj_tx); + return testapp_validate_traffic_single_thread(test, test->ifobj_tx); } -static void testapp_poll_rxq_tmout(struct test_spec *test) +static int testapp_poll_rxq_tmout(struct test_spec *test) { test_spec_set_name(test, "POLL_RXQ_EMPTY"); test->ifobj_rx->use_poll = true; - testapp_validate_traffic_single_thread(test, test->ifobj_rx); + return testapp_validate_traffic_single_thread(test, test->ifobj_rx); } static int xsk_load_xdp_programs(struct ifobject *ifobj) @@ -1778,25 +1738,30 @@ static void xsk_unload_xdp_programs(struct ifobject *ifobj) xsk_xdp_progs__destroy(ifobj->xdp_progs); } +/* Simple test */ +static bool hugepages_present(void) +{ + size_t mmap_sz = 2 * DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE; + void *bufs; + + bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, MAP_HUGE_2MB); + if (bufs == MAP_FAILED) + return false; + + mmap_sz = ceil_u64(mmap_sz, HUGEPAGE_SIZE) * HUGEPAGE_SIZE; + munmap(bufs, mmap_sz); + return true; +} + static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac, - const char *dst_ip, const char *src_ip, const u16 dst_port, - const u16 src_port, thread_func_t func_ptr) + thread_func_t func_ptr) { - struct in_addr ip; int err; memcpy(ifobj->dst_mac, dst_mac, ETH_ALEN); memcpy(ifobj->src_mac, src_mac, ETH_ALEN); - inet_aton(dst_ip, &ip); - ifobj->dst_ip = ip.s_addr; - - inet_aton(src_ip, &ip); - ifobj->src_ip = ip.s_addr; - - ifobj->dst_port = dst_port; - ifobj->src_port = src_port; - ifobj->func_ptr = func_ptr; err = xsk_load_xdp_programs(ifobj); @@ -1804,94 +1769,87 @@ static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char * printf("Error loading XDP program\n"); exit_with_error(err); } + + if (hugepages_present()) + ifobj->unaligned_supp = true; } static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type) { + int ret = TEST_SKIP; + switch (type) { case TEST_TYPE_STATS_RX_DROPPED: - if (mode == TEST_MODE_ZC) { - ksft_test_result_skip("Can not run RX_DROPPED test for ZC mode\n"); - return; - } - testapp_stats_rx_dropped(test); + ret = testapp_stats_rx_dropped(test); break; case TEST_TYPE_STATS_TX_INVALID_DESCS: - testapp_stats_tx_invalid_descs(test); + ret = testapp_stats_tx_invalid_descs(test); break; case TEST_TYPE_STATS_RX_FULL: - testapp_stats_rx_full(test); + ret = testapp_stats_rx_full(test); break; case TEST_TYPE_STATS_FILL_EMPTY: - testapp_stats_fill_empty(test); + ret = testapp_stats_fill_empty(test); break; case TEST_TYPE_TEARDOWN: - testapp_teardown(test); + ret = testapp_teardown(test); break; case TEST_TYPE_BIDI: - testapp_bidi(test); + ret = testapp_bidi(test); break; case TEST_TYPE_BPF_RES: - testapp_bpf_res(test); + ret = testapp_bpf_res(test); break; case TEST_TYPE_RUN_TO_COMPLETION: test_spec_set_name(test, "RUN_TO_COMPLETION"); - testapp_validate_traffic(test); + ret = testapp_validate_traffic(test); break; case TEST_TYPE_RUN_TO_COMPLETION_SINGLE_PKT: test_spec_set_name(test, "RUN_TO_COMPLETION_SINGLE_PKT"); - testapp_single_pkt(test); + ret = testapp_single_pkt(test); break; case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME: test_spec_set_name(test, 
"RUN_TO_COMPLETION_2K_FRAME_SIZE"); test->ifobj_tx->umem->frame_size = 2048; test->ifobj_rx->umem->frame_size = 2048; - pkt_stream_replace(test, DEFAULT_PKT_CNT, PKT_SIZE); - testapp_validate_traffic(test); + pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE); + ret = testapp_validate_traffic(test); break; case TEST_TYPE_RX_POLL: test->ifobj_rx->use_poll = true; test_spec_set_name(test, "POLL_RX"); - testapp_validate_traffic(test); + ret = testapp_validate_traffic(test); break; case TEST_TYPE_TX_POLL: test->ifobj_tx->use_poll = true; test_spec_set_name(test, "POLL_TX"); - testapp_validate_traffic(test); + ret = testapp_validate_traffic(test); break; case TEST_TYPE_POLL_TXQ_TMOUT: - testapp_poll_txq_tmout(test); + ret = testapp_poll_txq_tmout(test); break; case TEST_TYPE_POLL_RXQ_TMOUT: - testapp_poll_rxq_tmout(test); + ret = testapp_poll_rxq_tmout(test); break; case TEST_TYPE_ALIGNED_INV_DESC: test_spec_set_name(test, "ALIGNED_INV_DESC"); - testapp_invalid_desc(test); + ret = testapp_invalid_desc(test); break; case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME: test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE"); test->ifobj_tx->umem->frame_size = 2048; test->ifobj_rx->umem->frame_size = 2048; - testapp_invalid_desc(test); + ret = testapp_invalid_desc(test); break; case TEST_TYPE_UNALIGNED_INV_DESC: - if (!hugepages_present(test->ifobj_tx)) { - ksft_test_result_skip("No 2M huge pages present.\n"); - return; - } test_spec_set_name(test, "UNALIGNED_INV_DESC"); test->ifobj_tx->umem->unaligned_mode = true; test->ifobj_rx->umem->unaligned_mode = true; - testapp_invalid_desc(test); + ret = testapp_invalid_desc(test); break; case TEST_TYPE_UNALIGNED_INV_DESC_4K1_FRAME: { u64 page_size, umem_size; - if (!hugepages_present(test->ifobj_tx)) { - ksft_test_result_skip("No 2M huge pages present.\n"); - return; - } test_spec_set_name(test, "UNALIGNED_INV_DESC_4K1_FRAME_SIZE"); /* Odd frame size so the UMEM doesn't end near a page boundary. 
*/ test->ifobj_tx->umem->frame_size = 4001; @@ -1903,29 +1861,28 @@ static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_ */ page_size = sysconf(_SC_PAGESIZE); umem_size = test->ifobj_tx->umem->num_frames * test->ifobj_tx->umem->frame_size; - assert(umem_size % page_size > PKT_SIZE); - assert(umem_size % page_size < page_size - PKT_SIZE); - testapp_invalid_desc(test); + assert(umem_size % page_size > MIN_PKT_SIZE); + assert(umem_size % page_size < page_size - MIN_PKT_SIZE); + ret = testapp_invalid_desc(test); break; } case TEST_TYPE_UNALIGNED: - if (!testapp_unaligned(test)) - return; + ret = testapp_unaligned(test); break; case TEST_TYPE_HEADROOM: - testapp_headroom(test); + ret = testapp_headroom(test); break; case TEST_TYPE_XDP_DROP_HALF: - testapp_xdp_drop(test); + ret = testapp_xdp_drop(test); break; case TEST_TYPE_XDP_METADATA_COUNT: - testapp_xdp_metadata_count(test); + ret = testapp_xdp_metadata_count(test); break; default: break; } - if (!test->fail) + if (ret == TEST_PASS) ksft_test_result_pass("PASS: %s %s%s\n", mode_string(test), busy_poll_string(test), test->name); pkt_stream_restore_default(test); @@ -2030,14 +1987,12 @@ int main(int argc, char **argv) modes++; } - init_iface(ifobj_rx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, - worker_testapp_validate_rx); - init_iface(ifobj_tx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, - worker_testapp_validate_tx); + init_iface(ifobj_rx, MAC1, MAC2, worker_testapp_validate_rx); + init_iface(ifobj_tx, MAC2, MAC1, worker_testapp_validate_tx); test_spec_init(&test, ifobj_tx, ifobj_rx, 0); - tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); - rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, PKT_SIZE); + tx_pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE); + rx_pkt_stream_default = pkt_stream_generate(ifobj_rx->umem, DEFAULT_PKT_CNT, MIN_PKT_SIZE); if (!tx_pkt_stream_default || !rx_pkt_stream_default) exit_with_error(ENOMEM); test.tx_pkt_stream_default = tx_pkt_stream_default; diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h index c535aeab2ca3..aaf27e067640 100644 --- a/tools/testing/selftests/bpf/xskxceiver.h +++ b/tools/testing/selftests/bpf/xskxceiver.h @@ -30,22 +30,14 @@ #define TEST_PASS 0 #define TEST_FAILURE -1 #define TEST_CONTINUE 1 +#define TEST_SKIP 2 #define MAX_INTERFACES 2 #define MAX_INTERFACE_NAME_CHARS 16 #define MAX_SOCKETS 2 #define MAX_TEST_NAME_SIZE 32 #define MAX_TEARDOWN_ITER 10 -#define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \ - sizeof(struct udphdr)) -#define MIN_ETH_PKT_SIZE 64 -#define ETH_FCS_SIZE 4 -#define MIN_PKT_SIZE (MIN_ETH_PKT_SIZE - ETH_FCS_SIZE) -#define PKT_SIZE (MIN_PKT_SIZE) -#define IP_PKT_SIZE (PKT_SIZE - sizeof(struct ethhdr)) -#define IP_PKT_VER 0x4 -#define IP_PKT_TOS 0x9 -#define UDP_PKT_SIZE (IP_PKT_SIZE - sizeof(struct iphdr)) -#define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - sizeof(struct udphdr)) +#define PKT_HDR_SIZE (sizeof(struct ethhdr) + 2) /* Just to align the data in the packet */ +#define MIN_PKT_SIZE 64 #define USLEEP_MAX 10000 #define SOCK_RECONF_CTR 10 #define BATCH_SIZE 64 @@ -57,6 +49,7 @@ #define UMEM_HEADROOM_TEST_SIZE 128 #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1) #define HUGEPAGE_SIZE (2 * 1024 * 1024) +#define PKT_DUMP_NB_TO_PRINT 16 #define print_verbose(x...) 
do { if (opt_verbose) ksft_print_msg(x); } while (0) @@ -93,13 +86,13 @@ enum test_type { TEST_TYPE_MAX }; -static bool opt_pkt_dump; static bool opt_verbose; struct xsk_umem_info { struct xsk_ring_prod fq; struct xsk_ring_cons cq; struct xsk_umem *umem; + u64 next_buffer; u32 num_frames; u32 frame_headroom; void *buffer; @@ -118,17 +111,17 @@ struct xsk_socket_info { }; struct pkt { - u64 addr; + int offset; u32 len; - u32 payload; + u32 pkt_nb; bool valid; }; struct pkt_stream { u32 nb_pkts; - u32 rx_pkt_nb; + u32 current_pkt_nb; struct pkt *pkts; - bool use_addr_for_fill; + u32 max_pkt_len; }; struct ifobject; @@ -148,11 +141,7 @@ struct ifobject { struct bpf_program *xdp_prog; enum test_mode mode; int ifindex; - u32 dst_ip; - u32 src_ip; u32 bind_flags; - u16 src_port; - u16 dst_port; bool tx_on; bool rx_on; bool use_poll; @@ -161,6 +150,7 @@ struct ifobject { bool release_rx; bool shared_umem; bool use_metadata; + bool unaligned_supp; u8 dst_mac[ETH_ALEN]; u8 src_mac[ETH_ALEN]; }; @@ -184,7 +174,6 @@ struct test_spec { pthread_barrier_t barr; pthread_mutex_t pacing_mutex = PTHREAD_MUTEX_INITIALIZER; -pthread_cond_t pacing_cond = PTHREAD_COND_INITIALIZER; int pkts_in_flight;
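/* [Illustrative sketch, not part of the patch.] Closing summary of the new
 * umem buffer handling introduced above: addresses are handed out by a simple
 * wrap-around bump allocator (umem_alloc_buffer()/umem_reset_alloc()), and
 * over-reserved fill-ring slots are returned via the new
 * xsk_ring_prod__cancel(), which just rewinds cached_prod. Struct and field
 * names below are simplified stand-ins for those in xskxceiver.h.
 */
#include <assert.h>
#include <stdint.h>

struct umem_model {
	uint64_t base_addr;   /* 0, or the upper half for shared-umem Rx */
	uint64_t next_buffer; /* next frame address to hand out */
	uint64_t size;        /* num_frames * frame_size */
	uint32_t frame_size;
};

static uint64_t alloc_buffer(struct umem_model *u)
{
	uint64_t addr = u->next_buffer;

	u->next_buffer += u->frame_size;
	if (u->next_buffer >= u->base_addr + u->size)
		u->next_buffer = u->base_addr; /* wrap around */
	return addr;
}

int main(void)
{
	struct umem_model u = { .base_addr = 0, .next_buffer = 0,
				.size = 2 * 4096, .frame_size = 4096 };

	assert(alloc_buffer(&u) == 0);
	assert(alloc_buffer(&u) == 4096);
	assert(alloc_buffer(&u) == 0); /* wrapped back to base_addr */
	return 0;
}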