Diffstat (limited to 'tools')
39 files changed, 2025 insertions, 389 deletions
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index da16e6a27ccc..0675d6a46413 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -167,12 +167,12 @@ static int get_vendor_id(int ifindex)
 	return strtol(buf, NULL, 0);
 }
 
-static int read_procfs(const char *path)
+static long read_procfs(const char *path)
 {
 	char *endptr, *line = NULL;
 	size_t len = 0;
 	FILE *fd;
-	int res;
+	long res;
 
 	fd = fopen(path, "r");
 	if (!fd)
@@ -194,7 +194,7 @@ static int read_procfs(const char *path)
 
 static void probe_unprivileged_disabled(void)
 {
-	int res;
+	long res;
 
 	/* No support for C-style output */
 
@@ -216,14 +216,14 @@ static void probe_unprivileged_disabled(void)
 			printf("Unable to retrieve required privileges for bpf() syscall\n");
 			break;
 		default:
-			printf("bpf() syscall restriction has unknown value %d\n", res);
+			printf("bpf() syscall restriction has unknown value %ld\n", res);
 		}
 	}
 }
 
 static void probe_jit_enable(void)
 {
-	int res;
+	long res;
 
 	/* No support for C-style output */
 
@@ -245,7 +245,7 @@ static void probe_jit_enable(void)
 			printf("Unable to retrieve JIT-compiler status\n");
 			break;
 		default:
-			printf("JIT-compiler status has unknown value %d\n",
+			printf("JIT-compiler status has unknown value %ld\n",
 			       res);
 		}
 	}
@@ -253,7 +253,7 @@ static void probe_jit_enable(void)
 
 static void probe_jit_harden(void)
 {
-	int res;
+	long res;
 
 	/* No support for C-style output */
 
@@ -275,7 +275,7 @@ static void probe_jit_harden(void)
 			printf("Unable to retrieve JIT hardening status\n");
 			break;
 		default:
-			printf("JIT hardening status has unknown value %d\n",
+			printf("JIT hardening status has unknown value %ld\n",
 			       res);
 		}
 	}
@@ -283,7 +283,7 @@ static void probe_jit_harden(void)
 
 static void probe_jit_kallsyms(void)
 {
-	int res;
+	long res;
 
 	/* No support for C-style output */
 
@@ -302,14 +302,14 @@ static void probe_jit_kallsyms(void)
 			printf("Unable to retrieve JIT kallsyms export status\n");
 			break;
 		default:
-			printf("JIT kallsyms exports status has unknown value %d\n", res);
+			printf("JIT kallsyms exports status has unknown value %ld\n", res);
 		}
 	}
 }
 
 static void probe_jit_limit(void)
 {
-	int res;
+	long res;
 
 	/* No support for C-style output */
 
@@ -322,7 +322,7 @@ static void probe_jit_limit(void)
 			printf("Unable to retrieve global memory limit for JIT compiler for unprivileged users\n");
 			break;
 		default:
-			printf("Global memory limit for JIT compiler for unprivileged users is %d bytes\n", res);
+			printf("Global memory limit for JIT compiler for unprivileged users is %ld bytes\n", res);
 		}
 	}
 }
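The int-to-long switch matters because some of these sysctl knobs (notably /proc/sys/net/core/bpf_jit_limit) hold values that can exceed INT_MAX; strtol() already returns long, so truncating its result into an int was the only bug. A minimal standalone sketch of the same pattern, with error handling trimmed (function name is illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>

/* Read a numeric procfs/sysctl value, keeping the full long range
 * that strtol() provides instead of silently truncating to int.
 */
static long read_procfs_value(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");
	long res = -1;

	if (!f)
		return -1;
	if (fgets(buf, sizeof(buf), f))
		res = strtol(buf, NULL, 0);
	fclose(f);
	return res;
}

int main(void)
{
	printf("bpf_jit_limit: %ld\n",
	       read_procfs_value("/proc/sys/net/core/bpf_jit_limit"));
	return 0;
}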
diff --git a/tools/bpf/bpftool/link.c b/tools/bpf/bpftool/link.c
index d98dbc50cf4c..243b74e18e51 100644
--- a/tools/bpf/bpftool/link.c
+++ b/tools/bpf/bpftool/link.c
@@ -212,7 +212,10 @@ static int show_link_close_json(int fd, struct bpf_link_info *info)
 	case BPF_LINK_TYPE_NETFILTER:
 		netfilter_dump_json(info, json_wtr);
 		break;
-
+	case BPF_LINK_TYPE_STRUCT_OPS:
+		jsonw_uint_field(json_wtr, "map_id",
+				 info->struct_ops.map_id);
+		break;
 	default:
 		break;
 	}
@@ -245,7 +248,10 @@ static void show_link_header_plain(struct bpf_link_info *info)
 	else
 		printf("type %u ", info->type);
 
-	printf("prog %u ", info->prog_id);
+	if (info->type == BPF_LINK_TYPE_STRUCT_OPS)
+		printf("map %u ", info->struct_ops.map_id);
+	else
+		printf("prog %u ", info->prog_id);
 }
 
 static void show_link_attach_type_plain(__u32 attach_type)
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index aaeb8939e137..ae9e822aa3fe 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -139,6 +139,9 @@ static void print_entry_json(struct bpf_map_info *info, unsigned char *key,
 	print_hex_data_json(key, info->key_size);
 	jsonw_name(json_wtr, "value");
 	print_hex_data_json(value, info->value_size);
+	if (map_is_map_of_maps(info->type))
+		jsonw_uint_field(json_wtr, "inner_map_id",
+				 *(unsigned int *)value);
 	if (btf) {
 		struct btf_dumper d = {
 			.btf = btf,
@@ -259,8 +262,13 @@ static void print_entry_plain(struct bpf_map_info *info, unsigned char *key,
 	}
 
 	if (info->value_size) {
-		printf("value:%c", break_names ? '\n' : ' ');
-		fprint_hex(stdout, value, info->value_size, " ");
+		if (map_is_map_of_maps(info->type)) {
+			printf("inner_map_id:%c", break_names ? '\n' : ' ');
+			printf("%u ", *(unsigned int *)value);
+		} else {
+			printf("value:%c", break_names ? '\n' : ' ');
+			fprint_hex(stdout, value, info->value_size, " ");
+		}
 	}
 
 	printf("\n");
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 929a3baca8ef..bbab9ad9dc5a 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -77,16 +77,21 @@
 /*
  * Helper macros to manipulate data structures
  */
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((unsigned long)&((TYPE *)0)->MEMBER)
-#endif
-#ifndef container_of
+
+/* offsetof() definition that uses __builtin_offset() might not preserve field
+ * offset CO-RE relocation properly, so force-redefine offsetof() using
+ * old-school approach which works with CO-RE correctly
+ */
+#undef offsetof
+#define offsetof(type, member) ((unsigned long)&((type *)0)->member)
+
+/* redefined container_of() to ensure we use the above offsetof() macro */
+#undef container_of
 #define container_of(ptr, type, member)				\
 	({							\
 		void *__mptr = (void *)(ptr);			\
 		((type *)(__mptr - offsetof(type, member)));	\
 	})
-#endif
 
 /*
  * Compiler (optimization) barrier.
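The reason for the force-redefine is that a __builtin_offsetof()-based offsetof() can fold the member offset into a plain compile-time constant, dropping the field-offset CO-RE relocation, while the &((type *)0)->member form keeps the field access visible to clang's relocation machinery. A small sketch of the usual consumer, container_of(), with hypothetical struct names (assumes the bpf_helpers.h above is included):

#include <bpf/bpf_helpers.h>

struct inner { int x; };
struct outer {
	long pad;
	struct inner member;	/* offset can be relocated via CO-RE */
};

/* Recover the enclosing object from a pointer to one of its members;
 * with the old-school offsetof(), the member offset stays relocatable
 * when the running kernel's struct layout differs from compile-time BTF.
 */
static inline struct outer *outer_of(struct inner *p)
{
	return container_of(p, struct outer, member);
}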
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index 6fb3d0f9af17..be076a4041ab 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -351,6 +351,7 @@ struct pt_regs___arm64 {
 * https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#risc-v-calling-conventions
 */
 
+/* riscv provides struct user_regs_struct instead of struct pt_regs to userspace */
 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
 #define __PT_PARM1_REG a0
 #define __PT_PARM2_REG a1
@@ -383,7 +384,7 @@ struct pt_regs___arm64 {
 * https://raw.githubusercontent.com/wiki/foss-for-synopsys-dwc-arc-processors/toolchain/files/ARCv2_ABI.pdf
 */
 
-/* arc provides struct user_pt_regs instead of struct pt_regs to userspace */
+/* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
 #define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
 #define __PT_PARM1_REG scratch.r0
 #define __PT_PARM2_REG scratch.r1
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
index 580985ee5545..4d9f30bf7f01 100644
--- a/tools/lib/bpf/btf_dump.c
+++ b/tools/lib/bpf/btf_dump.c
@@ -2250,9 +2250,25 @@ static int btf_dump_type_data_check_overflow(struct btf_dump *d,
 					     const struct btf_type *t,
 					     __u32 id,
 					     const void *data,
-					     __u8 bits_offset)
+					     __u8 bits_offset,
+					     __u8 bit_sz)
 {
-	__s64 size = btf__resolve_size(d->btf, id);
+	__s64 size;
+
+	if (bit_sz) {
+		/* bits_offset is at most 7. bit_sz is at most 128. */
+		__u8 nr_bytes = (bits_offset + bit_sz + 7) / 8;
+
+		/* When bit_sz is non zero, it is called from
+		 * btf_dump_struct_data() where it only cares about
+		 * negative error value.
+		 * Return nr_bytes in success case to make it
+		 * consistent as the regular integer case below.
+		 */
+		return data + nr_bytes > d->typed_dump->data_end ? -E2BIG : nr_bytes;
+	}
+
+	size = btf__resolve_size(d->btf, id);
 
 	if (size < 0 || size >= INT_MAX) {
 		pr_warn("unexpected size [%zu] for id [%u]\n",
@@ -2407,7 +2423,7 @@ static int btf_dump_dump_type_data(struct btf_dump *d,
 {
 	int size, err = 0;
 
-	size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset);
+	size = btf_dump_type_data_check_overflow(d, t, id, data, bits_offset, bit_sz);
 	if (size < 0)
 		return size;
 	err = btf_dump_type_data_check_zero(d, t, id, data, bits_offset, bit_sz);
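The nr_bytes computation is the standard bits-to-bytes ceiling: a bitfield that starts bits_offset bits into its first byte and is bit_sz bits wide touches ceil((bits_offset + bit_sz) / 8) bytes, which with the stated bounds (bits_offset at most 7, bit_sz at most 128) always fits in a __u8. A standalone check of the arithmetic:

#include <assert.h>

/* Bytes spanned by a bitfield, given its offset within the first
 * byte and its width in bits.
 */
static unsigned int bitfield_bytes(unsigned int bits_offset, unsigned int bit_sz)
{
	return (bits_offset + bit_sz + 7) / 8;
}

int main(void)
{
	assert(bitfield_bytes(0, 1) == 1);	/* one bit, one byte */
	assert(bitfield_bytes(0, 8) == 1);	/* exactly one byte */
	assert(bitfield_bytes(7, 2) == 2);	/* straddles a byte boundary */
	assert(bitfield_bytes(7, 128) == 17);	/* worst case: 135 bits */
	return 0;
}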
diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64
index 0a6837f97c32..08adc805878b 100644
--- a/tools/testing/selftests/bpf/DENYLIST.aarch64
+++ b/tools/testing/selftests/bpf/DENYLIST.aarch64
@@ -1,33 +1,6 @@
-bloom_filter_map # libbpf: prog 'check_bloom': failed to attach: ERROR: strerror_r(-524)=22
-bpf_cookie/lsm
-bpf_cookie/multi_kprobe_attach_api
-bpf_cookie/multi_kprobe_link_api
-bpf_cookie/trampoline
-bpf_loop/check_callback_fn_stop # link unexpected error: -524
-bpf_loop/check_invalid_flags
-bpf_loop/check_nested_calls
-bpf_loop/check_non_constant_callback
-bpf_loop/check_nr_loops
-bpf_loop/check_null_callback_ctx
-bpf_loop/check_stack
-bpf_mod_race # bpf_mod_kfunc_race__attach unexpected error: -524 (errno 524)
-bpf_tcp_ca/dctcp_fallback
-btf_dump/btf_dump: var_data # find type id unexpected find type id: actual -2 < expected 0
-cgroup_hierarchical_stats # attach unexpected error: -524 (errno 524)
-d_path/basic # setup attach failed: -524
-deny_namespace # attach unexpected error: -524 (errno 524)
-fentry_fexit # fentry_attach unexpected error: -1 (errno 524)
-fentry_test # fentry_attach unexpected error: -1 (errno 524)
-fexit_sleep # fexit_attach fexit attach failed: -1
-fexit_stress # fexit attach unexpected fexit attach: actual -524 < expected 0
-fexit_test # fexit_attach unexpected error: -1 (errno 524)
-get_func_args_test # get_func_args_test__attach unexpected error: -524 (errno 524) (trampoline)
-get_func_ip_test # get_func_ip_test__attach unexpected error: -524 (errno 524) (trampoline)
-htab_update/reenter_update
-kfree_skb # attach fentry unexpected error: -524 (trampoline)
-kfunc_call/subprog # extern (var ksym) 'bpf_prog_active': not found in kernel BTF
-kfunc_call/subprog_lskel # skel unexpected error: -2
-kfunc_dynptr_param/dynptr_data_null # libbpf: prog 'dynptr_data_null': failed to attach: ERROR: strerror_r(-524)=22
+bpf_cookie/multi_kprobe_attach_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+bpf_cookie/multi_kprobe_link_api # kprobe_multi_link_api_subtest:FAIL:fentry_raw_skel_load unexpected error: -3
+fexit_sleep # The test never returns. The remaining tests cannot start.
 kprobe_multi_bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 kprobe_multi_test/attach_api_addrs # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 kprobe_multi_test/attach_api_pattern # bpf_program__attach_kprobe_multi_opts unexpected error: -95
@@ -35,51 +8,5 @@ kprobe_multi_test/attach_api_syms # bpf_program__attach_kprobe_mu
 kprobe_multi_test/bench_attach # bpf_program__attach_kprobe_multi_opts unexpected error: -95
 kprobe_multi_test/link_api_addrs # link_fd unexpected link_fd: actual -95 < expected 0
 kprobe_multi_test/link_api_syms # link_fd unexpected link_fd: actual -95 < expected 0
-kprobe_multi_test/skel_api # kprobe_multi__attach unexpected error: -524 (errno 524)
-ksyms_module/libbpf # 'bpf_testmod_ksym_percpu': not found in kernel BTF
-ksyms_module/lskel # test_ksyms_module_lskel__open_and_load unexpected error: -2
-libbpf_get_fd_by_id_opts # test_libbpf_get_fd_by_id_opts__attach unexpected error: -524 (errno 524)
-linked_list
-lookup_key # test_lookup_key__attach unexpected error: -524 (errno 524)
-lru_bug # lru_bug__attach unexpected error: -524 (errno 524)
-modify_return # modify_return__attach failed unexpected error: -524 (errno 524)
-module_attach # skel_attach skeleton attach failed: -524
-module_fentry_shadow # bpf_link_create unexpected bpf_link_create: actual -524 < expected 0
-mptcp/base # run_test mptcp unexpected error: -524 (errno 524)
-netcnt # packets unexpected packets: actual 10001 != expected 10000
-rcu_read_lock # failed to attach: ERROR: strerror_r(-524)=22
-recursion # skel_attach unexpected error: -524 (errno 524)
-ringbuf # skel_attach skeleton attachment failed: -1
-setget_sockopt # attach_cgroup unexpected error: -524
-sk_storage_tracing # test_sk_storage_tracing__attach unexpected error: -524 (errno 524)
-skc_to_unix_sock # could not attach BPF object unexpected error: -524 (errno 524)
-socket_cookie # prog_attach unexpected error: -524
-stacktrace_build_id # compare_stack_ips stackmap vs. stack_amap err -1 errno 2
-task_local_storage/exit_creds # skel_attach unexpected error: -524 (errno 524)
-task_local_storage/recursion # skel_attach unexpected error: -524 (errno 524)
-test_bprm_opts # attach attach failed: -524
-test_ima # attach attach failed: -524
-test_local_storage # attach lsm attach failed: -524
-test_lsm # test_lsm_first_attach unexpected error: -524 (errno 524)
-test_overhead # attach_fentry unexpected error: -524
-timer # timer unexpected error: -524 (errno 524)
-timer_crash # timer_crash__attach unexpected error: -524 (errno 524)
-timer_mim # timer_mim unexpected error: -524 (errno 524)
-trace_printk # trace_printk__attach unexpected error: -1 (errno 524)
-trace_vprintk # trace_vprintk__attach unexpected error: -1 (errno 524)
-tracing_struct # tracing_struct__attach unexpected error: -524 (errno 524)
-trampoline_count # attach_prog unexpected error: -524
-unpriv_bpf_disabled # skel_attach unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_post_misaligned # misaligned_skel unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_post_producer_wrong_offset
-user_ringbuf/test_user_ringbuf_post_larger_than_ringbuf_sz
-user_ringbuf/test_user_ringbuf_basic # ringbuf_basic_skel unexpected error: -524 (errno 524)
-user_ringbuf/test_user_ringbuf_sample_full_ring_buffer
-user_ringbuf/test_user_ringbuf_post_alignment_autoadjust
-user_ringbuf/test_user_ringbuf_overfill
-user_ringbuf/test_user_ringbuf_discards_properly_ignored
-user_ringbuf/test_user_ringbuf_loop
-user_ringbuf/test_user_ringbuf_msg_protocol
-user_ringbuf/test_user_ringbuf_blocking_reserve
-verify_pkcs7_sig # test_verify_pkcs7_sig__attach unexpected error: -524 (errno 524)
-vmlinux # skel_attach skeleton attach failed: -524
+kprobe_multi_test/skel_api # libbpf: failed to load BPF skeleton 'kprobe_multi': -3
+module_attach # prog 'kprobe_multi': failed to auto-attach: -95
diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x
index c7463f3ec3c0..5061d9e24c16 100644
--- a/tools/testing/selftests/bpf/DENYLIST.s390x
+++ b/tools/testing/selftests/bpf/DENYLIST.s390x
@@ -26,3 +26,4 @@ user_ringbuf # failed to find kernel BTF type ID of
 verif_stats # trace_vprintk__open_and_load unexpected error: -9 (?)
 xdp_bonding # failed to auto-attach program 'trace_on_entry': -524 (trampoline)
 xdp_metadata # JIT does not support calling kernel function (kfunc)
+test_task_under_cgroup # JIT does not support calling kernel function (kfunc)
diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h
index 8c993ec8ceea..f3c41f8902a0 100644
--- a/tools/testing/selftests/bpf/bpf_kfuncs.h
+++ b/tools/testing/selftests/bpf/bpf_kfuncs.h
@@ -35,4 +35,10 @@ extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, __u32 offset,
 extern void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *ptr, __u32 offset,
 				   void *buffer, __u32 buffer__szk) __ksym;
 
+extern int bpf_dynptr_adjust(const struct bpf_dynptr *ptr, __u32 start, __u32 end) __ksym;
+extern int bpf_dynptr_is_null(const struct bpf_dynptr *ptr) __ksym;
+extern int bpf_dynptr_is_rdonly(const struct bpf_dynptr *ptr) __ksym;
+extern __u32 bpf_dynptr_size(const struct bpf_dynptr *ptr) __ksym;
+extern int bpf_dynptr_clone(const struct bpf_dynptr *ptr, struct bpf_dynptr *clone__init) __ksym;
+
 #endif
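Taken together, the new declarations cover adjusting a dynptr's view, querying its state, and duplicating it. A minimal sketch of how they compose, assuming the selftests' `ringbuf` map and the bpf_kfuncs.h declarations above (program name and sizes are illustrative):

SEC("tp/syscalls/sys_enter_nanosleep")
int dynptr_kfuncs_sketch(void *ctx)
{
	struct bpf_dynptr ptr, clone;

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr))
		goto out;			/* reservation failed */

	bpf_dynptr_clone(&ptr, &clone);		/* same underlying buffer */
	bpf_dynptr_adjust(&clone, 8, 24);	/* clone now views bytes [8, 24) */

	if (!bpf_dynptr_is_null(&clone) && !bpf_dynptr_is_rdonly(&clone))
		bpf_printk("clone size: %u", bpf_dynptr_size(&clone)); /* 16 */
out:
	bpf_ringbuf_discard_dynptr(&ptr, 0);	/* also invalidates the clone */
	return 0;
}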
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
index b17bfa0e0aac..bb143de68875 100644
--- a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
+++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
@@ -96,12 +96,80 @@ static void test_parse_test_list(void)
 		goto error;
 	ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
 	ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
+	free_test_filter_set(&set);
+
+	ASSERT_OK(parse_test_list("t/subtest1,t/subtest2", &set, true),
+		  "parsing");
+	if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+		goto error;
+	if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+		goto error;
+	if (!ASSERT_EQ(set.tests[0].subtest_cnt, 2, "subtest filters count"))
+		goto error;
+	ASSERT_OK(strcmp("t", set.tests[0].name), "test name");
+	ASSERT_OK(strcmp("subtest1", set.tests[0].subtests[0]), "subtest name");
+	ASSERT_OK(strcmp("subtest2", set.tests[0].subtests[1]), "subtest name");
 error:
 	free_test_filter_set(&set);
 }
 
+static void test_parse_test_list_file(void)
+{
+	struct test_filter_set set;
+	char tmpfile[80];
+	FILE *fp;
+	int fd;
+
+	snprintf(tmpfile, sizeof(tmpfile), "/tmp/bpf_arg_parsing_test.XXXXXX");
+	fd = mkstemp(tmpfile);
+	if (!ASSERT_GE(fd, 0, "create tmp"))
+		return;
+
+	fp = fdopen(fd, "w");
+	if (!ASSERT_NEQ(fp, NULL, "fdopen tmp")) {
+		close(fd);
+		goto out_remove;
+	}
+
+	fprintf(fp, "# comment\n");
+	fprintf(fp, " test_with_spaces \n");
+	fprintf(fp, "testA/subtest # comment\n");
+	fprintf(fp, "testB#comment with no space\n");
+	fprintf(fp, "testB # duplicate\n");
+	fprintf(fp, "testA/subtest # subtest duplicate\n");
+	fprintf(fp, "testA/subtest2\n");
+	fprintf(fp, "testC_no_eof_newline");
+	fflush(fp);
+
+	if (!ASSERT_OK(ferror(fp), "prepare tmp"))
+		goto out_fclose;
+
+	init_test_filter_set(&set);
+
+	ASSERT_OK(parse_test_list_file(tmpfile, &set, true), "parse file");
+
+	ASSERT_EQ(set.cnt, 4, "test count");
+	ASSERT_OK(strcmp("test_with_spaces", set.tests[0].name), "test 0 name");
+	ASSERT_EQ(set.tests[0].subtest_cnt, 0, "test 0 subtest count");
+	ASSERT_OK(strcmp("testA", set.tests[1].name), "test 1 name");
+	ASSERT_EQ(set.tests[1].subtest_cnt, 2, "test 1 subtest count");
+	ASSERT_OK(strcmp("subtest", set.tests[1].subtests[0]), "test 1 subtest 0");
+	ASSERT_OK(strcmp("subtest2", set.tests[1].subtests[1]), "test 1 subtest 1");
+	ASSERT_OK(strcmp("testB", set.tests[2].name), "test 2 name");
+	ASSERT_OK(strcmp("testC_no_eof_newline", set.tests[3].name), "test 3 name");
+
+	free_test_filter_set(&set);
+
+out_fclose:
+	fclose(fp);
+out_remove:
+	remove(tmpfile);
+}
+
 void test_arg_parsing(void)
 {
 	if (test__start_subtest("test_parse_test_list"))
 		test_parse_test_list();
+	if (test__start_subtest("test_parse_test_list_file"))
+		test_parse_test_list_file();
 }
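The assertions spell out the file format parse_test_list_file() accepts: one test (optionally test/subtest) per line, '#' starts a comment whether or not whitespace precedes it, surrounding whitespace is trimmed, duplicate test names merge their subtest filters, and a trailing newline is optional. An example filter file under those rules (test names hypothetical):

# full-line comment
  some_test                  # surrounding whitespace is trimmed
other_test/subtest_a         # selects a single subtest
other_test/subtest_b         # merged into the other_test entry above
third_test#a comment may directly follow the name
last_test_no_trailing_newline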
diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c
index 4d2fa99273d8..2bb5773d6f99 100644
--- a/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c
+++ b/tools/testing/selftests/bpf/prog_tests/cgroup_getset_retval.c
@@ -25,6 +25,8 @@ static void test_setsockopt_set(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that sets EUNATCH, assert that
 	 * we actually get that error when we run setsockopt()
 	 */
@@ -59,6 +61,8 @@ static void test_setsockopt_set_and_get(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that sets EUNATCH, and one that gets the
 	 * previously set errno. Assert that we get the same errno back.
 	 */
@@ -100,6 +104,8 @@ static void test_setsockopt_default_zero(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that gets the previously set errno.
 	 * Assert that, without anything setting one, we get 0.
 	 */
@@ -134,6 +140,8 @@ static void test_setsockopt_default_zero_and_set(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that gets the previously set errno, and then
 	 * one that sets the errno to EUNATCH. Assert that the get does not
 	 * see EUNATCH set later, and does not prevent EUNATCH from being set.
@@ -177,6 +185,8 @@ static void test_setsockopt_override(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that sets EUNATCH, then one that sets EISCONN,
 	 * and then one that gets the exported errno. Assert both the syscall
 	 * and the helper sees the last set errno.
@@ -224,6 +234,8 @@ static void test_setsockopt_legacy_eperm(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that return a reject without setting errno
 	 * (legacy reject), and one that gets the errno. Assert that for
 	 * backward compatibility the syscall result in EPERM, and this
@@ -268,6 +280,8 @@ static void test_setsockopt_legacy_no_override(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach setsockopt that sets EUNATCH, then one that return a reject
 	 * without setting errno, and then one that gets the exported errno.
 	 * Assert both the syscall and the helper's errno are unaffected by
@@ -319,6 +333,8 @@ static void test_getsockopt_get(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach getsockopt that gets previously set errno. Assert that the
 	 * error from kernel is in both ctx_retval_value and retval_value.
 	 */
@@ -359,6 +375,8 @@ static void test_getsockopt_override(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach getsockopt that sets retval to -EISCONN. Assert that this
 	 * overrides the value from kernel.
 	 */
@@ -396,6 +414,8 @@ static void test_getsockopt_retval_sync(int cgroup_fd, int sock_fd)
 	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	/* Attach getsockopt that sets retval to -EISCONN, and one that clears
 	 * ctx retval. Assert that the clearing ctx retval is synced to helper
 	 * and clears any errors both from kernel and BPF..
diff --git a/tools/testing/selftests/bpf/prog_tests/dynptr.c b/tools/testing/selftests/bpf/prog_tests/dynptr.c
index d176c34a7d2e..7cfac53c0d58 100644
--- a/tools/testing/selftests/bpf/prog_tests/dynptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/dynptr.c
@@ -20,6 +20,14 @@ static struct {
 	{"test_ringbuf", SETUP_SYSCALL_SLEEP},
 	{"test_skb_readonly", SETUP_SKB_PROG},
 	{"test_dynptr_skb_data", SETUP_SKB_PROG},
+	{"test_adjust", SETUP_SYSCALL_SLEEP},
+	{"test_adjust_err", SETUP_SYSCALL_SLEEP},
+	{"test_zero_size_dynptr", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_is_null", SETUP_SYSCALL_SLEEP},
+	{"test_dynptr_is_rdonly", SETUP_SKB_PROG},
+	{"test_dynptr_clone", SETUP_SKB_PROG},
+	{"test_dynptr_skb_no_buff", SETUP_SKB_PROG},
+	{"test_dynptr_skb_strcmp", SETUP_SKB_PROG},
 };
 
 static void verify_success(const char *prog_name, enum test_setup_type setup_type)
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt.c b/tools/testing/selftests/bpf/prog_tests/sockopt.c
index aa4debf62fc6..33dd4532e642 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt.c
@@ -5,10 +5,15 @@
 static char bpf_log_buf[4096];
 static bool verbose;
 
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
 enum sockopt_test_error {
 	OK = 0,
 	DENY_LOAD,
 	DENY_ATTACH,
+	EOPNOTSUPP_GETSOCKOPT,
 	EPERM_GETSOCKOPT,
 	EFAULT_GETSOCKOPT,
 	EPERM_SETSOCKOPT,
@@ -273,10 +278,31 @@ static struct sockopt_test {
 		.error = EFAULT_GETSOCKOPT,
 	},
 	{
-		.descr = "getsockopt: deny arbitrary ctx->retval",
+		.descr = "getsockopt: ignore >PAGE_SIZE optlen",
 		.insns = {
-			/* ctx->retval = 123 */
-			BPF_MOV64_IMM(BPF_REG_0, 123),
+			/* write 0xFF to the first optval byte */
+
+			/* r6 = ctx->optval */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct bpf_sockopt, optval)),
+			/* r2 = ctx->optval */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+			/* r6 = ctx->optval + 1 */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+
+			/* r7 = ctx->optval_end */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct bpf_sockopt, optval_end)),
+
+			/* if (ctx->optval + 1 <= ctx->optval_end) { */
+			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
+			/* ctx->optval[0] = 0xFF */
+			BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xFF),
+			/* } */
+
+			/* retval changes are ignored */
+			/* ctx->retval = 5 */
+			BPF_MOV64_IMM(BPF_REG_0, 5),
 			BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
 				    offsetof(struct bpf_sockopt, retval)),
 
@@ -287,9 +313,11 @@ static struct sockopt_test {
 		.attach_type = BPF_CGROUP_GETSOCKOPT,
 		.expected_attach_type = BPF_CGROUP_GETSOCKOPT,
 
-		.get_optlen = 64,
-
-		.error = EFAULT_GETSOCKOPT,
+		.get_level = 1234,
+		.get_optname = 5678,
+		.get_optval = {}, /* the changes are ignored */
+		.get_optlen = PAGE_SIZE + 1,
+		.error = EOPNOTSUPP_GETSOCKOPT,
 	},
 	{
 		.descr = "getsockopt: support smaller ctx->optlen",
@@ -649,6 +677,45 @@ static struct sockopt_test {
 		.error = EFAULT_SETSOCKOPT,
 	},
 	{
+		.descr = "setsockopt: ignore >PAGE_SIZE optlen",
+		.insns = {
+			/* write 0xF0 to the first optval byte */
+
+			/* r6 = ctx->optval */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1,
+				    offsetof(struct bpf_sockopt, optval)),
+			/* r2 = ctx->optval */
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_6),
+			/* r6 = ctx->optval + 1 */
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 1),
+
+			/* r7 = ctx->optval_end */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1,
+				    offsetof(struct bpf_sockopt, optval_end)),
+
+			/* if (ctx->optval + 1 <= ctx->optval_end) { */
+			BPF_JMP_REG(BPF_JGT, BPF_REG_6, BPF_REG_7, 1),
+			/* ctx->optval[0] = 0xF0 */
+			BPF_ST_MEM(BPF_B, BPF_REG_2, 0, 0xF0),
+			/* } */
+
+			BPF_MOV64_IMM(BPF_REG_0, 1),
+			BPF_EXIT_INSN(),
+		},
+		.attach_type = BPF_CGROUP_SETSOCKOPT,
+		.expected_attach_type = BPF_CGROUP_SETSOCKOPT,
+
+		.set_level = SOL_IP,
+		.set_optname = IP_TOS,
+		.set_optval = {},
+		.set_optlen = PAGE_SIZE + 1,
+
+		.get_level = SOL_IP,
+		.get_optname = IP_TOS,
+		.get_optval = {}, /* the changes are ignored */
+		.get_optlen = 4,
+	},
+	{
 		.descr = "setsockopt: allow changing ctx->optlen within bounds",
 		.insns = {
 			/* r6 = ctx->optval */
@@ -906,6 +973,13 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
 	}
 
 	if (test->set_optlen) {
+		if (test->set_optlen >= PAGE_SIZE) {
+			int num_pages = test->set_optlen / PAGE_SIZE;
+			int remainder = test->set_optlen % PAGE_SIZE;
+
+			test->set_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
+		}
+
 		err = setsockopt(sock_fd, test->set_level, test->set_optname,
 				 test->set_optval, test->set_optlen);
 		if (err) {
@@ -921,7 +995,15 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
 	}
 
 	if (test->get_optlen) {
+		if (test->get_optlen >= PAGE_SIZE) {
+			int num_pages = test->get_optlen / PAGE_SIZE;
+			int remainder = test->get_optlen % PAGE_SIZE;
+
+			test->get_optlen = num_pages * sysconf(_SC_PAGESIZE) + remainder;
+		}
+
 		optval = malloc(test->get_optlen);
+		memset(optval, 0, test->get_optlen);
 		socklen_t optlen = test->get_optlen;
 		socklen_t expected_get_optlen = test->get_optlen_ret ?:
 			test->get_optlen;
@@ -929,6 +1011,8 @@ static int run_test(int cgroup_fd, struct sockopt_test *test)
 		err = getsockopt(sock_fd, test->get_level, test->get_optname,
 				 optval, &optlen);
 		if (err) {
+			if (errno == EOPNOTSUPP && test->error == EOPNOTSUPP_GETSOCKOPT)
+				goto free_optval;
 			if (errno == EPERM && test->error == EPERM_GETSOCKOPT)
 				goto free_optval;
 			if (errno == EFAULT && test->error == EFAULT_GETSOCKOPT)
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
index 60c17a8e2789..917f486db826 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c
@@ -2,6 +2,8 @@
 #include <test_progs.h>
 #include "cgroup_helpers.h"
 
+#include "sockopt_inherit.skel.h"
+
 #define SOL_CUSTOM 0xdeadbeef
 #define CUSTOM_INHERIT1 0
 #define CUSTOM_INHERIT2 1
@@ -132,58 +134,30 @@ static int start_server(void)
 	return fd;
 }
 
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title,
-		       const char *prog_name)
-{
-	enum bpf_attach_type attach_type;
-	enum bpf_prog_type prog_type;
-	struct bpf_program *prog;
-	int err;
-
-	err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
-	if (err) {
-		log_err("Failed to deduct types for %s BPF program", prog_name);
-		return -1;
-	}
-
-	prog = bpf_object__find_program_by_name(obj, prog_name);
-	if (!prog) {
-		log_err("Failed to find %s BPF program", prog_name);
-		return -1;
-	}
-
-	err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
-			      attach_type, 0);
-	if (err) {
-		log_err("Failed to attach %s BPF program", prog_name);
-		return -1;
-	}
-
-	return 0;
-}
-
 static void run_test(int cgroup_fd)
 {
+	struct bpf_link *link_getsockopt = NULL;
+	struct bpf_link *link_setsockopt = NULL;
 	int server_fd = -1, client_fd;
-	struct bpf_object *obj;
+	struct sockopt_inherit *obj;
 	void *server_err;
 	pthread_t tid;
 	int err;
 
-	obj = bpf_object__open_file("sockopt_inherit.bpf.o", NULL);
-	if (!ASSERT_OK_PTR(obj, "obj_open"))
+	obj = sockopt_inherit__open_and_load();
+	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		return;
 
-	err = bpf_object__load(obj);
-	if (!ASSERT_OK(err, "obj_load"))
-		goto close_bpf_object;
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
 
-	err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt", "_getsockopt");
-	if (!ASSERT_OK(err, "prog_attach _getsockopt"))
+	link_getsockopt = bpf_program__attach_cgroup(obj->progs._getsockopt,
+						     cgroup_fd);
+	if (!ASSERT_OK_PTR(link_getsockopt, "cg-attach-getsockopt"))
 		goto close_bpf_object;
 
-	err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt", "_setsockopt");
-	if (!ASSERT_OK(err, "prog_attach _setsockopt"))
+	link_setsockopt = bpf_program__attach_cgroup(obj->progs._setsockopt,
+						     cgroup_fd);
+	if (!ASSERT_OK_PTR(link_setsockopt, "cg-attach-setsockopt"))
 		goto close_bpf_object;
 
 	server_fd = start_server();
@@ -217,7 +191,10 @@ static void run_test(int cgroup_fd)
 close_server_fd:
 	close(server_fd);
 close_bpf_object:
-	bpf_object__close(obj);
+	bpf_link__destroy(link_getsockopt);
+	bpf_link__destroy(link_setsockopt);
+
+	sockopt_inherit__destroy(obj);
 }
 
 void test_sockopt_inherit(void)
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
index 7f5659349011..759bbb6f8c5f 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_multi.c
@@ -2,61 +2,13 @@
 #include <test_progs.h>
 #include "cgroup_helpers.h"
 
-static int prog_attach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
-{
-	enum bpf_attach_type attach_type;
-	enum bpf_prog_type prog_type;
-	struct bpf_program *prog;
-	int err;
-
-	err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
-	if (err) {
-		log_err("Failed to deduct types for %s BPF program", title);
-		return -1;
-	}
-
-	prog = bpf_object__find_program_by_name(obj, name);
-	if (!prog) {
-		log_err("Failed to find %s BPF program", name);
-		return -1;
-	}
-
-	err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd,
-			      attach_type, BPF_F_ALLOW_MULTI);
-	if (err) {
-		log_err("Failed to attach %s BPF program", name);
-		return -1;
-	}
-
-	return 0;
-}
+#include "sockopt_multi.skel.h"
 
-static int prog_detach(struct bpf_object *obj, int cgroup_fd, const char *title, const char *name)
-{
-	enum bpf_attach_type attach_type;
-	enum bpf_prog_type prog_type;
-	struct bpf_program *prog;
-	int err;
-
-	err = libbpf_prog_type_by_name(title, &prog_type, &attach_type);
-	if (err)
-		return -1;
-
-	prog = bpf_object__find_program_by_name(obj, name);
-	if (!prog)
-		return -1;
-
-	err = bpf_prog_detach2(bpf_program__fd(prog), cgroup_fd,
-			       attach_type);
-	if (err)
-		return -1;
-
-	return 0;
-}
-
-static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
+static int run_getsockopt_test(struct sockopt_multi *obj, int cg_parent,
 			       int cg_child, int sock_fd)
 {
+	struct bpf_link *link_parent = NULL;
+	struct bpf_link *link_child = NULL;
 	socklen_t optlen;
 	__u8 buf;
 	int err;
@@ -89,8 +41,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - child:  0x80 -> 0x90
 	 */
 
-	err = prog_attach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
-	if (err)
+	link_child = bpf_program__attach_cgroup(obj->progs._getsockopt_child,
+						cg_child);
+	if (!ASSERT_OK_PTR(link_child, "cg-attach-getsockopt_child"))
 		goto detach;
 
 	buf = 0x00;
@@ -113,8 +66,9 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - parent: 0x90 -> 0xA0
 	 */
 
-	err = prog_attach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
-	if (err)
+	link_parent = bpf_program__attach_cgroup(obj->progs._getsockopt_parent,
+						 cg_parent);
+	if (!ASSERT_OK_PTR(link_parent, "cg-attach-getsockopt_parent"))
 		goto detach;
 
 	buf = 0x00;
@@ -157,11 +111,8 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	 * - parent: unexpected 0x40, EPERM
 	 */
 
-	err = prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
-	if (err) {
-		log_err("Failed to detach child program");
-		goto detach;
-	}
+	bpf_link__destroy(link_child);
+	link_child = NULL;
 
 	buf = 0x00;
 	optlen = 1;
@@ -198,15 +149,17 @@ static int run_getsockopt_test(struct bpf_object *obj, int cg_parent,
 	}
 
 detach:
-	prog_detach(obj, cg_child, "cgroup/getsockopt", "_getsockopt_child");
-	prog_detach(obj, cg_parent, "cgroup/getsockopt", "_getsockopt_parent");
+	bpf_link__destroy(link_child);
+	bpf_link__destroy(link_parent);
 
 	return err;
 }
 
-static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
+static int run_setsockopt_test(struct sockopt_multi *obj, int cg_parent,
 			       int cg_child, int sock_fd)
 {
+	struct bpf_link *link_parent = NULL;
+	struct bpf_link *link_child = NULL;
 	socklen_t optlen;
 	__u8 buf;
 	int err;
@@ -236,8 +189,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
 
 	/* Attach child program and make sure it adds 0x10. */
 
-	err = prog_attach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
-	if (err)
+	link_child = bpf_program__attach_cgroup(obj->progs._setsockopt,
+						cg_child);
+	if (!ASSERT_OK_PTR(link_child, "cg-attach-setsockopt_child"))
 		goto detach;
 
 	buf = 0x80;
@@ -263,8 +217,9 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
 
 	/* Attach parent program and make sure it adds another 0x10. */
 
-	err = prog_attach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
-	if (err)
+	link_parent = bpf_program__attach_cgroup(obj->progs._setsockopt,
+						 cg_parent);
+	if (!ASSERT_OK_PTR(link_parent, "cg-attach-setsockopt_parent"))
 		goto detach;
 
 	buf = 0x80;
@@ -289,8 +244,8 @@ static int run_setsockopt_test(struct bpf_object *obj, int cg_parent,
 	}
 
 detach:
-	prog_detach(obj, cg_child, "cgroup/setsockopt", "_setsockopt");
-	prog_detach(obj, cg_parent, "cgroup/setsockopt", "_setsockopt");
+	bpf_link__destroy(link_child);
+	bpf_link__destroy(link_parent);
 
 	return err;
 }
@@ -298,9 +253,8 @@ detach:
 void test_sockopt_multi(void)
 {
 	int cg_parent = -1, cg_child = -1;
-	struct bpf_object *obj = NULL;
+	struct sockopt_multi *obj = NULL;
 	int sock_fd = -1;
-	int err = -1;
 
 	cg_parent = test__join_cgroup("/parent");
 	if (!ASSERT_GE(cg_parent, 0, "join_cgroup /parent"))
		goto out;
@@ -310,13 +264,11 @@ void test_sockopt_multi(void)
 	if (!ASSERT_GE(cg_child, 0, "join_cgroup /parent/child"))
 		goto out;
 
-	obj = bpf_object__open_file("sockopt_multi.bpf.o", NULL);
-	if (!ASSERT_OK_PTR(obj, "obj_load"))
+	obj = sockopt_multi__open_and_load();
+	if (!ASSERT_OK_PTR(obj, "skel-load"))
 		goto out;
 
-	err = bpf_object__load(obj);
-	if (!ASSERT_OK(err, "obj_load"))
-		goto out;
+	obj->bss->page_size = sysconf(_SC_PAGESIZE);
 
 	sock_fd = socket(AF_INET, SOCK_STREAM, 0);
 	if (!ASSERT_GE(sock_fd, 0, "socket"))
@@ -327,7 +279,7 @@ void test_sockopt_multi(void)
 
 out:
 	close(sock_fd);
-	bpf_object__close(obj);
+	sockopt_multi__destroy(obj);
 	close(cg_child);
 	close(cg_parent);
 }
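Both test conversions above apply the same modernization: a generated skeleton plus bpf_program__attach_cgroup() returning a bpf_link, in place of bpf_object__open_file() with manual bpf_prog_attach()/bpf_prog_detach2() bookkeeping. A hedged sketch of the pattern with a hypothetical skeleton named my_skel:

#include "my_skel.skel.h"	/* hypothetical generated skeleton */

static int run_on_cgroup(int cgroup_fd)
{
	struct my_skel *obj;
	struct bpf_link *link;
	int err = -1;

	obj = my_skel__open_and_load();
	if (!obj)
		return -1;

	/* attach_cgroup() returns a bpf_link; destroying the link
	 * detaches the program, so no detach-by-name plumbing is needed
	 */
	link = bpf_program__attach_cgroup(obj->progs.my_prog, cgroup_fd);
	if (!link)
		goto out;

	/* ... exercise the program ... */
	err = 0;

	bpf_link__destroy(link);	/* detaches */
out:
	my_skel__destroy(obj);
	return err;
}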
diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c
index 6b53b3cb8dad..6b2d300e9fd4 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockopt_qos_to_cc.c
@@ -42,6 +42,8 @@ void test_sockopt_qos_to_cc(void)
 	if (!ASSERT_OK_PTR(skel, "skel"))
 		goto done;
 
+	skel->bss->page_size = sysconf(_SC_PAGESIZE);
+
 	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
 	if (!ASSERT_GE(sock_fd, 0, "v6 socket open"))
 		goto done;
diff --git a/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
new file mode 100644
index 000000000000..4224727fb364
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/task_under_cgroup.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023 Bytedance */
+
+#include <sys/syscall.h>
+#include <test_progs.h>
+#include <cgroup_helpers.h>
+#include "test_task_under_cgroup.skel.h"
+
+#define FOO "/foo"
+
+void test_task_under_cgroup(void)
+{
+	struct test_task_under_cgroup *skel;
+	int ret, foo;
+	pid_t pid;
+
+	foo = test__join_cgroup(FOO);
+	if (!ASSERT_OK(foo < 0, "cgroup_join_foo"))
+		return;
+
+	skel = test_task_under_cgroup__open();
+	if (!ASSERT_OK_PTR(skel, "test_task_under_cgroup__open"))
+		goto cleanup;
+
+	skel->rodata->local_pid = getpid();
+	skel->bss->remote_pid = getpid();
+	skel->rodata->cgid = get_cgroup_id(FOO);
+
+	ret = test_task_under_cgroup__load(skel);
+	if (!ASSERT_OK(ret, "test_task_under_cgroup__load"))
+		goto cleanup;
+
+	ret = test_task_under_cgroup__attach(skel);
+	if (!ASSERT_OK(ret, "test_task_under_cgroup__attach"))
+		goto cleanup;
+
+	pid = fork();
+	if (pid == 0)
+		exit(0);
+
+	ret = (pid == -1);
+	if (ASSERT_OK(ret, "fork process"))
+		wait(NULL);
+
+	test_task_under_cgroup__detach(skel);
+
+	ASSERT_NEQ(skel->bss->remote_pid, skel->rodata->local_pid,
+		   "test task_under_cgroup");
+
+cleanup:
+	test_task_under_cgroup__destroy(skel);
+	close(foo);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c
index 2497716ee379..531621adef42 100644
--- a/tools/testing/selftests/bpf/prog_tests/verifier.c
+++ b/tools/testing/selftests/bpf/prog_tests/verifier.c
@@ -55,6 +55,7 @@
 #include "verifier_spill_fill.skel.h"
 #include "verifier_spin_lock.skel.h"
 #include "verifier_stack_ptr.skel.h"
+#include "verifier_subprog_precision.skel.h"
 #include "verifier_subreg.skel.h"
 #include "verifier_uninit.skel.h"
 #include "verifier_unpriv.skel.h"
@@ -154,6 +155,7 @@ void test_verifier_sock(void) { RUN(verifier_sock); }
 void test_verifier_spill_fill(void) { RUN(verifier_spill_fill); }
 void test_verifier_spin_lock(void) { RUN(verifier_spin_lock); }
 void test_verifier_stack_ptr(void) { RUN(verifier_stack_ptr); }
+void test_verifier_subprog_precision(void) { RUN(verifier_subprog_precision); }
 void test_verifier_subreg(void) { RUN(verifier_subreg); }
 void test_verifier_uninit(void) { RUN(verifier_uninit); }
 void test_verifier_unpriv(void) { RUN(verifier_unpriv); }
diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h
index d3c1217ba79a..38a57a2e70db 100644
--- a/tools/testing/selftests/bpf/progs/bpf_misc.h
+++ b/tools/testing/selftests/bpf/progs/bpf_misc.h
@@ -86,6 +86,10 @@
 #define POINTER_VALUE 0xcafe4all
 #define TEST_DATA_LEN 64
 
+#ifndef __used
+#define __used __attribute__((used))
+#endif
+
 #if defined(__TARGET_ARCH_x86)
 #define SYSCALL_WRAPPER 1
 #define SYS_PREFIX "__x64_"
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
index b2a409e6382a..932b8ecd4ae3 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_getsockopt.c
@@ -12,6 +12,7 @@ __u32 invocations = 0;
 __u32 assertion_error = 0;
 __u32 retval_value = 0;
 __u32 ctx_retval_value = 0;
+__u32 page_size = 0;
 
 SEC("cgroup/getsockopt")
 int get_retval(struct bpf_sockopt *ctx)
@@ -20,6 +21,10 @@ int get_retval(struct bpf_sockopt *ctx)
 	ctx_retval_value = ctx->retval;
 	__sync_fetch_and_add(&invocations, 1);
 
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 1;
 }
 
@@ -31,6 +36,10 @@ int set_eisconn(struct bpf_sockopt *ctx)
 	if (bpf_set_retval(-EISCONN))
 		assertion_error = 1;
 
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 1;
 }
 
@@ -41,5 +50,9 @@ int clear_retval(struct bpf_sockopt *ctx)
 
 	ctx->retval = 0;
 
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 1;
 }
diff --git a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
index d6e5903e06ba..b7fa8804e19d 100644
--- a/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
+++ b/tools/testing/selftests/bpf/progs/cgroup_getset_retval_setsockopt.c
@@ -11,6 +11,7 @@
 __u32 invocations = 0;
 __u32 assertion_error = 0;
 __u32 retval_value = 0;
+__u32 page_size = 0;
 
 SEC("cgroup/setsockopt")
 int get_retval(struct bpf_sockopt *ctx)
@@ -18,6 +19,10 @@ int get_retval(struct bpf_sockopt *ctx)
 	retval_value = bpf_get_retval();
 	__sync_fetch_and_add(&invocations, 1);
 
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 1;
 }
 
@@ -29,6 +34,10 @@ int set_eunatch(struct bpf_sockopt *ctx)
 	if (bpf_set_retval(-EUNATCH))
 		assertion_error = 1;
 
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 0;
 }
 
@@ -40,6 +49,10 @@ int set_eisconn(struct bpf_sockopt *ctx)
 	if (bpf_set_retval(-EISCONN))
 		assertion_error = 1;
 
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 0;
 }
 
@@ -48,5 +61,9 @@ int legacy_eperm(struct bpf_sockopt *ctx)
 {
 	__sync_fetch_and_add(&invocations, 1);
 
+	/* optval larger than PAGE_SIZE use kernel's buffer. */
+	if (ctx->optlen > page_size)
+		ctx->optlen = 0;
+
 	return 0;
 }
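The hunk repeated across these BPF programs is the contract that goes with the new >PAGE_SIZE behavior: when userspace passes an optlen larger than a page, the program is not shown the full buffer, so a program that is not interested opts out by zeroing ctx->optlen and, as the recurring comment puts it, the kernel keeps using its own buffer untouched. Isolated, the guard looks like this (a sketch assuming a __u32 page_size global filled in from userspace, as in the tests above):

SEC("cgroup/getsockopt")
int optlen_guard(struct bpf_sockopt *ctx)
{
	/* optval larger than a page is not fully exposed to the program;
	 * ctx->optlen = 0 tells the kernel to bypass us and use its own
	 * buffer. page_size is set via the skeleton from
	 * sysconf(_SC_PAGESIZE), since the page size is not a portable
	 * compile-time constant.
	 */
	if (ctx->optlen > page_size)
		ctx->optlen = 0;

	return 1;	/* allow */
}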
diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c
index 759eb5c245cd..c2f0e18af951 100644
--- a/tools/testing/selftests/bpf/progs/dynptr_fail.c
+++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c
@@ -1378,3 +1378,310 @@ int invalid_slice_rdwr_rdonly(struct __sk_buff *skb)
 
 	return 0;
 }
+
+/* bpf_dynptr_adjust can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_adjust_invalid(void *ctx)
+{
+	struct bpf_dynptr ptr;
+
+	/* this should fail */
+	bpf_dynptr_adjust(&ptr, 1, 2);
+
+	return 0;
+}
+
+/* bpf_dynptr_is_null can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_is_null_invalid(void *ctx)
+{
+	struct bpf_dynptr ptr;
+
+	/* this should fail */
+	bpf_dynptr_is_null(&ptr);
+
+	return 0;
+}
+
+/* bpf_dynptr_is_rdonly can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_is_rdonly_invalid(void *ctx)
+{
+	struct bpf_dynptr ptr;
+
+	/* this should fail */
+	bpf_dynptr_is_rdonly(&ptr);
+
+	return 0;
+}
+
+/* bpf_dynptr_size can only be called on initialized dynptrs */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int dynptr_size_invalid(void *ctx)
+{
+	struct bpf_dynptr ptr;
+
+	/* this should fail */
+	bpf_dynptr_size(&ptr);
+
+	return 0;
+}
+
+/* Only initialized dynptrs can be cloned */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #1")
+int clone_invalid1(void *ctx)
+{
+	struct bpf_dynptr ptr1;
+	struct bpf_dynptr ptr2;
+
+	/* this should fail */
+	bpf_dynptr_clone(&ptr1, &ptr2);
+
+	return 0;
+}
+
+/* Can't overwrite an existing dynptr when cloning */
+SEC("?xdp")
+__failure __msg("cannot overwrite referenced dynptr")
+int clone_invalid2(struct xdp_md *xdp)
+{
+	struct bpf_dynptr ptr1;
+	struct bpf_dynptr clone;
+
+	bpf_dynptr_from_xdp(xdp, 0, &ptr1);
+
+	bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &clone);
+
+	/* this should fail */
+	bpf_dynptr_clone(&ptr1, &clone);
+
+	bpf_ringbuf_submit_dynptr(&clone, 0);
+
+	return 0;
+}
+
+/* Invalidating a dynptr should invalidate its clones */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate1(void *ctx)
+{
+	struct bpf_dynptr clone;
+	struct bpf_dynptr ptr;
+	char read_data[64];
+
+	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+	bpf_dynptr_clone(&ptr, &clone);
+
+	bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+	/* this should fail */
+	bpf_dynptr_read(read_data, sizeof(read_data), &clone, 0, 0);
+
+	return 0;
+}
+
+/* Invalidating a dynptr should invalidate its parent */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate2(void *ctx)
+{
+	struct bpf_dynptr ptr;
+	struct bpf_dynptr clone;
+	char read_data[64];
+
+	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+	bpf_dynptr_clone(&ptr, &clone);
+
+	bpf_ringbuf_submit_dynptr(&clone, 0);
+
+	/* this should fail */
+	bpf_dynptr_read(read_data, sizeof(read_data), &ptr, 0, 0);
+
+	return 0;
+}
+
+/* Invalidating a dynptr should invalidate its siblings */
+SEC("?raw_tp")
+__failure __msg("Expected an initialized dynptr as arg #3")
+int clone_invalidate3(void *ctx)
+{
+	struct bpf_dynptr ptr;
+	struct bpf_dynptr clone1;
+	struct bpf_dynptr clone2;
+	char read_data[64];
+
+	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+	bpf_dynptr_clone(&ptr, &clone1);
+
+	bpf_dynptr_clone(&ptr, &clone2);
+
+	bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+	/* this should fail */
+	bpf_dynptr_read(read_data, sizeof(read_data), &clone1, 0, 0);
+
+	return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its clones
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate4(void *ctx)
+{
+	struct bpf_dynptr ptr;
+	struct bpf_dynptr clone;
+	int *data;
+
+	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+	bpf_dynptr_clone(&ptr, &clone);
+	data = bpf_dynptr_data(&clone, 0, sizeof(val));
+	if (!data)
+		return 0;
+
+	bpf_ringbuf_submit_dynptr(&ptr, 0);
+
+	/* this should fail */
+	*data = 123;
+
+	return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its parent
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate5(void *ctx)
+{
+	struct bpf_dynptr ptr;
+	struct bpf_dynptr clone;
+	int *data;
+
+	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+	data = bpf_dynptr_data(&ptr, 0, sizeof(val));
+	if (!data)
+		return 0;
+
+	bpf_dynptr_clone(&ptr, &clone);
+
+	bpf_ringbuf_submit_dynptr(&clone, 0);
+
+	/* this should fail */
+	*data = 123;
+
+	return 0;
+}
+
+/* Invalidating a dynptr should invalidate any data slices
+ * of its sibling
+ */
+SEC("?raw_tp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_invalidate6(void *ctx)
+{
+	struct bpf_dynptr ptr;
+	struct bpf_dynptr clone1;
+	struct bpf_dynptr clone2;
+	int *data;
+
+	bpf_ringbuf_reserve_dynptr(&ringbuf, val, 0, &ptr);
+
+	bpf_dynptr_clone(&ptr, &clone1);
+
+	bpf_dynptr_clone(&ptr, &clone2);
+
+	data = bpf_dynptr_data(&clone1, 0, sizeof(val));
+	if (!data)
+		return 0;
+
+	bpf_ringbuf_submit_dynptr(&clone2, 0);
+
+	/* this should fail */
+	*data = 123;
+
+	return 0;
+}
+
+/* A skb clone's data slices should be invalid anytime packet data changes */
+SEC("?tc")
+__failure __msg("invalid mem access 'scalar'")
+int clone_skb_packet_data(struct __sk_buff *skb)
+{
+	char buffer[sizeof(__u32)] = {};
+	struct bpf_dynptr clone;
+	struct bpf_dynptr ptr;
+	__u32 *data;
+
+	bpf_dynptr_from_skb(skb, 0, &ptr);
+
+	bpf_dynptr_clone(&ptr, &clone);
+	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+	if (!data)
+		return XDP_DROP;
+
+	if (bpf_skb_pull_data(skb, skb->len))
+		return SK_DROP;
+
+	/* this should fail */
+	*data = 123;
+
+	return 0;
+}
+
+/* A xdp clone's data slices should be invalid anytime packet data changes */
+SEC("?xdp")
+__failure __msg("invalid mem access 'scalar'")
+int clone_xdp_packet_data(struct xdp_md *xdp)
+{
+	char buffer[sizeof(__u32)] = {};
+	struct bpf_dynptr clone;
+	struct bpf_dynptr ptr;
+	struct ethhdr *hdr;
+	__u32 *data;
+
+	bpf_dynptr_from_xdp(xdp, 0, &ptr);
+
+	bpf_dynptr_clone(&ptr, &clone);
+	data = bpf_dynptr_slice_rdwr(&clone, 0, buffer, sizeof(buffer));
+	if (!data)
+		return XDP_DROP;
+
+	if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(*hdr)))
+		return XDP_DROP;
+
+	/* this should fail */
+	*data = 123;
+
+	return 0;
+}
+
+/* Buffers that are provided must be sufficiently long */
+SEC("?cgroup_skb/egress")
+__failure __msg("memory, len pair leads to invalid memory access")
+int test_dynptr_skb_small_buff(struct __sk_buff *skb)
+{
+	struct bpf_dynptr ptr;
+	char buffer[8] = {};
+	__u64 *data;
+
+	if (bpf_dynptr_from_skb(skb, 0, &ptr)) {
+		err = 1;
+		return 1;
+	}
+
+	/* This may return NULL. SKB may require a buffer */
+	data = bpf_dynptr_slice(&ptr, 0, buffer, 9);
+
+	return !!data;
+}
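The clone_invalidate* cases pin down the lifetime rule: a clone shares its parent's underlying reference, so releasing any one of parent, clone, or sibling invalidates all of them and their data slices. The valid counterpart reads through the clone before the shared reservation is released — a sketch assuming the selftests' `ringbuf` map and `err` global:

SEC("?raw_tp")
int clone_then_release_ok(void *ctx)
{
	struct bpf_dynptr ptr, clone;
	char data[16];

	if (bpf_ringbuf_reserve_dynptr(&ringbuf, sizeof(data), 0, &ptr))
		goto done;

	bpf_dynptr_clone(&ptr, &clone);

	/* the clone is still valid here: the reservation is live */
	if (bpf_dynptr_read(data, sizeof(data), &clone, 0, 0))
		err = 1;

done:
	/* releases the single underlying reservation; ptr and clone
	 * are both invalid afterwards
	 */
	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}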
done; + } + + /* After this, the dynptr has a size of 0 */ + if (bpf_dynptr_adjust(&ptr, size, size)) { + err = 2; + goto done; + } + + /* Test that reading + writing non-zero bytes is not ok */ + if (bpf_dynptr_read(&read_data, sizeof(read_data), &ptr, 0, 0) != -E2BIG) { + err = 3; + goto done; + } + + if (bpf_dynptr_write(&ptr, 0, &write_data, sizeof(write_data), 0) != -E2BIG) { + err = 4; + goto done; + } + + /* Test that reading + writing 0 bytes from a 0-size dynptr is ok */ + if (bpf_dynptr_read(&read_data, 0, &ptr, 0, 0)) { + err = 5; + goto done; + } + + if (bpf_dynptr_write(&ptr, 0, &write_data, 0, 0)) { + err = 6; + goto done; + } + + err = 0; + +done: + bpf_ringbuf_discard_dynptr(&ptr, 0); + return 0; +} + +SEC("tp/syscalls/sys_enter_nanosleep") +int test_dynptr_is_null(void *ctx) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + __u64 size = 4; + + if (bpf_get_current_pid_tgid() >> 32 != pid) + return 0; + + /* Pass in invalid flags, get back an invalid dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 123, &ptr1) != -EINVAL) { + err = 1; + goto exit_early; + } + + /* Test that the invalid dynptr is null */ + if (!bpf_dynptr_is_null(&ptr1)) { + err = 2; + goto exit_early; + } + + /* Get a valid dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, size, 0, &ptr2)) { + err = 3; + goto exit; + } + + /* Test that the valid dynptr is not null */ + if (bpf_dynptr_is_null(&ptr2)) { + err = 4; + goto exit; + } + +exit: + bpf_ringbuf_discard_dynptr(&ptr2, 0); +exit_early: + bpf_ringbuf_discard_dynptr(&ptr1, 0); + return 0; +} + +SEC("cgroup_skb/egress") +int test_dynptr_is_rdonly(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + struct bpf_dynptr ptr3; + + /* Pass in invalid flags, get back an invalid dynptr */ + if (bpf_dynptr_from_skb(skb, 123, &ptr1) != -EINVAL) { + err = 1; + return 0; + } + + /* Test that an invalid dynptr is_rdonly returns false */ + if (bpf_dynptr_is_rdonly(&ptr1)) { + err = 2; + return 0; + } + + /* Get a read-only dynptr */ + if (bpf_dynptr_from_skb(skb, 0, &ptr2)) { + err = 3; + return 0; + } + + /* Test that the dynptr is read-only */ + if (!bpf_dynptr_is_rdonly(&ptr2)) { + err = 4; + return 0; + } + + /* Get a read-writeable dynptr */ + if (bpf_ringbuf_reserve_dynptr(&ringbuf, 64, 0, &ptr3)) { + err = 5; + goto done; + } + + /* Test that the dynptr is read-only */ + if (bpf_dynptr_is_rdonly(&ptr3)) { + err = 6; + goto done; + } + +done: + bpf_ringbuf_discard_dynptr(&ptr3, 0); + return 0; +} + +SEC("cgroup_skb/egress") +int test_dynptr_clone(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr1; + struct bpf_dynptr ptr2; + __u32 off = 2, size; + + /* Get a dynptr */ + if (bpf_dynptr_from_skb(skb, 0, &ptr1)) { + err = 1; + return 0; + } + + if (bpf_dynptr_adjust(&ptr1, off, bpf_dynptr_size(&ptr1))) { + err = 2; + return 0; + } + + /* Clone the dynptr */ + if (bpf_dynptr_clone(&ptr1, &ptr2)) { + err = 3; + return 0; + } + + size = bpf_dynptr_size(&ptr1); + + /* Check that the clone has the same size and rd-only */ + if (bpf_dynptr_size(&ptr2) != size) { + err = 4; + return 0; + } + + if (bpf_dynptr_is_rdonly(&ptr2) != bpf_dynptr_is_rdonly(&ptr1)) { + err = 5; + return 0; + } + + /* Advance and trim the original dynptr */ + bpf_dynptr_adjust(&ptr1, 5, 5); + + /* Check that only original dynptr was affected, and the clone wasn't */ + if (bpf_dynptr_size(&ptr2) != size) { + err = 6; + return 0; + } + + return 0; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_skb_no_buff(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; 
+ __u64 *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This may return NULL. SKB may require a buffer */ + data = bpf_dynptr_slice(&ptr, 0, NULL, 1); + + return !!data; +} + +SEC("?cgroup_skb/egress") +int test_dynptr_skb_strcmp(struct __sk_buff *skb) +{ + struct bpf_dynptr ptr; + char *data; + + if (bpf_dynptr_from_skb(skb, 0, &ptr)) { + err = 1; + return 1; + } + + /* This may return NULL. SKB may require a buffer */ + data = bpf_dynptr_slice(&ptr, 0, NULL, 10); + if (data) { + bpf_strncmp(data, 10, "foo"); + return 1; + } + + return 1; +} diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index be16143ae292..6b9b3c56f009 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -651,29 +651,25 @@ int iter_stack_array_loop(const void *ctx) return sum; } -#define ARR_SZ 16 - -static __noinline void fill(struct bpf_iter_num *it, int *arr, int mul) +static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul) { - int *t; - __u64 i; + int *t, i; while ((t = bpf_iter_num_next(it))) { i = *t; - if (i >= ARR_SZ) + if (i >= n) break; arr[i] = i * mul; } } -static __noinline int sum(struct bpf_iter_num *it, int *arr) +static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n) { - int *t, sum = 0;; - __u64 i; + int *t, i, sum = 0;; while ((t = bpf_iter_num_next(it))) { i = *t; - if (i >= ARR_SZ) + if (i >= n) break; sum += arr[i]; } @@ -685,7 +681,7 @@ SEC("raw_tp") __success int iter_pass_iter_ptr_to_subprog(const void *ctx) { - int arr1[ARR_SZ], arr2[ARR_SZ]; + int arr1[16], arr2[32]; struct bpf_iter_num it; int n, sum1, sum2; @@ -694,25 +690,25 @@ int iter_pass_iter_ptr_to_subprog(const void *ctx) /* fill arr1 */ n = ARRAY_SIZE(arr1); bpf_iter_num_new(&it, 0, n); - fill(&it, arr1, 2); + fill(&it, arr1, n, 2); bpf_iter_num_destroy(&it); /* fill arr2 */ n = ARRAY_SIZE(arr2); bpf_iter_num_new(&it, 0, n); - fill(&it, arr2, 10); + fill(&it, arr2, n, 10); bpf_iter_num_destroy(&it); /* sum arr1 */ n = ARRAY_SIZE(arr1); bpf_iter_num_new(&it, 0, n); - sum1 = sum(&it, arr1); + sum1 = sum(&it, arr1, n); bpf_iter_num_destroy(&it); /* sum arr2 */ n = ARRAY_SIZE(arr2); bpf_iter_num_new(&it, 0, n); - sum2 = sum(&it, arr2); + sum2 = sum(&it, arr2, n); bpf_iter_num_destroy(&it); bpf_printk("sum1=%d, sum2=%d", sum1, sum2); diff --git a/tools/testing/selftests/bpf/progs/sockopt_inherit.c b/tools/testing/selftests/bpf/progs/sockopt_inherit.c index 9fb241b97291..c8f59caa4639 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/progs/sockopt_inherit.c @@ -9,6 +9,8 @@ char _license[] SEC("license") = "GPL"; #define CUSTOM_INHERIT2 1 #define CUSTOM_LISTENER 2 +__u32 page_size = 0; + struct sockopt_inherit { __u8 val; }; @@ -55,7 +57,7 @@ int _getsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_CUSTOM) - return 1; /* only interested in SOL_CUSTOM */ + goto out; /* only interested in SOL_CUSTOM */ if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -70,6 +72,12 @@ int _getsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. 
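Passing the array length alongside the pointer is what lets a single subprog serve the now differently sized arrays: the callee bounds every index against the runtime n rather than a compile-time ARR_SZ that only accidentally matched each caller. A minimal plain-C illustration of the signature change (the selftest itself keeps the loop verifier-friendly by driving it with the bpf_iter_num iterator rather than an open-coded loop):

/* Every access is bounded by the caller-supplied length, so the same
 * subprog is safe for arr1[16] and arr2[32] alike.
 */
static __noinline int sum_n(int *arr, __u32 n)
{
	int s = 0;
	__u32 i;

	for (i = 0; i < n; i++)
		s += arr[i];
	return s;
}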
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -80,7 +88,7 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_CUSTOM) - return 1; /* only interested in SOL_CUSTOM */ + goto out; /* only interested in SOL_CUSTOM */ if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -93,4 +101,10 @@ int _setsockopt(struct bpf_sockopt *ctx) ctx->optlen = -1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_multi.c b/tools/testing/selftests/bpf/progs/sockopt_multi.c index 177a59069dae..96f29fce050b 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_multi.c +++ b/tools/testing/selftests/bpf/progs/sockopt_multi.c @@ -5,6 +5,8 @@ char _license[] SEC("license") = "GPL"; +__u32 page_size = 0; + SEC("cgroup/getsockopt") int _getsockopt_child(struct bpf_sockopt *ctx) { @@ -12,7 +14,7 @@ int _getsockopt_child(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -26,6 +28,12 @@ int _getsockopt_child(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/getsockopt") @@ -35,7 +43,7 @@ int _getsockopt_parent(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -49,6 +57,12 @@ int _getsockopt_parent(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -58,7 +72,7 @@ int _setsockopt(struct bpf_sockopt *ctx) __u8 *optval = ctx->optval; if (ctx->level != SOL_IP || ctx->optname != IP_TOS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -67,4 +81,10 @@ int _setsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c index 1bce83b6e3a7..dbe235ede7f3 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c +++ b/tools/testing/selftests/bpf/progs/sockopt_qos_to_cc.c @@ -9,6 +9,8 @@ char _license[] SEC("license") = "GPL"; +__u32 page_size = 0; + SEC("cgroup/setsockopt") int sockopt_qos_to_cc(struct bpf_sockopt *ctx) { @@ -19,7 +21,7 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx) char cc_cubic[TCP_CA_NAME_MAX] = "cubic"; if (ctx->level != SOL_IPV6 || ctx->optname != IPV6_TCLASS) - return 1; + goto out; if (optval + 1 > optval_end) return 0; /* EPERM, bounds check */ @@ -36,4 +38,10 @@ int sockopt_qos_to_cc(struct bpf_sockopt *ctx) return 0; } return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. 
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/sockopt_sk.c b/tools/testing/selftests/bpf/progs/sockopt_sk.c index fe1df4cd206e..cb990a7d3d45 100644 --- a/tools/testing/selftests/bpf/progs/sockopt_sk.c +++ b/tools/testing/selftests/bpf/progs/sockopt_sk.c @@ -37,7 +37,7 @@ int _getsockopt(struct bpf_sockopt *ctx) /* Bypass AF_NETLINK. */ sk = ctx->sk; if (sk && sk->family == AF_NETLINK) - return 1; + goto out; /* Make sure bpf_get_netns_cookie is callable. */ @@ -52,8 +52,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - ctx->optlen = 0; /* bypass optval>PAGE_SIZE */ - return 1; + goto out; } if (ctx->level == SOL_SOCKET && ctx->optname == SO_SNDBUF) { @@ -61,7 +60,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - return 1; + goto out; } if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) { @@ -69,7 +68,7 @@ int _getsockopt(struct bpf_sockopt *ctx) * let next BPF program in the cgroup chain or kernel * handle it. */ - return 1; + goto out; } if (ctx->level == SOL_TCP && ctx->optname == TCP_ZEROCOPY_RECEIVE) { @@ -85,7 +84,7 @@ int _getsockopt(struct bpf_sockopt *ctx) if (((struct tcp_zerocopy_receive *)optval)->address != 0) return 0; /* unexpected data */ - return 1; + goto out; } if (ctx->level == SOL_IP && ctx->optname == IP_FREEBIND) { @@ -129,6 +128,12 @@ int _getsockopt(struct bpf_sockopt *ctx) ctx->optlen = 1; return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. */ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } SEC("cgroup/setsockopt") @@ -142,7 +147,7 @@ int _setsockopt(struct bpf_sockopt *ctx) /* Bypass AF_NETLINK. */ sk = ctx->sk; if (sk && sk->family == AF_NETLINK) - return 1; + goto out; /* Make sure bpf_get_netns_cookie is callable. */ @@ -224,4 +229,10 @@ int _setsockopt(struct bpf_sockopt *ctx) */ return 1; + +out: + /* optval larger than PAGE_SIZE use kernel's buffer. 
*/ + if (ctx->optlen > page_size) + ctx->optlen = 0; + return 1; } diff --git a/tools/testing/selftests/bpf/progs/test_global_func1.c b/tools/testing/selftests/bpf/progs/test_global_func1.c index b85fc8c423ba..17a9f59bf5f3 100644 --- a/tools/testing/selftests/bpf/progs/test_global_func1.c +++ b/tools/testing/selftests/bpf/progs/test_global_func1.c @@ -10,6 +10,8 @@ static __attribute__ ((noinline)) int f0(int var, struct __sk_buff *skb) { + asm volatile (""); + return skb->len; } diff --git a/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c new file mode 100644 index 000000000000..56cdc0a553f0 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_task_under_cgroup.c @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Bytedance */ + +#include <vmlinux.h> +#include <bpf/bpf_tracing.h> +#include <bpf/bpf_helpers.h> + +#include "bpf_misc.h" + +struct cgroup *bpf_cgroup_from_id(u64 cgid) __ksym; +long bpf_task_under_cgroup(struct task_struct *task, struct cgroup *ancestor) __ksym; +void bpf_cgroup_release(struct cgroup *p) __ksym; +struct task_struct *bpf_task_acquire(struct task_struct *p) __ksym; +void bpf_task_release(struct task_struct *p) __ksym; + +const volatile int local_pid; +const volatile __u64 cgid; +int remote_pid; + +SEC("tp_btf/task_newtask") +int BPF_PROG(handle__task_newtask, struct task_struct *task, u64 clone_flags) +{ + struct cgroup *cgrp = NULL; + struct task_struct *acquired; + + if (local_pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; + + acquired = bpf_task_acquire(task); + if (!acquired) + return 0; + + if (local_pid == acquired->tgid) + goto out; + + cgrp = bpf_cgroup_from_id(cgid); + if (!cgrp) + goto out; + + if (bpf_task_under_cgroup(acquired, cgrp)) + remote_pid = acquired->tgid; + +out: + if (cgrp) + bpf_cgroup_release(cgrp); + bpf_task_release(acquired); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c new file mode 100644 index 000000000000..db6b3143338b --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. 
*/ + +#include <errno.h> +#include <string.h> +#include <linux/bpf.h> +#include <bpf/bpf_helpers.h> +#include "bpf_misc.h" + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) + +int vals[] SEC(".data.vals") = {1, 2, 3, 4}; + +__naked __noinline __used +static unsigned long identity_subprog() +{ + /* the simplest *static* 64-bit identity function */ + asm volatile ( + "r0 = r1;" + "exit;" + ); +} + +__noinline __used +unsigned long global_identity_subprog(__u64 x) +{ + /* the simplest *global* 64-bit identity function */ + return x; +} + +__naked __noinline __used +static unsigned long callback_subprog() +{ + /* the simplest callback function */ + asm volatile ( + "r0 = 0;" + "exit;" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r0") +__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4") +__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit") +__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1") +__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int subprog_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + /* pass r6 through r1 into subprog to get it back as r0; + * this whole chain will have to be marked as precise later + */ + "r1 = r6;" + "call identity_subprog;" + /* now use subprog's returned value (which is a + * r6 -> r1 -> r0 chain), as index into vals array, forcing + * all of that to be known precisely + */ + "r0 *= 4;" + "r1 = %[vals];" + /* here r0->r1->r6 chain is forced to be precise and has to be + * propagated back to the beginning, including through the + * subprog call + */ + "r1 += r0;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r0") +__msg("mark_precise: frame0: last_idx 9 first_idx 0") +__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4") +__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1") +__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7") +__naked int global_subprog_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + /* pass r6 through r1 into subprog to get it back as r0; + * given global_identity_subprog is global, precision won't + * propagate all the way back to r6 + */ + "r1 = r6;" + "call global_identity_subprog;" + /* now use subprog's returned value (which is unknown now, so + * we need to clamp it), as index into vals array, forcing r0 + * to be marked precise (with no effect on r6, though) + */ + "if r0 < %[vals_arr_sz] goto 1f;" + "r0 = %[vals_arr_sz] - 1;" + "1:" + "r0 *= 4;" + "r1 = %[vals];" + /* here r0 is forced to be precise and has to be + * propagated back to the global subprog call, but it + * shouldn't go all the way to mark r6 as precise + */ + "r1 += r0;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_const(vals_arr_sz, ARRAY_SIZE(vals)) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("14: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 14 first_idx 10") +__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") 
+__msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4") +__msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0") +__msg("mark_precise: frame0: parent state regs=r0 stack=:") +__msg("mark_precise: frame0: last_idx 18 first_idx 0") +__msg("mark_precise: frame0: regs=r0 stack= before 18: (95) exit") +__naked int callback_result_precise(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and use result; r0 shouldn't propagate back to + * callback_subprog + */ + "r1 = r6;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + "r6 = r0;" + "if r6 > 3 goto 1f;" + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the bpf_loop() call, but not beyond + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "1:" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 7 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1") +__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call identity_subprog;" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("7: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 7 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5") +__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise_global(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call global_identity_subprog;" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("12: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 12 first_idx 10") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4") +__msg("mark_precise: frame0: parent state regs=r6 stack=:") +__msg("mark_precise: frame0: last_idx 16 first_idx 0") +__msg("mark_precise: frame0: regs=r6 stack= before 16: (95) 
exit") +__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0") +__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop#181") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8") +__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +__naked int parent_callee_saved_reg_precise_with_callback(void) +{ + asm volatile ( + "r6 = 3;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 1;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) callback call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 9 first_idx 6") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 13 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1") +__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6") +__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int parent_stack_slot_precise(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call identity_subprog;" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("9: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 9 first_idx 6") +__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 5 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6") +__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") +__naked int 
parent_stack_slot_precise_global(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* call subprog and ignore result; we need this call only to + * complicate jump history + */ + "r1 = 0;" + "call global_identity_subprog;" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("14: (0f) r1 += r6") +__msg("mark_precise: frame0: last_idx 14 first_idx 11") +__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") +__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") +__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)") +__msg("mark_precise: frame0: parent state regs= stack=-8:") +__msg("mark_precise: frame0: last_idx 18 first_idx 0") +__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit") +__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0") +__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181") +__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0") +__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8") +__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6") +__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") +__naked int parent_stack_slot_precise_with_callback(void) +{ + asm volatile ( + /* spill reg */ + "r6 = 3;" + "*(u64 *)(r10 - 8) = r6;" + + /* ensure we have callback frame in jump history */ + "r1 = r6;" /* nr_loops */ + "r2 = %[callback_subprog];" /* callback_fn */ + "r3 = 0;" /* callback_ctx */ + "r4 = 0;" /* flags */ + "call %[bpf_loop];" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r6 = *(u64 *)(r10 - 8);" + + "r6 *= 4;" + "r1 = %[vals];" + /* here r6 is forced to be precise and has to be propagated + * back to the beginning, handling (and ignoring) subprog call + */ + "r1 += r6;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals), + __imm_ptr(callback_subprog), + __imm(bpf_loop) + : __clobber_common, "r6" + ); +} + +__noinline __used +static __u64 subprog_with_precise_arg(__u64 x) +{ + return vals[x]; /* x is forced to be precise */ +} + +SEC("?raw_tp") +__success __log_level(2) +__msg("8: (0f) r2 += r1") +__msg("mark_precise: frame1: last_idx 8 first_idx 0") +__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ") +__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2") +__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2") +__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6") +__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3") +__naked int subprog_arg_precise(void) +{ + asm volatile ( + "r6 = 3;" + "r1 = r6;" + /* subprog_with_precise_arg expects its argument to be + * precise, so r1->r6 will be marked precise from inside the + * subprog + */ + "call subprog_with_precise_arg;" + "r0 += r6;" + "exit;" + : + : + : 
__clobber_common, "r6" + ); +} + +/* r1 is a pointer to a stack slot; + * r2 is a register to spill into that slot + * the subprog also spills r2 into its own stack slot + */ +__naked __noinline __used +static __u64 subprog_spill_reg_precise(void) +{ + asm volatile ( + /* spill to parent stack */ + "*(u64 *)(r1 + 0) = r2;" + /* spill to subprog stack (we use -16 offset to avoid + * accidental confusion with parent's -8 stack slot in + * verifier log output) + */ + "*(u64 *)(r10 - 16) = r2;" + /* use both spills as return result to propagate precision everywhere */ + "r0 = *(u64 *)(r10 - 16);" + "r2 = *(u64 *)(r1 + 0);" + "r0 += r2;" + "exit;" + ); +} + +SEC("?raw_tp") +__success __log_level(2) +/* precision backtracking can't currently handle stack access not through r10, + * so we won't be able to mark stack slot fp-8 as precise, and so will + * fall back to forcing all as precise + */ +__msg("mark_precise: frame0: falling back to forcing all scalars precise") +__naked int subprog_spill_into_parent_stack_slot_precise(void) +{ + asm volatile ( + "r6 = 1;" + + /* pass pointer to stack slot and r6 to subprog; + * r6 will be marked precise and spilled into fp-8 slot, which + * also should be marked precise + */ + "r1 = r10;" + "r1 += -8;" + "r2 = r6;" + "call subprog_spill_reg_precise;" + + /* restore reg from stack; in this case we'll be carrying + * stack mask when going back into subprog through jump + * history + */ + "r7 = *(u64 *)(r10 - 8);" + + "r7 *= 4;" + "r1 = %[vals];" + /* here r7 is forced to be precise and has to be propagated + * back to the beginning, handling subprog call and logic + */ + "r1 += r7;" + "r0 = *(u32 *)(r1 + 0);" + "exit;" + : + : __imm_ptr(vals) + : __clobber_common, "r6", "r7" + ); +} + +__naked __noinline __used +static __u64 subprog_with_checkpoint(void) +{ + asm volatile ( + "r0 = 0;" + /* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */ + "goto +0;" + "exit;" + ); +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c index e1c787815e44..b2dfd7066c6e 100644 --- a/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/progs/xdp_hw_metadata.c @@ -77,7 +77,9 @@ int rx(struct xdp_md *ctx) } err = bpf_xdp_metadata_rx_timestamp(ctx, &meta->rx_timestamp); - if (err) + if (!err) + meta->xdp_timestamp = bpf_ktime_get_tai_ns(); + else meta->rx_timestamp = 0; /* Used by AF_XDP as not avail signal */ err = bpf_xdp_metadata_rx_hash(ctx, &meta->rx_hash, &meta->rx_hash_type); diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index ea82921110da..793689dcc170 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c @@ -714,7 +714,13 @@ static struct test_state test_states[ARRAY_SIZE(prog_test_defs)]; const char *argp_program_version = "test_progs 0.1"; const char *argp_program_bug_address = "<bpf@vger.kernel.org>"; -static const char argp_program_doc[] = "BPF selftests test runner"; +static const char argp_program_doc[] = +"BPF selftests test runner\v" +"Options accepting the NAMES parameter take either a comma-separated list\n" +"of test names, or a filename prefixed with @.
The file contains one name\n" +"(or wildcard pattern) per line, and comments beginning with # are ignored.\n" +"\n" +"These options can be passed repeatedly to read multiple files.\n"; enum ARG_KEYS { ARG_TEST_NUM = 'n', @@ -797,6 +803,7 @@ extern int extra_prog_load_log_flags; static error_t parse_arg(int key, char *arg, struct argp_state *state) { struct test_env *env = state->input; + int err = 0; switch (key) { case ARG_TEST_NUM: { @@ -821,18 +828,28 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) } case ARG_TEST_NAME_GLOB_ALLOWLIST: case ARG_TEST_NAME: { - if (parse_test_list(arg, - &env->test_selector.whitelist, - key == ARG_TEST_NAME_GLOB_ALLOWLIST)) - return -ENOMEM; + if (arg[0] == '@') + err = parse_test_list_file(arg + 1, + &env->test_selector.whitelist, + key == ARG_TEST_NAME_GLOB_ALLOWLIST); + else + err = parse_test_list(arg, + &env->test_selector.whitelist, + key == ARG_TEST_NAME_GLOB_ALLOWLIST); + break; } case ARG_TEST_NAME_GLOB_DENYLIST: case ARG_TEST_NAME_BLACKLIST: { - if (parse_test_list(arg, - &env->test_selector.blacklist, - key == ARG_TEST_NAME_GLOB_DENYLIST)) - return -ENOMEM; + if (arg[0] == '@') + err = parse_test_list_file(arg + 1, + &env->test_selector.blacklist, + key == ARG_TEST_NAME_GLOB_DENYLIST); + else + err = parse_test_list(arg, + &env->test_selector.blacklist, + key == ARG_TEST_NAME_GLOB_DENYLIST); + break; } case ARG_VERIFIER_STATS: @@ -900,7 +917,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) default: return ARGP_ERR_UNKNOWN; } - return 0; + return err; } /* diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c index 0b5e0829e5be..dc9595ade8de 100644 --- a/tools/testing/selftests/bpf/testing_helpers.c +++ b/tools/testing/selftests/bpf/testing_helpers.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) /* Copyright (C) 2019 Netronome Systems, Inc. */ /* Copyright (C) 2020 Facebook, Inc. */ +#include <ctype.h> #include <stdlib.h> #include <string.h> #include <errno.h> @@ -70,92 +71,168 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len) return 0; } -int parse_test_list(const char *s, - struct test_filter_set *set, - bool is_glob_pattern) +static int do_insert_test(struct test_filter_set *set, + char *test_str, + char *subtest_str) { - char *input, *state = NULL, *next; - struct test_filter *tmp, *tests = NULL; - int i, j, cnt = 0; + struct test_filter *tmp, *test; + char **ctmp; + int i; - input = strdup(s); - if (!input) + for (i = 0; i < set->cnt; i++) { + test = &set->tests[i]; + + if (strcmp(test_str, test->name) == 0) { + free(test_str); + goto subtest; + } + } + + tmp = realloc(set->tests, sizeof(*test) * (set->cnt + 1)); + if (!tmp) return -ENOMEM; - while ((next = strtok_r(state ? 
NULL : input, ",", &state))) { - char *subtest_str = strchr(next, '/'); - char *pattern = NULL; - int glob_chars = 0; + set->tests = tmp; + test = &set->tests[set->cnt]; - tmp = realloc(tests, sizeof(*tests) * (cnt + 1)); - if (!tmp) - goto err; - tests = tmp; + test->name = test_str; + test->subtests = NULL; + test->subtest_cnt = 0; - tests[cnt].subtest_cnt = 0; - tests[cnt].subtests = NULL; + set->cnt++; - if (is_glob_pattern) { - pattern = "%s"; - } else { - pattern = "*%s*"; - glob_chars = 2; - } +subtest: + if (!subtest_str) + return 0; - if (subtest_str) { - char **tmp_subtests = NULL; - int subtest_cnt = tests[cnt].subtest_cnt; - - *subtest_str = '\0'; - subtest_str += 1; - tmp_subtests = realloc(tests[cnt].subtests, - sizeof(*tmp_subtests) * - (subtest_cnt + 1)); - if (!tmp_subtests) - goto err; - tests[cnt].subtests = tmp_subtests; - - tests[cnt].subtests[subtest_cnt] = - malloc(strlen(subtest_str) + glob_chars + 1); - if (!tests[cnt].subtests[subtest_cnt]) - goto err; - sprintf(tests[cnt].subtests[subtest_cnt], - pattern, - subtest_str); - - tests[cnt].subtest_cnt++; + for (i = 0; i < test->subtest_cnt; i++) { + if (strcmp(subtest_str, test->subtests[i]) == 0) { + free(subtest_str); + return 0; } + } - tests[cnt].name = malloc(strlen(next) + glob_chars + 1); - if (!tests[cnt].name) - goto err; - sprintf(tests[cnt].name, pattern, next); + ctmp = realloc(test->subtests, + sizeof(*test->subtests) * (test->subtest_cnt + 1)); + if (!ctmp) + return -ENOMEM; - cnt++; + test->subtests = ctmp; + test->subtests[test->subtest_cnt] = subtest_str; + + test->subtest_cnt++; + + return 0; +} + +static int insert_test(struct test_filter_set *set, + char *test_spec, + bool is_glob_pattern) +{ + char *pattern, *subtest_str, *ext_test_str, *ext_subtest_str = NULL; + int glob_chars = 0; + + if (is_glob_pattern) { + pattern = "%s"; + } else { + pattern = "*%s*"; + glob_chars = 2; } - tmp = realloc(set->tests, sizeof(*tests) * (cnt + set->cnt)); - if (!tmp) + subtest_str = strchr(test_spec, '/'); + if (subtest_str) { + *subtest_str = '\0'; + subtest_str += 1; + } + + ext_test_str = malloc(strlen(test_spec) + glob_chars + 1); + if (!ext_test_str) goto err; - memcpy(tmp + set->cnt, tests, sizeof(*tests) * cnt); - set->tests = tmp; - set->cnt += cnt; + sprintf(ext_test_str, pattern, test_spec); - free(tests); - free(input); - return 0; + if (subtest_str) { + ext_subtest_str = malloc(strlen(subtest_str) + glob_chars + 1); + if (!ext_subtest_str) + goto err; + + sprintf(ext_subtest_str, pattern, subtest_str); + } + + return do_insert_test(set, ext_test_str, ext_subtest_str); err: - for (i = 0; i < cnt; i++) { - for (j = 0; j < tests[i].subtest_cnt; j++) - free(tests[i].subtests[j]); + free(ext_test_str); + free(ext_subtest_str); - free(tests[i].name); + return -ENOMEM; +} + +int parse_test_list_file(const char *path, + struct test_filter_set *set, + bool is_glob_pattern) +{ + char *buf = NULL, *capture_start, *capture_end, *scan_end; + size_t buflen = 0; + int err = 0; + FILE *f; + + f = fopen(path, "r"); + if (!f) { + err = -errno; + fprintf(stderr, "Failed to open '%s': %d\n", path, err); + return err; + } + + while (getline(&buf, &buflen, f) != -1) { + capture_start = buf; + + while (isspace(*capture_start)) + ++capture_start; + + capture_end = capture_start; + scan_end = capture_start; + + while (*scan_end && *scan_end != '#') { + if (!isspace(*scan_end)) + capture_end = scan_end; + + ++scan_end; + } + + if (capture_end == capture_start) + continue; + + *(++capture_end) = '\0'; + + err = 
insert_test(set, capture_start, is_glob_pattern); + if (err) + break; + } + + fclose(f); + return err; +} + +int parse_test_list(const char *s, + struct test_filter_set *set, + bool is_glob_pattern) +{ + char *input, *state = NULL, *test_spec; + int err = 0; + + input = strdup(s); + if (!input) + return -ENOMEM; + + while ((test_spec = strtok_r(state ? NULL : input, ",", &state))) { + err = insert_test(set, test_spec, is_glob_pattern); + if (err) + break; } - free(tests); + free(input); - return -ENOMEM; + return err; } __u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info) diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h index eb8790f928e4..98f09bbae86f 100644 --- a/tools/testing/selftests/bpf/testing_helpers.h +++ b/tools/testing/selftests/bpf/testing_helpers.h @@ -20,5 +20,8 @@ struct test_filter_set; int parse_test_list(const char *s, struct test_filter_set *test_set, bool is_glob_pattern); +int parse_test_list_file(const char *path, + struct test_filter_set *test_set, + bool is_glob_pattern); __u64 read_perf_max_sample_freq(void); diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c index 6c03a7d805f9..b8c0aae8e7ec 100644 --- a/tools/testing/selftests/bpf/verifier/precise.c +++ b/tools/testing/selftests/bpf/verifier/precise.c @@ -38,25 +38,24 @@ .fixup_map_array_48b = { 1 }, .result = VERBOSE_ACCEPT, .errstr = - "26: (85) call bpf_probe_read_kernel#113\ - last_idx 26 first_idx 20\ - regs=4 stack=0 before 25\ - regs=4 stack=0 before 24\ - regs=4 stack=0 before 23\ - regs=4 stack=0 before 22\ - regs=4 stack=0 before 20\ - parent didn't have regs=4 stack=0 marks\ - last_idx 19 first_idx 10\ - regs=4 stack=0 before 19\ - regs=200 stack=0 before 18\ - regs=300 stack=0 before 17\ - regs=201 stack=0 before 15\ - regs=201 stack=0 before 14\ - regs=200 stack=0 before 13\ - regs=200 stack=0 before 12\ - regs=200 stack=0 before 11\ - regs=200 stack=0 before 10\ - parent already had regs=0 stack=0 marks", + "mark_precise: frame0: last_idx 26 first_idx 20\ + mark_precise: frame0: regs=r2 stack= before 25\ + mark_precise: frame0: regs=r2 stack= before 24\ + mark_precise: frame0: regs=r2 stack= before 23\ + mark_precise: frame0: regs=r2 stack= before 22\ + mark_precise: frame0: regs=r2 stack= before 20\ + mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: last_idx 19 first_idx 10\ + mark_precise: frame0: regs=r2 stack= before 19\ + mark_precise: frame0: regs=r9 stack= before 18\ + mark_precise: frame0: regs=r8,r9 stack= before 17\ + mark_precise: frame0: regs=r0,r9 stack= before 15\ + mark_precise: frame0: regs=r0,r9 stack= before 14\ + mark_precise: frame0: regs=r9 stack= before 13\ + mark_precise: frame0: regs=r9 stack= before 12\ + mark_precise: frame0: regs=r9 stack= before 11\ + mark_precise: frame0: regs=r9 stack= before 10\ + mark_precise: frame0: parent state regs= stack=:", }, { "precise: test 2", @@ -100,20 +99,20 @@ .flags = BPF_F_TEST_STATE_FREQ, .errstr = "26: (85) call bpf_probe_read_kernel#113\ - last_idx 26 first_idx 22\ - regs=4 stack=0 before 25\ - regs=4 stack=0 before 24\ - regs=4 stack=0 before 23\ - regs=4 stack=0 before 22\ - parent didn't have regs=4 stack=0 marks\ - last_idx 20 first_idx 20\ - regs=4 stack=0 before 20\ - parent didn't have regs=4 stack=0 marks\ - last_idx 19 first_idx 17\ - regs=4 stack=0 before 19\ - regs=200 stack=0 before 18\ - regs=300 stack=0 before 17\ - parent already had regs=0 stack=0 
marks", + mark_precise: frame0: last_idx 26 first_idx 22\ + mark_precise: frame0: regs=r2 stack= before 25\ + mark_precise: frame0: regs=r2 stack= before 24\ + mark_precise: frame0: regs=r2 stack= before 23\ + mark_precise: frame0: regs=r2 stack= before 22\ + mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: last_idx 20 first_idx 20\ + mark_precise: frame0: regs=r2 stack= before 20\ + mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: last_idx 19 first_idx 17\ + mark_precise: frame0: regs=r2 stack= before 19\ + mark_precise: frame0: regs=r9 stack= before 18\ + mark_precise: frame0: regs=r8,r9 stack= before 17\ + mark_precise: frame0: parent state regs= stack=:", }, { "precise: cross frame pruning", @@ -153,15 +152,16 @@ }, .prog_type = BPF_PROG_TYPE_XDP, .flags = BPF_F_TEST_STATE_FREQ, - .errstr = "5: (2d) if r4 > r0 goto pc+0\ - last_idx 5 first_idx 5\ - parent didn't have regs=10 stack=0 marks\ - last_idx 4 first_idx 2\ - regs=10 stack=0 before 4\ - regs=10 stack=0 before 3\ - regs=0 stack=1 before 2\ - last_idx 5 first_idx 5\ - parent didn't have regs=1 stack=0 marks", + .errstr = "mark_precise: frame0: last_idx 5 first_idx 5\ + mark_precise: frame0: parent state regs=r4 stack=:\ + mark_precise: frame0: last_idx 4 first_idx 2\ + mark_precise: frame0: regs=r4 stack= before 4\ + mark_precise: frame0: regs=r4 stack= before 3\ + mark_precise: frame0: regs= stack=-8 before 2\ + mark_precise: frame0: falling back to forcing all scalars precise\ + force_precise: frame0: forcing r0 to be precise\ + mark_precise: frame0: last_idx 5 first_idx 5\ + mark_precise: frame0: parent state regs= stack=:", .result = VERBOSE_ACCEPT, .retval = -1, }, @@ -179,16 +179,19 @@ }, .prog_type = BPF_PROG_TYPE_XDP, .flags = BPF_F_TEST_STATE_FREQ, - .errstr = "last_idx 6 first_idx 6\ - parent didn't have regs=10 stack=0 marks\ - last_idx 5 first_idx 3\ - regs=10 stack=0 before 5\ - regs=10 stack=0 before 4\ - regs=0 stack=1 before 3\ - last_idx 6 first_idx 6\ - parent didn't have regs=1 stack=0 marks\ - last_idx 5 first_idx 3\ - regs=1 stack=0 before 5", + .errstr = "mark_precise: frame0: last_idx 6 first_idx 6\ + mark_precise: frame0: parent state regs=r4 stack=:\ + mark_precise: frame0: last_idx 5 first_idx 3\ + mark_precise: frame0: regs=r4 stack= before 5\ + mark_precise: frame0: regs=r4 stack= before 4\ + mark_precise: frame0: regs= stack=-8 before 3\ + mark_precise: frame0: falling back to forcing all scalars precise\ + force_precise: frame0: forcing r0 to be precise\ + force_precise: frame0: forcing r0 to be precise\ + force_precise: frame0: forcing r0 to be precise\ + force_precise: frame0: forcing r0 to be precise\ + mark_precise: frame0: last_idx 6 first_idx 6\ + mark_precise: frame0: parent state regs= stack=:", .result = VERBOSE_ACCEPT, .retval = -1, }, @@ -217,3 +220,39 @@ .errstr = "invalid access to memory, mem_size=1 off=42 size=8", .result = REJECT, }, +{ + "precise: program doesn't prematurely prune branches", + .insns = { + BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0x400), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_7, 0), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_8, 0), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0x80000000), + BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 0x401), + BPF_JMP_IMM(BPF_JA, 0, 0, 0), + BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 2), + BPF_ALU64_IMM(BPF_MOD, BPF_REG_6, 1), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_9, 0), + BPF_JMP_REG(BPF_JLE, BPF_REG_6, BPF_REG_9, 1), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_6, 0), + BPF_ALU64_IMM(BPF_MOV, BPF_REG_0, 0), + BPF_STX_MEM(BPF_W, 
BPF_REG_10, BPF_REG_0, -4), + BPF_LD_MAP_FD(BPF_REG_4, 0), + BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_4), + BPF_ALU64_REG(BPF_MOV, BPF_REG_2, BPF_REG_10), + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), + BPF_EXIT_INSN(), + BPF_ALU64_IMM(BPF_RSH, BPF_REG_6, 10), + BPF_ALU64_IMM(BPF_MUL, BPF_REG_6, 8192), + BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_0), + BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6), + BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0), + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_3, 0), + BPF_EXIT_INSN(), + }, + .fixup_map_array_48b = { 13 }, + .prog_type = BPF_PROG_TYPE_XDP, + .result = REJECT, + .errstr = "register with unbounded min value is not allowed", +}, diff --git a/tools/testing/selftests/bpf/veristat.c b/tools/testing/selftests/bpf/veristat.c index 1db7185181da..655095810d4a 100644 --- a/tools/testing/selftests/bpf/veristat.c +++ b/tools/testing/selftests/bpf/veristat.c @@ -141,6 +141,7 @@ static struct env { bool verbose; bool debug; bool quiet; + bool force_checkpoints; enum resfmt out_fmt; bool show_version; bool comparison_mode; @@ -209,6 +210,8 @@ static const struct argp_option opts[] = { { "log-level", 'l', "LEVEL", 0, "Verifier log level (default 0 for normal mode, 1 for verbose mode)" }, { "log-fixed", OPT_LOG_FIXED, NULL, 0, "Disable verifier log rotation" }, { "log-size", OPT_LOG_SIZE, "BYTES", 0, "Customize verifier log size (default to 16MB)" }, + { "test-states", 't', NULL, 0, + "Force frequent BPF verifier state checkpointing (set BPF_F_TEST_STATE_FREQ program flag)" }, { "quiet", 'q', NULL, 0, "Quiet mode" }, { "emit", 'e', "SPEC", 0, "Specify stats to be emitted" }, { "sort", 's', "SPEC", 0, "Specify sort order" }, @@ -284,6 +287,9 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state) argp_usage(state); } break; + case 't': + env.force_checkpoints = true; + break; case 'C': env.comparison_mode = true; break; @@ -989,6 +995,9 @@ static int process_prog(const char *filename, struct bpf_object *obj, struct bpf /* increase chances of successful BPF object loading */ fixup_obj(obj, prog, base_filename); + if (env.force_checkpoints) + bpf_program__set_flags(prog, bpf_program__flags(prog) | BPF_F_TEST_STATE_FREQ); + err = bpf_object__load(obj); env.progs_processed++; diff --git a/tools/testing/selftests/bpf/xdp_hw_metadata.c b/tools/testing/selftests/bpf/xdp_hw_metadata.c index 987cf0db5ebc..613321eb84c1 100644 --- a/tools/testing/selftests/bpf/xdp_hw_metadata.c +++ b/tools/testing/selftests/bpf/xdp_hw_metadata.c @@ -27,6 +27,7 @@ #include <sys/mman.h> #include <net/if.h> #include <poll.h> +#include <time.h> #include "xdp_metadata.h" @@ -134,18 +135,52 @@ static void refill_rx(struct xsk *xsk, __u64 addr) } } -static void verify_xdp_metadata(void *data) +#define NANOSEC_PER_SEC 1000000000 /* 10^9 */ +static __u64 gettime(clockid_t clock_id) +{ + struct timespec t; + int res; + + /* See man clock_gettime(2) for type of clock_id's */ + res = clock_gettime(clock_id, &t); + + if (res < 0) + error(res, errno, "Error with clock_gettime()"); + + return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec; +} + +static void verify_xdp_metadata(void *data, clockid_t clock_id) { struct xdp_meta *meta; meta = data - sizeof(*meta); - printf("rx_timestamp: %llu\n", meta->rx_timestamp); if (meta->rx_hash_err < 0) printf("No rx_hash err=%d\n", meta->rx_hash_err); else printf("rx_hash: 0x%X with RSS type:0x%X\n", meta->rx_hash, meta->rx_hash_type); + + 
printf("rx_timestamp: %llu (sec:%0.4f)\n", meta->rx_timestamp, + (double)meta->rx_timestamp / NANOSEC_PER_SEC); + if (meta->rx_timestamp) { + __u64 usr_clock = gettime(clock_id); + __u64 xdp_clock = meta->xdp_timestamp; + __s64 delta_X = xdp_clock - meta->rx_timestamp; + __s64 delta_X2U = usr_clock - xdp_clock; + + printf("XDP RX-time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n", + xdp_clock, (double)xdp_clock / NANOSEC_PER_SEC, + (double)delta_X / NANOSEC_PER_SEC, + (double)delta_X / 1000); + + printf("AF_XDP time: %llu (sec:%0.4f) delta sec:%0.4f (%0.3f usec)\n", + usr_clock, (double)usr_clock / NANOSEC_PER_SEC, + (double)delta_X2U / NANOSEC_PER_SEC, + (double)delta_X2U / 1000); + } + } static void verify_skb_metadata(int fd) @@ -193,7 +228,7 @@ static void verify_skb_metadata(int fd) printf("skb hwtstamp is not found!\n"); } -static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd) +static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd, clockid_t clock_id) { const struct xdp_desc *rx_desc; struct pollfd fds[rxq + 1]; @@ -243,7 +278,8 @@ static int verify_metadata(struct xsk *rx_xsk, int rxq, int server_fd) addr = xsk_umem__add_offset_to_addr(rx_desc->addr); printf("%p: rx_desc[%u]->addr=%llx addr=%llx comp_addr=%llx\n", xsk, idx, rx_desc->addr, addr, comp_addr); - verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr)); + verify_xdp_metadata(xsk_umem__get_data(xsk->umem_area, addr), + clock_id); xsk_ring_cons__release(&xsk->rx, 1); refill_rx(xsk, comp_addr); } @@ -370,6 +406,7 @@ static void timestamping_enable(int fd, int val) int main(int argc, char *argv[]) { + clockid_t clock_id = CLOCK_TAI; int server_fd = -1; int ret; int i; @@ -443,7 +480,7 @@ int main(int argc, char *argv[]) error(1, -ret, "bpf_xdp_attach"); signal(SIGINT, handle_signal); - ret = verify_metadata(rx_xsk, rxq, server_fd); + ret = verify_metadata(rx_xsk, rxq, server_fd, clock_id); close(server_fd); cleanup(); if (ret) diff --git a/tools/testing/selftests/bpf/xdp_metadata.h b/tools/testing/selftests/bpf/xdp_metadata.h index 0c4624dc6f2f..938a729bd307 100644 --- a/tools/testing/selftests/bpf/xdp_metadata.h +++ b/tools/testing/selftests/bpf/xdp_metadata.h @@ -11,6 +11,7 @@ struct xdp_meta { __u64 rx_timestamp; + __u64 xdp_timestamp; __u32 rx_hash; union { __u32 rx_hash_type; |