Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/common.c | 8
-rw-r--r--  tools/bpf/bpftool/feature.c | 2
-rw-r--r--  tools/bpf/bpftool/main.c | 6
-rw-r--r--  tools/bpf/bpftool/main.h | 2
-rw-r--r--  tools/bpf/bpftool/map.c | 2
-rw-r--r--  tools/bpf/bpftool/perf.c | 112
-rw-r--r--  tools/bpf/bpftool/pids.c | 1
-rw-r--r--  tools/bpf/bpftool/prog.c | 3
-rw-r--r--  tools/bpf/bpftool/struct_ops.c | 2
-rw-r--r--  tools/bpf/bpftool/tracelog.c | 2
-rw-r--r--  tools/bpf/runqslower/runqslower.c | 18
-rw-r--r--  tools/include/uapi/asm/bpf_perf_event.h | 2
-rw-r--r--  tools/include/uapi/linux/bpf.h | 12
-rw-r--r--  tools/lib/bpf/bpf.c | 34
-rw-r--r--  tools/lib/bpf/bpf_helpers.h | 7
-rw-r--r--  tools/lib/bpf/bpf_tracing.h | 23
-rw-r--r--  tools/lib/bpf/btf.c | 9
-rw-r--r--  tools/lib/bpf/libbpf.c | 322
-rw-r--r--  tools/lib/bpf/libbpf.h | 82
-rw-r--r--  tools/lib/bpf/libbpf_internal.h | 9
-rw-r--r--  tools/lib/bpf/relo_core.c | 104
-rw-r--r--  tools/lib/bpf/relo_core.h | 6
-rw-r--r--  tools/lib/bpf/usdt.c | 191
-rw-r--r--  tools/testing/selftests/bpf/bench.c | 1
-rw-r--r--  tools/testing/selftests/bpf/bpf_rlimit.h | 28
-rw-r--r--  tools/testing/selftests/bpf/flow_dissector_load.c | 6
-rw-r--r--  tools/testing/selftests/bpf/get_cgroup_id_user.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/arg_parsing.c | 107
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_iter.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/btf.c | 100
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/fexit_stress.c | 2
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/helper_restricted.c | 10
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/linked_funcs.c | 6
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/log_fixup.c | 114
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/map_kptr.c | 37
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c | 56
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/reference_tracking.c | 23
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c | 45
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/snprintf.c | 4
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/tc_redirect.c | 1
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/test_strncmp.c | 25
-rw-r--r--  tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c | 3
-rw-r--r--  tools/testing/selftests/bpf/progs/exhandler_kern.c | 15
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_funcs1.c | 7
-rw-r--r--  tools/testing/selftests/bpf/progs/linked_funcs2.c | 7
-rw-r--r--  tools/testing/selftests/bpf/progs/map_kptr.c | 190
-rw-r--r--  tools/testing/selftests/bpf/progs/pyperf.h | 4
-rw-r--r--  tools/testing/selftests/bpf/progs/pyperf600.c | 11
-rw-r--r--  tools/testing/selftests/bpf/progs/skb_load_bytes.c | 19
-rw-r--r--  tools/testing/selftests/bpf/progs/strncmp_test.c | 8
-rw-r--r--  tools/testing/selftests/bpf/progs/test_helper_restricted.c | 16
-rw-r--r--  tools/testing/selftests/bpf/progs/test_log_fixup.c | 38
-rw-r--r--  tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c | 18
-rw-r--r--  tools/testing/selftests/bpf/test_cgroup_storage.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_dev_cgroup.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_lpm_map.c | 43
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c | 70
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 481
-rw-r--r--  tools/testing/selftests/bpf/test_progs.h | 62
-rw-r--r--  tools/testing/selftests/bpf/test_skb_cgroup_id_user.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_sock.c | 6
-rw-r--r--  tools/testing/selftests/bpf/test_sock_addr.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_sockmap.c | 5
-rw-r--r--  tools/testing/selftests/bpf/test_sysctl.c | 6
-rw-r--r--  tools/testing/selftests/bpf/test_tag.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c | 4
-rw-r--r--  tools/testing/selftests/bpf/test_tcpnotify_user.c | 1
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c | 55
-rw-r--r--  tools/testing/selftests/bpf/test_verifier_log.c | 5
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.c | 89
-rw-r--r--  tools/testing/selftests/bpf/testing_helpers.h | 8
-rw-r--r--  tools/testing/selftests/bpf/verifier/calls.c | 20
-rw-r--r--  tools/testing/selftests/bpf/verifier/map_kptr.c | 469
-rw-r--r--  tools/testing/selftests/bpf/verifier/ref_tracking.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/sock.c | 6
-rw-r--r--  tools/testing/selftests/bpf/xdp_redirect_multi.c | 1
-rw-r--r--  tools/testing/selftests/bpf/xdping.c | 8
-rw-r--r--  tools/testing/selftests/bpf/xdpxceiver.c | 6
80 files changed, 2488 insertions(+), 757 deletions(-)
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 0c1e06cf50b9..c740142c24d8 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -17,7 +17,6 @@
#include <linux/magic.h>
#include <net/if.h>
#include <sys/mount.h>
-#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/vfs.h>
@@ -119,13 +118,6 @@ static bool is_bpffs(char *path)
return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
}
-void set_max_rlimit(void)
-{
- struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
-
- setrlimit(RLIMIT_MEMLOCK, &rinf);
-}
-
static int
mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
{
diff --git a/tools/bpf/bpftool/feature.c b/tools/bpf/bpftool/feature.c
index f041c4a6a1f2..be130e35462f 100644
--- a/tools/bpf/bpftool/feature.c
+++ b/tools/bpf/bpftool/feature.c
@@ -1136,8 +1136,6 @@ static int do_probe(int argc, char **argv)
__u32 ifindex = 0;
char *ifname;
- set_max_rlimit();
-
while (argc) {
if (is_prefix(*argv, "kernel")) {
if (target != COMPONENT_UNSPEC) {
diff --git a/tools/bpf/bpftool/main.c b/tools/bpf/bpftool/main.c
index e81227761f5d..9062ef2b8767 100644
--- a/tools/bpf/bpftool/main.c
+++ b/tools/bpf/bpftool/main.c
@@ -507,9 +507,9 @@ int main(int argc, char **argv)
* It will still be rejected if users use LIBBPF_STRICT_ALL
* mode for loading generated skeleton.
*/
- ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
- if (ret)
- p_err("failed to enable libbpf strict mode: %d", ret);
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL & ~LIBBPF_STRICT_MAP_DEFINITIONS);
+ } else {
+ libbpf_set_strict_mode(LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK);
}
argc -= optind;
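
With LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK set, libbpf itself raises RLIMIT_MEMLOCK on kernels that do not account BPF memory against memory cgroups, which is why the explicit set_max_rlimit() calls are dropped throughout bpftool. As a minimal sketch (illustration only, not part of this patch), a standalone tool relying on the same behavior could look like:

#include <bpf/libbpf.h>
#include <bpf/bpf.h>

int main(void)
{
	/* opt into libbpf 1.0 behavior; on older kernels without memcg-based
	 * accounting, libbpf bumps RLIMIT_MEMLOCK automatically
	 */
	libbpf_set_strict_mode(LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK);

	/* no setrlimit(RLIMIT_MEMLOCK, ...) call is needed before this */
	int map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "demo_map", 4, 8, 16, NULL);

	return map_fd < 0 ? 1 : 0;
}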
diff --git a/tools/bpf/bpftool/main.h b/tools/bpf/bpftool/main.h
index 6e9277ffc68c..aa99ffab451a 100644
--- a/tools/bpf/bpftool/main.h
+++ b/tools/bpf/bpftool/main.h
@@ -102,8 +102,6 @@ int detect_common_prefix(const char *arg, ...);
void fprint_hex(FILE *f, void *arg, unsigned int n, const char *sep);
void usage(void) __noreturn;
-void set_max_rlimit(void);
-
int mount_tracefs(const char *target);
struct obj_ref {
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index c26378f20831..877387ef79c7 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -1342,8 +1342,6 @@ static int do_create(int argc, char **argv)
goto exit;
}
- set_max_rlimit();
-
fd = bpf_map_create(map_type, map_name, key_size, value_size, max_entries, &attr);
if (fd < 0) {
p_err("map create failed: %s", strerror(errno));
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
index 50de087b0db7..226ec2c39052 100644
--- a/tools/bpf/bpftool/perf.c
+++ b/tools/bpf/bpftool/perf.c
@@ -11,7 +11,7 @@
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
-#include <ftw.h>
+#include <dirent.h>
#include <bpf/bpf.h>
@@ -147,81 +147,83 @@ static void print_perf_plain(int pid, int fd, __u32 prog_id, __u32 fd_type,
}
}
-static int show_proc(const char *fpath, const struct stat *sb,
- int tflag, struct FTW *ftwbuf)
+static int show_proc(void)
{
+ struct dirent *proc_de, *pid_fd_de;
__u64 probe_offset, probe_addr;
__u32 len, prog_id, fd_type;
- int err, pid = 0, fd = 0;
+ DIR *proc, *pid_fd;
+ int err, pid, fd;
const char *pch;
char buf[4096];
- /* prefix always /proc */
- pch = fpath + 5;
- if (*pch == '\0')
- return 0;
+ proc = opendir("/proc");
+ if (!proc)
+ return -1;
- /* pid should be all numbers */
- pch++;
- while (isdigit(*pch)) {
- pid = pid * 10 + *pch - '0';
- pch++;
- }
- if (*pch == '\0')
- return 0;
- if (*pch != '/')
- return FTW_SKIP_SUBTREE;
-
- /* check /proc/<pid>/fd directory */
- pch++;
- if (strncmp(pch, "fd", 2))
- return FTW_SKIP_SUBTREE;
- pch += 2;
- if (*pch == '\0')
- return 0;
- if (*pch != '/')
- return FTW_SKIP_SUBTREE;
-
- /* check /proc/<pid>/fd/<fd_num> */
- pch++;
- while (isdigit(*pch)) {
- fd = fd * 10 + *pch - '0';
- pch++;
- }
- if (*pch != '\0')
- return FTW_SKIP_SUBTREE;
+ while ((proc_de = readdir(proc))) {
+ pid = 0;
+ pch = proc_de->d_name;
- /* query (pid, fd) for potential perf events */
- len = sizeof(buf);
- err = bpf_task_fd_query(pid, fd, 0, buf, &len, &prog_id, &fd_type,
- &probe_offset, &probe_addr);
- if (err < 0)
- return 0;
+ /* pid should be all numbers */
+ while (isdigit(*pch)) {
+ pid = pid * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
- if (json_output)
- print_perf_json(pid, fd, prog_id, fd_type, buf, probe_offset,
- probe_addr);
- else
- print_perf_plain(pid, fd, prog_id, fd_type, buf, probe_offset,
- probe_addr);
+ err = snprintf(buf, sizeof(buf), "/proc/%s/fd", proc_de->d_name);
+ if (err < 0 || err >= (int)sizeof(buf))
+ continue;
+
+ pid_fd = opendir(buf);
+ if (!pid_fd)
+ continue;
+ while ((pid_fd_de = readdir(pid_fd))) {
+ fd = 0;
+ pch = pid_fd_de->d_name;
+
+ /* fd should be all numbers */
+ while (isdigit(*pch)) {
+ fd = fd * 10 + *pch - '0';
+ pch++;
+ }
+ if (*pch != '\0')
+ continue;
+
+ /* query (pid, fd) for potential perf events */
+ len = sizeof(buf);
+ err = bpf_task_fd_query(pid, fd, 0, buf, &len,
+ &prog_id, &fd_type,
+ &probe_offset, &probe_addr);
+ if (err < 0)
+ continue;
+
+ if (json_output)
+ print_perf_json(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ else
+ print_perf_plain(pid, fd, prog_id, fd_type, buf,
+ probe_offset, probe_addr);
+ }
+ closedir(pid_fd);
+ }
+ closedir(proc);
return 0;
}
static int do_show(int argc, char **argv)
{
- int flags = FTW_ACTIONRETVAL | FTW_PHYS;
- int err = 0, nopenfd = 16;
+ int err;
if (!has_perf_query_support())
return -1;
if (json_output)
jsonw_start_array(json_wtr);
- if (nftw("/proc", show_proc, nopenfd, flags) == -1) {
- p_err("%s", strerror(errno));
- err = -1;
- }
+ err = show_proc();
if (json_output)
jsonw_end_array(json_wtr);
diff --git a/tools/bpf/bpftool/pids.c b/tools/bpf/bpftool/pids.c
index bb6c969a114a..e2d00d3cd868 100644
--- a/tools/bpf/bpftool/pids.c
+++ b/tools/bpf/bpftool/pids.c
@@ -108,7 +108,6 @@ int build_obj_refs_table(struct hashmap **map, enum bpf_obj_type type)
p_err("failed to create hashmap for PID references");
return -1;
}
- set_max_rlimit();
skel = pid_iter_bpf__open();
if (!skel) {
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 8643b37d4e43..5c2c63df92e8 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -1604,8 +1604,6 @@ static int load_with_options(int argc, char **argv, bool first_prog_only)
}
}
- set_max_rlimit();
-
if (verifier_logs)
/* log_level1 + log_level2 + stats, but not stable UAPI */
open_opts.kernel_log_level = 1 + 2 + 4;
@@ -2303,7 +2301,6 @@ static int do_profile(int argc, char **argv)
}
}
- set_max_rlimit();
err = profiler_bpf__load(profile_obj);
if (err) {
p_err("failed to load profile_obj");
diff --git a/tools/bpf/bpftool/struct_ops.c b/tools/bpf/bpftool/struct_ops.c
index e08a6ff2866c..2535f079ed67 100644
--- a/tools/bpf/bpftool/struct_ops.c
+++ b/tools/bpf/bpftool/struct_ops.c
@@ -501,8 +501,6 @@ static int do_register(int argc, char **argv)
if (libbpf_get_error(obj))
return -1;
- set_max_rlimit();
-
if (bpf_object__load(obj)) {
bpf_object__close(obj);
return -1;
diff --git a/tools/bpf/bpftool/tracelog.c b/tools/bpf/bpftool/tracelog.c
index e80a5c79b38f..bf1f02212797 100644
--- a/tools/bpf/bpftool/tracelog.c
+++ b/tools/bpf/bpftool/tracelog.c
@@ -9,7 +9,7 @@
#include <string.h>
#include <unistd.h>
#include <linux/magic.h>
-#include <sys/fcntl.h>
+#include <fcntl.h>
#include <sys/vfs.h>
#include "main.h"
diff --git a/tools/bpf/runqslower/runqslower.c b/tools/bpf/runqslower/runqslower.c
index d78f4148597f..83c5993a139a 100644
--- a/tools/bpf/runqslower/runqslower.c
+++ b/tools/bpf/runqslower/runqslower.c
@@ -4,7 +4,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/resource.h>
#include <time.h>
#include <bpf/libbpf.h>
#include <bpf/bpf.h>
@@ -88,16 +87,6 @@ int libbpf_print_fn(enum libbpf_print_level level,
return vfprintf(stderr, format, args);
}
-static int bump_memlock_rlimit(void)
-{
- struct rlimit rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- return setrlimit(RLIMIT_MEMLOCK, &rlim_new);
-}
-
void handle_event(void *ctx, int cpu, void *data, __u32 data_sz)
{
const struct runq_event *e = data;
@@ -133,11 +122,8 @@ int main(int argc, char **argv)
libbpf_set_print(libbpf_print_fn);
- err = bump_memlock_rlimit();
- if (err) {
- fprintf(stderr, "failed to increase rlimit: %d", err);
- return 1;
- }
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
obj = runqslower_bpf__open();
if (!obj) {
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h
index 39acc149d843..d7dfeab0d71a 100644
--- a/tools/include/uapi/asm/bpf_perf_event.h
+++ b/tools/include/uapi/asm/bpf_perf_event.h
@@ -1,5 +1,7 @@
#if defined(__aarch64__)
#include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h"
+#elif defined(__arc__)
+#include "../../arch/arc/include/uapi/asm/bpf_perf_event.h"
#elif defined(__s390__)
#include "../../arch/s390/include/uapi/asm/bpf_perf_event.h"
#elif defined(__riscv)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index d14b10b85e51..444fe6f1cf35 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5143,6 +5143,17 @@ union bpf_attr {
* The **hash_algo** is returned on success,
* **-EOPNOTSUP** if the hash calculation failed or **-EINVAL** if
* invalid arguments are passed.
+ *
+ * void *bpf_kptr_xchg(void *map_value, void *ptr)
+ * Description
+ * Exchange kptr at pointer *map_value* with *ptr*, and return the
+ * old value. *ptr* can be NULL, otherwise it must be a referenced
+ * pointer which will be released when this helper is called.
+ * Return
+ * The old value of kptr (which can be NULL). The returned pointer
+ * if not NULL, is a reference which must be released using its
+ * corresponding release function, or moved into a BPF map before
+ * program exit.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -5339,6 +5350,7 @@ union bpf_attr {
FN(copy_from_user_task), \
FN(skb_set_tstamp), \
FN(ima_file_hash), \
+ FN(kptr_xchg), \
/* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
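
For illustration only (this snippet is not part of the patch), the exchange-and-release pattern that the bpf_kptr_xchg() documentation above describes looks roughly like the following inside a BPF program, where v points at a map value with a referenced kptr field, p is a pointer obtained from an acquire kfunc, and release_kfunc() stands in for whatever release kfunc matches the object type (placeholder names):

	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		/* ownership of the old reference moved to us; release it */
		release_kfunc(old);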
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index cf27251adb92..a9d292c106c2 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -817,7 +817,7 @@ int bpf_link_create(int prog_fd, int target_fd,
{
__u32 target_btf_id, iter_info_len;
union bpf_attr attr;
- int fd;
+ int fd, err;
if (!OPTS_VALID(opts, bpf_link_create_opts))
return libbpf_err(-EINVAL);
@@ -870,7 +870,37 @@ int bpf_link_create(int prog_fd, int target_fd,
}
proceed:
fd = sys_bpf_fd(BPF_LINK_CREATE, &attr, sizeof(attr));
- return libbpf_err_errno(fd);
+ if (fd >= 0)
+ return fd;
+ /* we'll get EINVAL if LINK_CREATE doesn't support attaching fentry
+ * and other similar programs
+ */
+ err = -errno;
+ if (err != -EINVAL)
+ return libbpf_err(err);
+
+ /* if user used features not supported by
+ * BPF_RAW_TRACEPOINT_OPEN command, then just give up immediately
+ */
+ if (attr.link_create.target_fd || attr.link_create.target_btf_id)
+ return libbpf_err(err);
+ if (!OPTS_ZEROED(opts, sz))
+ return libbpf_err(err);
+
+ /* otherwise, for a few select kinds of programs that can be
+ * attached using BPF_RAW_TRACEPOINT_OPEN command, try that as
+ * a fallback for older kernels
+ */
+ switch (attach_type) {
+ case BPF_TRACE_RAW_TP:
+ case BPF_LSM_MAC:
+ case BPF_TRACE_FENTRY:
+ case BPF_TRACE_FEXIT:
+ case BPF_MODIFY_RETURN:
+ return bpf_raw_tracepoint_open(NULL, prog_fd);
+ default:
+ return libbpf_err(err);
+ }
}
int bpf_link_detach(int link_fd)
diff --git a/tools/lib/bpf/bpf_helpers.h b/tools/lib/bpf/bpf_helpers.h
index 44df982d2a5c..5de3eb267125 100644
--- a/tools/lib/bpf/bpf_helpers.h
+++ b/tools/lib/bpf/bpf_helpers.h
@@ -149,6 +149,13 @@ enum libbpf_tristate {
#define __kconfig __attribute__((section(".kconfig")))
#define __ksym __attribute__((section(".ksyms")))
+#if __has_attribute(btf_type_tag)
+#define __kptr __attribute__((btf_type_tag("kptr")))
+#define __kptr_ref __attribute__((btf_type_tag("kptr_ref")))
+#else
+#define __kptr
+#define __kptr_ref
+#endif
#ifndef ___bpf_concat
#define ___bpf_concat(a, b) a ## b
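
For illustration only (not part of the patch), a map value using the new tags could be declared as below; this mirrors the map_kptr selftest added later in this series, which uses struct prog_test_ref_kfunc from the kernel's BPF test module:

struct map_value {
	struct prog_test_ref_kfunc __kptr *unref_ptr;     /* unreferenced kptr */
	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;   /* referenced kptr, swapped via bpf_kptr_xchg() */
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} demo_map SEC(".maps");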
diff --git a/tools/lib/bpf/bpf_tracing.h b/tools/lib/bpf/bpf_tracing.h
index e3a8c947e89f..01ce121c302d 100644
--- a/tools/lib/bpf/bpf_tracing.h
+++ b/tools/lib/bpf/bpf_tracing.h
@@ -27,6 +27,9 @@
#elif defined(__TARGET_ARCH_riscv)
#define bpf_target_riscv
#define bpf_target_defined
+#elif defined(__TARGET_ARCH_arc)
+ #define bpf_target_arc
+ #define bpf_target_defined
#else
/* Fall back to what the compiler says */
@@ -54,6 +57,9 @@
#elif defined(__riscv) && __riscv_xlen == 64
#define bpf_target_riscv
#define bpf_target_defined
+#elif defined(__arc__)
+ #define bpf_target_arc
+ #define bpf_target_defined
#endif /* no compiler target */
#endif
@@ -233,6 +239,23 @@ struct pt_regs___arm64 {
/* riscv does not select ARCH_HAS_SYSCALL_WRAPPER. */
#define PT_REGS_SYSCALL_REGS(ctx) ctx
+#elif defined(bpf_target_arc)
+
+/* arc provides struct user_regs_struct instead of struct pt_regs to userspace */
+#define __PT_REGS_CAST(x) ((const struct user_regs_struct *)(x))
+#define __PT_PARM1_REG scratch.r0
+#define __PT_PARM2_REG scratch.r1
+#define __PT_PARM3_REG scratch.r2
+#define __PT_PARM4_REG scratch.r3
+#define __PT_PARM5_REG scratch.r4
+#define __PT_RET_REG scratch.blink
+#define __PT_FP_REG __unsupported__
+#define __PT_RC_REG scratch.r0
+#define __PT_SP_REG scratch.sp
+#define __PT_IP_REG scratch.ret
+/* arc does not select ARCH_HAS_SYSCALL_WRAPPER. */
+#define PT_REGS_SYSCALL_REGS(ctx) ctx
+
#endif
#if defined(bpf_target_defined)
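
These definitions let the generic PT_REGS_PARM*() accessors and the BPF_KPROBE() convenience macro from bpf_tracing.h resolve to the right user_regs_struct fields on ARC. A minimal sketch (not part of the patch; the probed function and its arguments are just an example):

SEC("kprobe/do_unlinkat")
int BPF_KPROBE(handle_unlinkat, int dfd, struct filename *name)
{
	/* BPF_KPROBE expands to PT_REGS_PARM1(ctx)/PT_REGS_PARM2(ctx), which
	 * now map to scratch.r0/scratch.r1 when targeting ARC
	 */
	bpf_printk("unlinkat dfd=%d", dfd);
	return 0;
}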
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d124e9e533f0..bb1e06eb1eca 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -2626,6 +2626,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
const struct btf_ext_info_sec *sinfo;
struct btf_ext_info *ext_info;
__u32 info_left, record_size;
+ size_t sec_cnt = 0;
/* The start of the info sec (including the __u32 record_size). */
void *info;
@@ -2689,8 +2690,7 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
return -EINVAL;
}
- total_record_size = sec_hdrlen +
- (__u64)num_records * record_size;
+ total_record_size = sec_hdrlen + (__u64)num_records * record_size;
if (info_left < total_record_size) {
pr_debug("%s section has incorrect num_records in .BTF.ext\n",
ext_sec->desc);
@@ -2699,12 +2699,14 @@ static int btf_ext_setup_info(struct btf_ext *btf_ext,
info_left -= total_record_size;
sinfo = (void *)sinfo + total_record_size;
+ sec_cnt++;
}
ext_info = ext_sec->ext_info;
ext_info->len = ext_sec->len - sizeof(__u32);
ext_info->rec_size = record_size;
ext_info->info = info + sizeof(__u32);
+ ext_info->sec_cnt = sec_cnt;
return 0;
}
@@ -2788,6 +2790,9 @@ void btf_ext__free(struct btf_ext *btf_ext)
{
if (IS_ERR_OR_NULL(btf_ext))
return;
+ free(btf_ext->func_info.sec_idxs);
+ free(btf_ext->line_info.sec_idxs);
+ free(btf_ext->core_relo_info.sec_idxs);
free(btf_ext->data);
free(btf_ext);
}
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 465b7c0996f1..73a5192defb3 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -302,7 +302,7 @@ struct bpf_program {
void *priv;
bpf_program_clear_priv_t clear_priv;
- bool load;
+ bool autoload;
bool mark_btf_static;
enum bpf_prog_type type;
enum bpf_attach_type expected_attach_type;
@@ -672,7 +672,18 @@ bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
prog->insns_cnt = prog->sec_insn_cnt;
prog->type = BPF_PROG_TYPE_UNSPEC;
- prog->load = true;
+
+ /* libbpf's convention for SEC("?abc...") is that it's just like
+ * SEC("abc...") but the corresponding bpf_program starts out with
+ * autoload set to false.
+ */
+ if (sec_name[0] == '?') {
+ prog->autoload = false;
+ /* from now on forget there was ? in section name */
+ sec_name++;
+ } else {
+ prog->autoload = true;
+ }
prog->instances.fds = NULL;
prog->instances.nr = -1;
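
For illustration only (not part of the patch), the new '?' prefix pairs with bpf_program__set_autoload() like this; the section, program, and skeleton names are placeholders:

/* BPF side: compiled into the object, but not loaded unless opted in */
SEC("?kprobe/do_unlinkat")
int optional_probe(struct pt_regs *ctx)
{
	return 0;
}

/* user-space side: re-enable it before loading the object */
bpf_program__set_autoload(skel->progs.optional_probe, true);
err = demo_bpf__load(skel);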
@@ -1222,10 +1233,8 @@ static void bpf_object__elf_finish(struct bpf_object *obj)
if (!obj->efile.elf)
return;
- if (obj->efile.elf) {
- elf_end(obj->efile.elf);
- obj->efile.elf = NULL;
- }
+ elf_end(obj->efile.elf);
+ obj->efile.elf = NULL;
obj->efile.symbols = NULL;
obj->efile.st_ops_data = NULL;
@@ -2756,6 +2765,9 @@ static int bpf_object__init_btf(struct bpf_object *obj,
btf__set_pointer_size(obj->btf, 8);
}
if (btf_ext_data) {
+ struct btf_ext_info *ext_segs[3];
+ int seg_num, sec_num;
+
if (!obj->btf) {
pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
BTF_EXT_ELF_SEC, BTF_ELF_SEC);
@@ -2769,6 +2781,43 @@ static int bpf_object__init_btf(struct bpf_object *obj,
obj->btf_ext = NULL;
goto out;
}
+
+ /* setup .BTF.ext to ELF section mapping */
+ ext_segs[0] = &obj->btf_ext->func_info;
+ ext_segs[1] = &obj->btf_ext->line_info;
+ ext_segs[2] = &obj->btf_ext->core_relo_info;
+ for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
+ struct btf_ext_info *seg = ext_segs[seg_num];
+ const struct btf_ext_info_sec *sec;
+ const char *sec_name;
+ Elf_Scn *scn;
+
+ if (seg->sec_cnt == 0)
+ continue;
+
+ seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
+ if (!seg->sec_idxs) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sec_num = 0;
+ for_each_btf_ext_sec(seg, sec) {
+ /* preventively increment index to avoid doing
+ * this before every continue below
+ */
+ sec_num++;
+
+ sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
+ if (str_is_empty(sec_name))
+ continue;
+ scn = elf_sec_by_name(obj, sec_name);
+ if (!scn)
+ continue;
+
+ seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
+ }
+ }
}
out:
if (err && libbpf_needs_btf(obj)) {
@@ -2927,7 +2976,7 @@ static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
}
bpf_object__for_each_program(prog, obj) {
- if (!prog->load)
+ if (!prog->autoload)
continue;
if (prog_needs_vmlinux_btf(prog))
return true;
@@ -4594,7 +4643,7 @@ static int probe_kern_probe_read_kernel(void)
};
int fd, insn_cnt = ARRAY_SIZE(insns);
- fd = bpf_prog_load(BPF_PROG_TYPE_KPROBE, NULL, "GPL", insns, insn_cnt, NULL);
+ fd = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, NULL);
return probe_fd(fd);
}
@@ -5577,6 +5626,22 @@ static int record_relo_core(struct bpf_program *prog,
return 0;
}
+static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
+{
+ struct reloc_desc *relo;
+ int i;
+
+ for (i = 0; i < prog->nr_reloc; i++) {
+ relo = &prog->reloc_desc[i];
+ if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
+ continue;
+
+ return relo->core_relo;
+ }
+
+ return NULL;
+}
+
static int bpf_core_resolve_relo(struct bpf_program *prog,
const struct bpf_core_relo *relo,
int relo_idx,
@@ -5633,7 +5698,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
struct bpf_program *prog;
struct bpf_insn *insn;
const char *sec_name;
- int i, err = 0, insn_idx, sec_idx;
+ int i, err = 0, insn_idx, sec_idx, sec_num;
if (obj->btf_ext->core_relo_info.len == 0)
return 0;
@@ -5654,32 +5719,18 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
}
seg = &obj->btf_ext->core_relo_info;
+ sec_num = 0;
for_each_btf_ext_sec(seg, sec) {
+ sec_idx = seg->sec_idxs[sec_num];
+ sec_num++;
+
sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
if (str_is_empty(sec_name)) {
err = -EINVAL;
goto out;
}
- /* bpf_object's ELF is gone by now so it's not easy to find
- * section index by section name, but we can find *any*
- * bpf_program within desired section name and use it's
- * prog->sec_idx to do a proper search by section index and
- * instruction offset
- */
- prog = NULL;
- for (i = 0; i < obj->nr_programs; i++) {
- prog = &obj->programs[i];
- if (strcmp(prog->sec_name, sec_name) == 0)
- break;
- }
- if (!prog) {
- pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
- return -ENOENT;
- }
- sec_idx = prog->sec_idx;
- pr_debug("sec '%s': found %d CO-RE relocations\n",
- sec_name, sec->num_info);
+ pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
for_each_btf_ext_rec(seg, sec, i, rec) {
if (rec->insn_off % BPF_INSN_SZ)
@@ -5702,7 +5753,7 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
/* no need to apply CO-RE relocation if the program is
* not going to be loaded
*/
- if (!prog->load)
+ if (!prog->autoload)
continue;
/* adjust insn_idx from section frame of reference to the local
@@ -5714,16 +5765,16 @@ bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
return -EINVAL;
insn = &prog->insns[insn_idx];
- if (prog->obj->gen_loader) {
- err = record_relo_core(prog, rec, insn_idx);
- if (err) {
- pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
- prog->name, i, err);
- goto out;
- }
- continue;
+ err = record_relo_core(prog, rec, insn_idx);
+ if (err) {
+ pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
+ prog->name, i, err);
+ goto out;
}
+ if (prog->obj->gen_loader)
+ continue;
+
err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
if (err) {
pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
@@ -5863,14 +5914,13 @@ static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
void *rec, *rec_end, *new_prog_info;
const struct btf_ext_info_sec *sec;
size_t old_sz, new_sz;
- const char *sec_name;
- int i, off_adj;
+ int i, sec_num, sec_idx, off_adj;
+ sec_num = 0;
for_each_btf_ext_sec(ext_info, sec) {
- sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
- if (!sec_name)
- return -EINVAL;
- if (strcmp(sec_name, prog->sec_name) != 0)
+ sec_idx = ext_info->sec_idxs[sec_num];
+ sec_num++;
+ if (prog->sec_idx != sec_idx)
continue;
for_each_btf_ext_rec(ext_info, sec, i, rec) {
@@ -6265,7 +6315,6 @@ bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
if (err)
return err;
-
return 0;
}
@@ -6326,8 +6375,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
err);
return err;
}
- if (obj->gen_loader)
- bpf_object__sort_relos(obj);
+ bpf_object__sort_relos(obj);
}
/* Before relocating calls pre-process relocations and mark
@@ -6363,7 +6411,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
*/
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load)
+ if (!prog->autoload)
continue;
err = bpf_object__relocate_calls(obj, prog);
@@ -6378,7 +6426,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load)
+ if (!prog->autoload)
continue;
err = bpf_object__relocate_data(obj, prog);
if (err) {
@@ -6387,8 +6435,7 @@ bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
return err;
}
}
- if (!obj->gen_loader)
- bpf_object__free_relocs(obj);
+
return 0;
}
@@ -6665,6 +6712,8 @@ static int libbpf_prepare_prog_load(struct bpf_program *prog,
return 0;
}
+static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
+
static int bpf_object_load_prog_instance(struct bpf_object *obj, struct bpf_program *prog,
struct bpf_insn *insns, int insns_cnt,
const char *license, __u32 kern_version,
@@ -6811,6 +6860,10 @@ retry_load:
goto retry_load;
ret = -errno;
+
+ /* post-process verifier log to improve error descriptions */
+ fixup_verifier_log(prog, log_buf, log_buf_size);
+
cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
pr_perm_msg(ret);
@@ -6819,10 +6872,6 @@ retry_load:
pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
prog->name, log_buf);
}
- if (insns_cnt >= BPF_MAXINSNS) {
- pr_warn("prog '%s': program too large (%d insns), at most %d insns\n",
- prog->name, insns_cnt, BPF_MAXINSNS);
- }
out:
if (own_log_buf)
@@ -6830,6 +6879,128 @@ out:
return ret;
}
+static char *find_prev_line(char *buf, char *cur)
+{
+ char *p;
+
+ if (cur == buf) /* end of a log buf */
+ return NULL;
+
+ p = cur - 1;
+ while (p - 1 >= buf && *(p - 1) != '\n')
+ p--;
+
+ return p;
+}
+
+static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
+ char *orig, size_t orig_sz, const char *patch)
+{
+ /* size of the remaining log content to the right from the to-be-replaced part */
+ size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
+ size_t patch_sz = strlen(patch);
+
+ if (patch_sz != orig_sz) {
+ /* If patch line(s) are longer than original piece of verifier log,
+ * shift log contents by (patch_sz - orig_sz) bytes to the right
+ * starting from after to-be-replaced part of the log.
+ *
+ * If patch line(s) are shorter than original piece of verifier log,
+ * shift log contents by (orig_sz - patch_sz) bytes to the left
+ * starting from after to-be-replaced part of the log
+ *
+ * We need to be careful about not overflowing available
+ * buf_sz capacity. If that's the case, we'll truncate the end
+ * of the original log, as necessary.
+ */
+ if (patch_sz > orig_sz) {
+ if (orig + patch_sz >= buf + buf_sz) {
+ /* patch is big enough to cover remaining space completely */
+ patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
+ rem_sz = 0;
+ } else if (patch_sz - orig_sz > buf_sz - log_sz) {
+ /* patch causes part of remaining log to be truncated */
+ rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
+ }
+ }
+ /* shift remaining log to the right by calculated amount */
+ memmove(orig + patch_sz, orig + orig_sz, rem_sz);
+ }
+
+ memcpy(orig, patch, patch_sz);
+}
+
+static void fixup_log_failed_core_relo(struct bpf_program *prog,
+ char *buf, size_t buf_sz, size_t log_sz,
+ char *line1, char *line2, char *line3)
+{
+ /* Expected log for failed and not properly guarded CO-RE relocation:
+ * line1 -> 123: (85) call unknown#195896080
+ * line2 -> invalid func unknown#195896080
+ * line3 -> <anything else or end of buffer>
+ *
+ * "123" is the index of the instruction that was poisoned. We extract
+ * instruction index to find corresponding CO-RE relocation and
+ * replace this part of the log with more relevant information about
+ * failed CO-RE relocation.
+ */
+ const struct bpf_core_relo *relo;
+ struct bpf_core_spec spec;
+ char patch[512], spec_buf[256];
+ int insn_idx, err;
+
+ if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
+ return;
+
+ relo = find_relo_core(prog, insn_idx);
+ if (!relo)
+ return;
+
+ err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
+ if (err)
+ return;
+
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
+ snprintf(patch, sizeof(patch),
+ "%d: <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation %s\n",
+ insn_idx, spec_buf);
+
+ patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
+}
+
+static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
+{
+ /* look for familiar error patterns in last N lines of the log */
+ const size_t max_last_line_cnt = 10;
+ char *prev_line, *cur_line, *next_line;
+ size_t log_sz;
+ int i;
+
+ if (!buf)
+ return;
+
+ log_sz = strlen(buf) + 1;
+ next_line = buf + log_sz - 1;
+
+ for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
+ cur_line = find_prev_line(buf, next_line);
+ if (!cur_line)
+ return;
+
+ /* failed CO-RE relocation case */
+ if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
+ prev_line = find_prev_line(buf, cur_line);
+ if (!prev_line)
+ continue;
+
+ fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
+ prev_line, cur_line, next_line);
+ return;
+ }
+ }
+}
+
static int bpf_program_record_relos(struct bpf_program *prog)
{
struct bpf_object *obj = prog->obj;
@@ -6975,7 +7146,7 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
prog = &obj->programs[i];
if (prog_is_subprog(obj, prog))
continue;
- if (!prog->load) {
+ if (!prog->autoload) {
pr_debug("prog '%s': skipped loading\n", prog->name);
continue;
}
@@ -6984,8 +7155,8 @@ bpf_object__load_progs(struct bpf_object *obj, int log_level)
if (err)
return err;
}
- if (obj->gen_loader)
- bpf_object__free_relocs(obj);
+
+ bpf_object__free_relocs(obj);
return 0;
}
@@ -7005,8 +7176,8 @@ static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object
continue;
}
- bpf_program__set_type(prog, prog->sec_def->prog_type);
- bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);
+ prog->type = prog->sec_def->prog_type;
+ prog->expected_attach_type = prog->sec_def->expected_attach_type;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
@@ -8455,7 +8626,7 @@ const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
bool bpf_program__autoload(const struct bpf_program *prog)
{
- return prog->load;
+ return prog->autoload;
}
int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
@@ -8463,7 +8634,7 @@ int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
if (prog->obj->loaded)
return libbpf_err(-EINVAL);
- prog->load = autoload;
+ prog->autoload = autoload;
return 0;
}
@@ -8551,9 +8722,13 @@ enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
return prog->type;
}
-void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
+int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
prog->type = type;
+ return 0;
}
static bool bpf_program__is_type(const struct bpf_program *prog,
@@ -8567,8 +8742,7 @@ int bpf_program__set_##NAME(struct bpf_program *prog) \
{ \
if (!prog) \
return libbpf_err(-EINVAL); \
- bpf_program__set_type(prog, TYPE); \
- return 0; \
+ return bpf_program__set_type(prog, TYPE); \
} \
\
bool bpf_program__is_##NAME(const struct bpf_program *prog) \
@@ -8598,10 +8772,14 @@ enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program
return prog->expected_attach_type;
}
-void bpf_program__set_expected_attach_type(struct bpf_program *prog,
+int bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type)
{
+ if (prog->obj->loaded)
+ return libbpf_err(-EBUSY);
+
prog->expected_attach_type = type;
+ return 0;
}
__u32 bpf_program__flags(const struct bpf_program *prog)
@@ -9671,9 +9849,8 @@ static int bpf_prog_load_xattr2(const struct bpf_prog_load_attr *attr,
* bpf_object__open guessed
*/
if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
- bpf_program__set_type(prog, attr->prog_type);
- bpf_program__set_expected_attach_type(prog,
- attach_type);
+ prog->type = attr->prog_type;
+ prog->expected_attach_type = attach_type;
}
if (bpf_program__type(prog) == BPF_PROG_TYPE_UNSPEC) {
/*
@@ -10982,7 +11159,7 @@ struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
char resolved_path[512];
struct bpf_object *obj = prog->obj;
struct bpf_link *link;
- long usdt_cookie;
+ __u64 usdt_cookie;
int err;
if (!OPTS_VALID(opts, bpf_uprobe_opts))
@@ -11245,7 +11422,8 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
return libbpf_err_ptr(-ENOMEM);
link->detach = &bpf_link__detach_fd;
- pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
+ /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
+ pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), NULL);
if (pfd < 0) {
pfd = -errno;
free(link);
@@ -11254,7 +11432,7 @@ static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *pro
return libbpf_err_ptr(pfd);
}
link->fd = pfd;
- return (struct bpf_link *)link;
+ return link;
}
struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
@@ -12665,7 +12843,7 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
struct bpf_program *prog = *s->progs[i].prog;
struct bpf_link **link = s->progs[i].link;
- if (!prog->load)
+ if (!prog->autoload)
continue;
/* auto-attaching not supported for this program */
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 63d66f1adf1a..cdbfee60ea3e 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -378,7 +378,31 @@ struct bpf_link;
LIBBPF_API struct bpf_link *bpf_link__open(const char *path);
LIBBPF_API int bpf_link__fd(const struct bpf_link *link);
LIBBPF_API const char *bpf_link__pin_path(const struct bpf_link *link);
+/**
+ * @brief **bpf_link__pin()** pins the BPF link to a file
+ * in the BPF FS specified by a path. This increments the link's
+ * reference count, allowing it to stay loaded after the process
+ * which loaded it has exited.
+ *
+ * @param link BPF link to pin, must already be loaded
+ * @param path file path in a BPF file system
+ * @return 0, on success; negative error code, otherwise
+ */
+
LIBBPF_API int bpf_link__pin(struct bpf_link *link, const char *path);
+
+/**
+ * @brief **bpf_link__unpin()** unpins the BPF link from a file
+ * in the BPF FS specified by a path. This decrements the link's
+ * reference count.
+ *
+ * The file pinning the BPF link can also be unlinked by a different
+ * process in which case this function will return an error.
+ *
+ * @param link BPF link to unpin
+ * @return 0, on success; negative error code, otherwise
+ */
LIBBPF_API int bpf_link__unpin(struct bpf_link *link);
LIBBPF_API int bpf_link__update_program(struct bpf_link *link,
struct bpf_program *prog);
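
For illustration only (not part of the patch), the pin/unpin pattern these comments describe; the pin path is arbitrary as long as it lives inside a mounted bpffs:

	/* link obtained earlier, e.g. from bpf_program__attach() */
	err = bpf_link__pin(link, "/sys/fs/bpf/my_prog_link");
	if (err)
		return err;

	/* ... later, drop the bpffs reference again ... */
	err = bpf_link__unpin(link);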
@@ -386,6 +410,22 @@ LIBBPF_API void bpf_link__disconnect(struct bpf_link *link);
LIBBPF_API int bpf_link__detach(struct bpf_link *link);
LIBBPF_API int bpf_link__destroy(struct bpf_link *link);
+/**
+ * @brief **bpf_program__attach()** is a generic function for attaching
+ * a BPF program based on auto-detection of program type, attach type,
+ * and extra parameters, where applicable.
+ *
+ * @param prog BPF program to attach
+ * @return Reference to the newly created BPF link; or NULL on error, with the
+ * error code stored in errno
+ *
+ * This is supported for:
+ * - kprobe/kretprobe (depends on SEC() definition)
+ * - uprobe/uretprobe (depends on SEC() definition)
+ * - tracepoint
+ * - raw tracepoint
+ * - tracing programs (typed raw TP/fentry/fexit/fmod_ret)
+ */
LIBBPF_API struct bpf_link *
bpf_program__attach(const struct bpf_program *prog);
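
For illustration only (not part of the patch), the error convention documented above in libbpf 1.0 mode: a NULL return with the error code in errno. The skeleton and program names are placeholders:

	struct bpf_link *link;

	link = bpf_program__attach(skel->progs.handle_tp);
	if (!link) {
		fprintf(stderr, "failed to attach: %d\n", -errno);
		return -errno;
	}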
@@ -686,12 +726,37 @@ LIBBPF_DEPRECATED_SINCE(0, 8, "use bpf_program__set_type() instead")
LIBBPF_API int bpf_program__set_sk_lookup(struct bpf_program *prog);
LIBBPF_API enum bpf_prog_type bpf_program__type(const struct bpf_program *prog);
-LIBBPF_API void bpf_program__set_type(struct bpf_program *prog,
- enum bpf_prog_type type);
+
+/**
+ * @brief **bpf_program__set_type()** sets the program
+ * type of the passed BPF program.
+ * @param prog BPF program to set the program type for
+ * @param type program type to assign to the BPF program
+ * @return error code; or 0 if no error. An error occurs
+ * if the object is already loaded.
+ *
+ * This must be called before the BPF object is loaded,
+ * otherwise it has no effect and an error is returned.
+ */
+LIBBPF_API int bpf_program__set_type(struct bpf_program *prog,
+ enum bpf_prog_type type);
LIBBPF_API enum bpf_attach_type
bpf_program__expected_attach_type(const struct bpf_program *prog);
-LIBBPF_API void
+
+/**
+ * @brief **bpf_program__set_expected_attach_type()** sets the
+ * attach type of the passed BPF program. This is used for
+ * auto-detection of attachment when programs are loaded.
+ * @param prog BPF program to set the attach type for
+ * @param type attach type to assign to the BPF program
+ * @return error code; or 0 if no error. An error occurs
+ * if the object is already loaded.
+ *
+ * This must be called before the BPF object is loaded,
+ * otherwise it has no effect and an error is returned.
+ */
+LIBBPF_API int
bpf_program__set_expected_attach_type(struct bpf_program *prog,
enum bpf_attach_type type);
@@ -707,6 +772,17 @@ LIBBPF_API int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_le
LIBBPF_API const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size);
LIBBPF_API int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size);
+/**
+ * @brief **bpf_program__set_attach_target()** sets BTF-based attach target
+ * for supported BPF program types:
+ * - BTF-aware raw tracepoints (tp_btf);
+ * - fentry/fexit/fmod_ret;
+ * - lsm;
+ * - freplace.
+ * @param prog BPF program to set the attach target for
+ * @param attach_prog_fd fd of the target BPF program, or zero when attaching to a kernel function
+ * @param attach_func_name name of the function in the target to attach to
+ * @return error code; or 0 if no error occurred.
+ */
LIBBPF_API int
bpf_program__set_attach_target(struct bpf_program *prog, int attach_prog_fd,
const char *attach_func_name);
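
For illustration only (not part of the patch), the setters documented above now report errors and must run between open and load; the object path, program name, and target function are placeholders:

	obj = bpf_object__open_file("demo.bpf.o", NULL);
	prog = bpf_object__find_program_by_name(obj, "handle_fentry");

	err = bpf_program__set_type(prog, BPF_PROG_TYPE_TRACING);
	err = err ?: bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
	err = err ?: bpf_program__set_attach_target(prog, 0, "do_unlinkat");
	if (err)
		goto cleanup;

	err = bpf_object__load(obj);  /* calling the setters after this returns -EBUSY */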
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index 080272421f6c..4abdbe2fea9d 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -376,6 +376,13 @@ struct btf_ext_info {
void *info;
__u32 rec_size;
__u32 len;
+ /* optional (maintained internally by libbpf) mapping between .BTF.ext
+ * section and corresponding ELF section. This is used to join
+ * information like CO-RE relocation records with corresponding BPF
+ * programs defined in ELF sections
+ */
+ __u32 *sec_idxs;
+ int sec_cnt;
};
#define for_each_btf_ext_sec(seg, sec) \
@@ -571,6 +578,6 @@ struct bpf_link * usdt_manager_attach_usdt(struct usdt_manager *man,
const struct bpf_program *prog,
pid_t pid, const char *path,
const char *usdt_provider, const char *usdt_name,
- long usdt_cookie);
+ __u64 usdt_cookie);
#endif /* __LIBBPF_LIBBPF_INTERNAL_H */
diff --git a/tools/lib/bpf/relo_core.c b/tools/lib/bpf/relo_core.c
index f946f23eab20..ba4453dfd1ed 100644
--- a/tools/lib/bpf/relo_core.c
+++ b/tools/lib/bpf/relo_core.c
@@ -178,29 +178,28 @@ static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
* Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use access
* string to specify enumerator's value index that need to be relocated.
*/
-static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
- __u32 type_id,
- const char *spec_str,
- enum bpf_core_relo_kind relo_kind,
- struct bpf_core_spec *spec)
+int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
+ const struct bpf_core_relo *relo,
+ struct bpf_core_spec *spec)
{
int access_idx, parsed_len, i;
struct bpf_core_accessor *acc;
const struct btf_type *t;
- const char *name;
+ const char *name, *spec_str;
__u32 id;
__s64 sz;
+ spec_str = btf__name_by_offset(btf, relo->access_str_off);
if (str_is_empty(spec_str) || *spec_str == ':')
return -EINVAL;
memset(spec, 0, sizeof(*spec));
spec->btf = btf;
- spec->root_type_id = type_id;
- spec->relo_kind = relo_kind;
+ spec->root_type_id = relo->type_id;
+ spec->relo_kind = relo->kind;
/* type-based relocations don't have a field access string */
- if (core_relo_is_type_based(relo_kind)) {
+ if (core_relo_is_type_based(relo->kind)) {
if (strcmp(spec_str, "0"))
return -EINVAL;
return 0;
@@ -221,7 +220,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
if (spec->raw_len == 0)
return -EINVAL;
- t = skip_mods_and_typedefs(btf, type_id, &id);
+ t = skip_mods_and_typedefs(btf, relo->type_id, &id);
if (!t)
return -EINVAL;
@@ -231,7 +230,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
acc->idx = access_idx;
spec->len++;
- if (core_relo_is_enumval_based(relo_kind)) {
+ if (core_relo_is_enumval_based(relo->kind)) {
if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
return -EINVAL;
@@ -240,7 +239,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
return 0;
}
- if (!core_relo_is_field_based(relo_kind))
+ if (!core_relo_is_field_based(relo->kind))
return -EINVAL;
sz = btf__resolve_size(btf, id);
@@ -301,7 +300,7 @@ static int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
spec->bit_offset += access_idx * sz * 8;
} else {
pr_warn("prog '%s': relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
- prog_name, type_id, spec_str, i, id, btf_kind_str(t));
+ prog_name, relo->type_id, spec_str, i, id, btf_kind_str(t));
return -EINVAL;
}
}
@@ -1055,51 +1054,66 @@ poison:
* [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
* where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
*/
-static void bpf_core_dump_spec(const char *prog_name, int level, const struct bpf_core_spec *spec)
+int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec)
{
const struct btf_type *t;
const struct btf_enum *e;
const char *s;
__u32 type_id;
- int i;
+ int i, len = 0;
+
+#define append_buf(fmt, args...) \
+ ({ \
+ int r; \
+ r = snprintf(buf, buf_sz, fmt, ##args); \
+ len += r; \
+ if (r >= buf_sz) \
+ r = buf_sz; \
+ buf += r; \
+ buf_sz -= r; \
+ })
type_id = spec->root_type_id;
t = btf_type_by_id(spec->btf, type_id);
s = btf__name_by_offset(spec->btf, t->name_off);
- libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
+ append_buf("<%s> [%u] %s %s",
+ core_relo_kind_str(spec->relo_kind),
+ type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
if (core_relo_is_type_based(spec->relo_kind))
- return;
+ return len;
if (core_relo_is_enumval_based(spec->relo_kind)) {
t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
e = btf_enum(t) + spec->raw_spec[0];
s = btf__name_by_offset(spec->btf, e->name_off);
- libbpf_print(level, "::%s = %u", s, e->val);
- return;
+ append_buf("::%s = %u", s, e->val);
+ return len;
}
if (core_relo_is_field_based(spec->relo_kind)) {
for (i = 0; i < spec->len; i++) {
if (spec->spec[i].name)
- libbpf_print(level, ".%s", spec->spec[i].name);
+ append_buf(".%s", spec->spec[i].name);
else if (i > 0 || spec->spec[i].idx > 0)
- libbpf_print(level, "[%u]", spec->spec[i].idx);
+ append_buf("[%u]", spec->spec[i].idx);
}
- libbpf_print(level, " (");
+ append_buf(" (");
for (i = 0; i < spec->raw_len; i++)
- libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
+ append_buf("%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
if (spec->bit_offset % 8)
- libbpf_print(level, " @ offset %u.%u)",
- spec->bit_offset / 8, spec->bit_offset % 8);
+ append_buf(" @ offset %u.%u)", spec->bit_offset / 8, spec->bit_offset % 8);
else
- libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
- return;
+ append_buf(" @ offset %u)", spec->bit_offset / 8);
+ return len;
}
+
+ return len;
+#undef append_buf
}
/*
@@ -1167,7 +1181,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
const struct btf_type *local_type;
const char *local_name;
__u32 local_id;
- const char *spec_str;
+ char spec_buf[256];
int i, j, err;
local_id = relo->type_id;
@@ -1176,24 +1190,20 @@ int bpf_core_calc_relo_insn(const char *prog_name,
if (!local_name)
return -EINVAL;
- spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
- if (str_is_empty(spec_str))
- return -EINVAL;
-
- err = bpf_core_parse_spec(prog_name, local_btf, local_id, spec_str,
- relo->kind, local_spec);
+ err = bpf_core_parse_spec(prog_name, local_btf, relo, local_spec);
if (err) {
+ const char *spec_str;
+
+ spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
prog_name, relo_idx, local_id, btf_kind_str(local_type),
str_is_empty(local_name) ? "<anon>" : local_name,
- spec_str, err);
+ spec_str ?: "<?>", err);
return -EINVAL;
}
- pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog_name,
- relo_idx, core_relo_kind_str(relo->kind), relo->kind);
- bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, local_spec);
- libbpf_print(LIBBPF_DEBUG, "\n");
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), local_spec);
+ pr_debug("prog '%s': relo #%d: %s\n", prog_name, relo_idx, spec_buf);
/* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
if (relo->kind == BPF_CORE_TYPE_ID_LOCAL) {
@@ -1207,7 +1217,7 @@ int bpf_core_calc_relo_insn(const char *prog_name,
}
/* libbpf doesn't support candidate search for anonymous types */
- if (str_is_empty(spec_str)) {
+ if (str_is_empty(local_name)) {
pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
prog_name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
return -EOPNOTSUPP;
@@ -1217,17 +1227,15 @@ int bpf_core_calc_relo_insn(const char *prog_name,
err = bpf_core_spec_match(local_spec, cands->cands[i].btf,
cands->cands[i].id, cand_spec);
if (err < 0) {
- pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
- prog_name, relo_idx, i);
- bpf_core_dump_spec(prog_name, LIBBPF_WARN, cand_spec);
- libbpf_print(LIBBPF_WARN, ": %d\n", err);
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
+ pr_warn("prog '%s': relo #%d: error matching candidate #%d %s: %d\n ",
+ prog_name, relo_idx, i, spec_buf, err);
return err;
}
- pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog_name,
- relo_idx, err == 0 ? "non-matching" : "matching", i);
- bpf_core_dump_spec(prog_name, LIBBPF_DEBUG, cand_spec);
- libbpf_print(LIBBPF_DEBUG, "\n");
+ bpf_core_format_spec(spec_buf, sizeof(spec_buf), cand_spec);
+ pr_debug("prog '%s': relo #%d: %s candidate #%d %s\n", prog_name,
+ relo_idx, err == 0 ? "non-matching" : "matching", i, spec_buf);
if (err == 0)
continue;
diff --git a/tools/lib/bpf/relo_core.h b/tools/lib/bpf/relo_core.h
index a28bf3711ce2..073039d8ca4f 100644
--- a/tools/lib/bpf/relo_core.h
+++ b/tools/lib/bpf/relo_core.h
@@ -84,4 +84,10 @@ int bpf_core_patch_insn(const char *prog_name, struct bpf_insn *insn,
int insn_idx, const struct bpf_core_relo *relo,
int relo_idx, const struct bpf_core_relo_res *res);
+int bpf_core_parse_spec(const char *prog_name, const struct btf *btf,
+ const struct bpf_core_relo *relo,
+ struct bpf_core_spec *spec);
+
+int bpf_core_format_spec(char *buf, size_t buf_sz, const struct bpf_core_spec *spec);
+
#endif
diff --git a/tools/lib/bpf/usdt.c b/tools/lib/bpf/usdt.c
index acf2d99a9e77..f1c9339cfbbc 100644
--- a/tools/lib/bpf/usdt.c
+++ b/tools/lib/bpf/usdt.c
@@ -10,6 +10,11 @@
#include <linux/ptrace.h>
#include <linux/kernel.h>
+/* riscv has a register named s8, but the 's8' identifier is marked as poisoned, so alias it */
+#if defined(__riscv)
+#define rv_s8 s8
+#endif
+
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_common.h"
@@ -557,10 +562,10 @@ static int parse_usdt_note(Elf *elf, const char *path, long base_addr,
GElf_Nhdr *nhdr, const char *data, size_t name_off, size_t desc_off,
struct usdt_note *usdt_note);
-static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, long usdt_cookie);
+static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie);
static int collect_usdt_targets(struct usdt_manager *man, Elf *elf, const char *path, pid_t pid,
- const char *usdt_provider, const char *usdt_name, long usdt_cookie,
+ const char *usdt_provider, const char *usdt_name, __u64 usdt_cookie,
struct usdt_target **out_targets, size_t *out_target_cnt)
{
size_t off, name_off, desc_off, seg_cnt = 0, lib_seg_cnt = 0, target_cnt = 0;
@@ -939,7 +944,7 @@ static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash
struct bpf_link *usdt_manager_attach_usdt(struct usdt_manager *man, const struct bpf_program *prog,
pid_t pid, const char *path,
const char *usdt_provider, const char *usdt_name,
- long usdt_cookie)
+ __u64 usdt_cookie)
{
int i, fd, err, spec_map_fd, ip_map_fd;
LIBBPF_OPTS(bpf_uprobe_opts, opts);
@@ -1141,7 +1146,7 @@ static int parse_usdt_note(Elf *elf, const char *path, long base_addr,
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg);
-static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, long usdt_cookie)
+static int parse_usdt_spec(struct usdt_spec *spec, const struct usdt_note *note, __u64 usdt_cookie)
{
const char *s;
int len;
@@ -1324,6 +1329,184 @@ static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec
return len;
}
+#elif defined(__aarch64__)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ int reg_num;
+
+ if (sscanf(reg_name, "x%d", &reg_num) == 1) {
+ if (reg_num >= 0 && reg_num < 31)
+ return offsetof(struct user_pt_regs, regs[reg_num]);
+ } else if (strcmp(reg_name, "sp") == 0) {
+ return offsetof(struct user_pt_regs, sp);
+ }
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ char *reg_name = NULL;
+ int arg_sz, len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ \[ %m[a-z0-9], %ld ] %n", &arg_sz, &reg_name, &off, &len) == 3) {
+ /* Memory dereference case, e.g., -4@[sp, 96] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ \[ %m[a-z0-9] ] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Memory dereference case, e.g., -4@[sp] */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@x4 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
+
+#elif defined(__riscv)
+
+static int calc_pt_regs_off(const char *reg_name)
+{
+ static struct {
+ const char *name;
+ size_t pt_regs_off;
+ } reg_map[] = {
+ { "ra", offsetof(struct user_regs_struct, ra) },
+ { "sp", offsetof(struct user_regs_struct, sp) },
+ { "gp", offsetof(struct user_regs_struct, gp) },
+ { "tp", offsetof(struct user_regs_struct, tp) },
+ { "a0", offsetof(struct user_regs_struct, a0) },
+ { "a1", offsetof(struct user_regs_struct, a1) },
+ { "a2", offsetof(struct user_regs_struct, a2) },
+ { "a3", offsetof(struct user_regs_struct, a3) },
+ { "a4", offsetof(struct user_regs_struct, a4) },
+ { "a5", offsetof(struct user_regs_struct, a5) },
+ { "a6", offsetof(struct user_regs_struct, a6) },
+ { "a7", offsetof(struct user_regs_struct, a7) },
+ { "s0", offsetof(struct user_regs_struct, s0) },
+ { "s1", offsetof(struct user_regs_struct, s1) },
+ { "s2", offsetof(struct user_regs_struct, s2) },
+ { "s3", offsetof(struct user_regs_struct, s3) },
+ { "s4", offsetof(struct user_regs_struct, s4) },
+ { "s5", offsetof(struct user_regs_struct, s5) },
+ { "s6", offsetof(struct user_regs_struct, s6) },
+ { "s7", offsetof(struct user_regs_struct, s7) },
+ { "s8", offsetof(struct user_regs_struct, rv_s8) },
+ { "s9", offsetof(struct user_regs_struct, s9) },
+ { "s10", offsetof(struct user_regs_struct, s10) },
+ { "s11", offsetof(struct user_regs_struct, s11) },
+ { "t0", offsetof(struct user_regs_struct, t0) },
+ { "t1", offsetof(struct user_regs_struct, t1) },
+ { "t2", offsetof(struct user_regs_struct, t2) },
+ { "t3", offsetof(struct user_regs_struct, t3) },
+ { "t4", offsetof(struct user_regs_struct, t4) },
+ { "t5", offsetof(struct user_regs_struct, t5) },
+ { "t6", offsetof(struct user_regs_struct, t6) },
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(reg_map); i++) {
+ if (strcmp(reg_name, reg_map[i].name) == 0)
+ return reg_map[i].pt_regs_off;
+ }
+
+ pr_warn("usdt: unrecognized register '%s'\n", reg_name);
+ return -ENOENT;
+}
+
+static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
+{
+ char *reg_name = NULL;
+ int arg_sz, len, reg_off;
+ long off;
+
+ if (sscanf(arg_str, " %d @ %ld ( %m[a-z0-9] ) %n", &arg_sz, &off, &reg_name, &len) == 3) {
+ /* Memory dereference case, e.g., -8@-88(s0) */
+ arg->arg_type = USDT_ARG_REG_DEREF;
+ arg->val_off = off;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else if (sscanf(arg_str, " %d @ %ld %n", &arg_sz, &off, &len) == 2) {
+ /* Constant value case, e.g., 4@5 */
+ arg->arg_type = USDT_ARG_CONST;
+ arg->val_off = off;
+ arg->reg_off = 0;
+ } else if (sscanf(arg_str, " %d @ %m[a-z0-9] %n", &arg_sz, &reg_name, &len) == 2) {
+ /* Register read case, e.g., -8@a1 */
+ arg->arg_type = USDT_ARG_REG;
+ arg->val_off = 0;
+ reg_off = calc_pt_regs_off(reg_name);
+ free(reg_name);
+ if (reg_off < 0)
+ return reg_off;
+ arg->reg_off = reg_off;
+ } else {
+ pr_warn("usdt: unrecognized arg #%d spec '%s'\n", arg_num, arg_str);
+ return -EINVAL;
+ }
+
+ arg->arg_signed = arg_sz < 0;
+ if (arg_sz < 0)
+ arg_sz = -arg_sz;
+
+ switch (arg_sz) {
+ case 1: case 2: case 4: case 8:
+ arg->arg_bitshift = 64 - arg_sz * 8;
+ break;
+ default:
+ pr_warn("usdt: unsupported arg #%d (spec '%s') size: %d\n",
+ arg_num, arg_str, arg_sz);
+ return -EINVAL;
+ }
+
+ return len;
+}
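For readers unfamiliar with the USDT note format: each argument spec is a "size@location" string, and the parsers above accept three location forms (offset(reg) dereference, bare constant, bare register). The standalone sketch below is not part of the patch; it mimics the dereference branch with the same sscanf pattern, the spec string and printed fields are illustrative only, and the glibc-specific %m modifier allocates the register name just as in the code above.

/* Hypothetical standalone demo of parsing "-8@-88(s0)" (signed 8-byte
 * load from s0 - 88), mirroring the USDT_ARG_REG_DEREF branch above. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *spec = "-8@-88(s0)";
	char *reg = NULL;
	int sz, len = 0;
	long off;

	/* %m[a-z0-9] mallocs the register name; %n records consumed bytes */
	if (sscanf(spec, " %d @ %ld ( %m[a-z0-9] ) %n", &sz, &off, &reg, &len) == 3) {
		printf("deref: size=%d signed=%d reg=%s off=%ld consumed=%d\n",
		       abs(sz), sz < 0, reg, off, len);
		free(reg);
	}
	return 0;
}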
+
#else
static int parse_usdt_arg(const char *arg_str, int arg_num, struct usdt_arg_spec *arg)
diff --git a/tools/testing/selftests/bpf/bench.c b/tools/testing/selftests/bpf/bench.c
index f973320e6dbf..f061cc20e776 100644
--- a/tools/testing/selftests/bpf/bench.c
+++ b/tools/testing/selftests/bpf/bench.c
@@ -8,7 +8,6 @@
#include <fcntl.h>
#include <pthread.h>
#include <sys/sysinfo.h>
-#include <sys/resource.h>
#include <signal.h>
#include "bench.h"
#include "testing_helpers.h"
diff --git a/tools/testing/selftests/bpf/bpf_rlimit.h b/tools/testing/selftests/bpf/bpf_rlimit.h
deleted file mode 100644
index 9dac9b30f8ef..000000000000
--- a/tools/testing/selftests/bpf/bpf_rlimit.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#include <sys/resource.h>
-#include <stdio.h>
-
-static __attribute__((constructor)) void bpf_rlimit_ctor(void)
-{
- struct rlimit rlim_old, rlim_new = {
- .rlim_cur = RLIM_INFINITY,
- .rlim_max = RLIM_INFINITY,
- };
-
- getrlimit(RLIMIT_MEMLOCK, &rlim_old);
- /* For the sake of running the test cases, we temporarily
- * set rlimit to infinity in order for kernel to focus on
- * errors from actual test cases and not getting noise
- * from hitting memlock limits. The limit is on per-process
- * basis and not a global one, hence destructor not really
- * needed here.
- */
- if (setrlimit(RLIMIT_MEMLOCK, &rlim_new) < 0) {
- perror("Unable to lift memlock rlimit");
- /* Trying out lower limit, but expect potential test
- * case failures from this!
- */
- rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20);
- rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20);
- setrlimit(RLIMIT_MEMLOCK, &rlim_new);
- }
-}
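The constructor above bumped RLIMIT_MEMLOCK for every test binary; the conversions that follow drop it and opt into libbpf's 1.0 behavior instead. A minimal sketch of that replacement pattern, assuming a recent kernel where BPF memory is memcg-accounted rather than memlock-accounted (libbpf_set_strict_mode() and LIBBPF_STRICT_ALL are the real libbpf API; the rest is illustrative):

#include <bpf/libbpf.h>

int main(void)
{
	/* opt into libbpf 1.0 semantics once, early in main(); no
	 * RLIMIT_MEMLOCK bump is needed on memcg-accounted kernels */
	libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

	/* ... open/load BPF objects and create maps as usual ... */
	return 0;
}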
diff --git a/tools/testing/selftests/bpf/flow_dissector_load.c b/tools/testing/selftests/bpf/flow_dissector_load.c
index 87fd1aa323a9..c8be6406777f 100644
--- a/tools/testing/selftests/bpf/flow_dissector_load.c
+++ b/tools/testing/selftests/bpf/flow_dissector_load.c
@@ -11,7 +11,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "flow_dissector_load.h"
const char *cfg_pin_path = "/sys/fs/bpf/flow_dissector";
@@ -25,9 +24,8 @@ static void load_and_attach_program(void)
int prog_fd, ret;
struct bpf_object *obj;
- ret = libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
- if (ret)
- error(1, 0, "failed to enable libbpf strict mode: %d", ret);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
ret = bpf_flow_load(&obj, cfg_path_name, cfg_prog_name,
cfg_map_name, NULL, &prog_fd, NULL);
diff --git a/tools/testing/selftests/bpf/get_cgroup_id_user.c b/tools/testing/selftests/bpf/get_cgroup_id_user.c
index 3a7b82bd9e94..e021cc67dc02 100644
--- a/tools/testing/selftests/bpf/get_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/get_cgroup_id_user.c
@@ -20,7 +20,6 @@
#include "cgroup_helpers.h"
#include "testing_helpers.h"
-#include "bpf_rlimit.h"
#define CHECK(condition, tag, format...) ({ \
int __ret = !!(condition); \
@@ -67,6 +66,9 @@ int main(int argc, char **argv)
if (CHECK(cgroup_fd < 0, "cgroup_setup_and_join", "err %d errno %d\n", cgroup_fd, errno))
return 1;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
err = bpf_prog_test_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
if (CHECK(err, "bpf_prog_test_load", "err %d errno %d\n", err, errno))
goto cleanup_cgroup_env;
diff --git a/tools/testing/selftests/bpf/prog_tests/arg_parsing.c b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
new file mode 100644
index 000000000000..b17bfa0e0aac
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/arg_parsing.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "test_progs.h"
+#include "testing_helpers.h"
+
+static void init_test_filter_set(struct test_filter_set *set)
+{
+ set->cnt = 0;
+ set->tests = NULL;
+}
+
+static void free_test_filter_set(struct test_filter_set *set)
+{
+ int i, j;
+
+ for (i = 0; i < set->cnt; i++) {
+ for (j = 0; j < set->tests[i].subtest_cnt; j++)
+ free((void *)set->tests[i].subtests[j]);
+ free(set->tests[i].subtests);
+ free(set->tests[i].name);
+ }
+
+ free(set->tests);
+ init_test_filter_set(set);
+}
+
+static void test_parse_test_list(void)
+{
+ struct test_filter_set set;
+
+ init_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing", &set, true), "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "test filters count"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "subtest name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing,bpf_cookie", &set, true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ ASSERT_EQ(set.tests[0].subtest_cnt, 0, "subtest filters count");
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing/arg_parsing,bpf_cookie",
+ &set,
+ true),
+ "parsing");
+ if (!ASSERT_EQ(set.cnt, 2, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
+ "subtest name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("arg_parsing/arg_parsing", &set, true),
+ "parsing");
+ ASSERT_OK(parse_test_list("bpf_cookie", &set, true), "parsing");
+ ASSERT_OK(parse_test_list("send_signal", &set, true), "parsing");
+ if (!ASSERT_EQ(set.cnt, 3, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_EQ(set.tests[1].subtest_cnt, 0, "subtest filters count");
+ ASSERT_EQ(set.tests[2].subtest_cnt, 0, "subtest filters count");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("arg_parsing", set.tests[0].subtests[0]),
+ "subtest name");
+ ASSERT_OK(strcmp("bpf_cookie", set.tests[1].name), "test name");
+ ASSERT_OK(strcmp("send_signal", set.tests[2].name), "test name");
+ free_test_filter_set(&set);
+
+ ASSERT_OK(parse_test_list("bpf_cookie/trace", &set, false), "parsing");
+ if (!ASSERT_EQ(set.cnt, 1, "count of test filters"))
+ goto error;
+ if (!ASSERT_OK_PTR(set.tests, "test filters initialized"))
+ goto error;
+ if (!ASSERT_EQ(set.tests[0].subtest_cnt, 1, "subtest filters count"))
+ goto error;
+ ASSERT_OK(strcmp("*bpf_cookie*", set.tests[0].name), "test name");
+ ASSERT_OK(strcmp("*trace*", set.tests[0].subtests[0]), "subtest name");
+error:
+ free_test_filter_set(&set);
+}
+
+void test_arg_parsing(void)
+{
+ if (test__start_subtest("test_parse_test_list"))
+ test_parse_test_list();
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
index 5142a7d130b2..2c403ddc8076 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_iter.c
@@ -1192,8 +1192,6 @@ static void str_strip_first_line(char *str)
*dst = '\0';
}
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
static void test_task_vma(void)
{
int err, iter_fd = -1, proc_maps_fd = -1;
@@ -1229,7 +1227,7 @@ static void test_task_vma(void)
len = 0;
while (len < CMP_BUFFER_SIZE) {
err = read_fd_into_buffer(iter_fd, task_vma_output + len,
- min(read_size, CMP_BUFFER_SIZE - len));
+ MIN(read_size, CMP_BUFFER_SIZE - len));
if (!err)
break;
if (CHECK(err < 0, "read_iter_fd", "read_iter_fd failed\n"))
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
index d43f548c572c..a4d0cc9d3367 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_mod_race.c
@@ -36,13 +36,13 @@ struct test_config {
void (*bpf_destroy)(void *);
};
-enum test_state {
+enum bpf_test_state {
_TS_INVALID,
TS_MODULE_LOAD,
TS_MODULE_LOAD_FAIL,
};
-static _Atomic enum test_state state = _TS_INVALID;
+static _Atomic enum bpf_test_state state = _TS_INVALID;
static int sys_finit_module(int fd, const char *param_values, int flags)
{
diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
index 8f7a1cef7d87..e9a9a31b2ffe 100644
--- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
+++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c
@@ -10,8 +10,6 @@
#include "bpf_tcp_nogpl.skel.h"
#include "bpf_dctcp_release.skel.h"
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif
@@ -53,7 +51,7 @@ static void *server(void *arg)
while (bytes < total_bytes && !READ_ONCE(stop)) {
nr_sent = send(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
+ MIN(total_bytes - bytes, sizeof(batch)), 0);
if (nr_sent == -1 && errno == EINTR)
continue;
if (nr_sent == -1) {
@@ -146,7 +144,7 @@ static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map)
/* recv total_bytes */
while (bytes < total_bytes && !READ_ONCE(stop)) {
nr_recv = recv(fd, &batch,
- min(total_bytes - bytes, sizeof(batch)), 0);
+ MIN(total_bytes - bytes, sizeof(batch)), 0);
if (nr_recv == -1 && errno == EINTR)
continue;
if (nr_recv == -1)
diff --git a/tools/testing/selftests/bpf/prog_tests/btf.c b/tools/testing/selftests/bpf/prog_tests/btf.c
index ec823561b912..ba5bde53d418 100644
--- a/tools/testing/selftests/bpf/prog_tests/btf.c
+++ b/tools/testing/selftests/bpf/prog_tests/btf.c
@@ -8,7 +8,6 @@
#include <linux/filter.h>
#include <linux/unistd.h>
#include <bpf/bpf.h>
-#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <string.h>
@@ -3974,6 +3973,105 @@ static struct btf_raw_test raw_tests[] = {
.value_type_id = 1,
.max_entries = 1,
},
+{
+ .descr = "type_tag test #2, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_CONST_ENC(3), /* [2] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [3] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #3, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #4, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPEDEF_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 1), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
+{
+ .descr = "type_tag test #5, type tag order",
+ .raw_types = {
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(1), /* [3] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 2), /* [4] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+},
+{
+ .descr = "type_tag test #6, type tag order",
+ .raw_types = {
+ BTF_PTR_ENC(2), /* [1] */
+ BTF_TYPE_TAG_ENC(NAME_TBD, 3), /* [2] */
+ BTF_CONST_ENC(4), /* [3] */
+ BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [4] */
+ BTF_PTR_ENC(6), /* [5] */
+ BTF_CONST_ENC(2), /* [6] */
+ BTF_END_RAW,
+ },
+ BTF_STR_SEC("\0tag"),
+ .map_type = BPF_MAP_TYPE_ARRAY,
+ .map_name = "tag_type_check_btf",
+ .key_size = sizeof(int),
+ .value_size = 4,
+ .key_type_id = 1,
+ .value_type_id = 1,
+ .max_entries = 1,
+ .btf_load_err = true,
+ .err_str = "Type tags don't precede modifiers",
+},
}; /* struct btf_raw_test raw_tests[] */
diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
index 3ee2107bbf7a..fe1f0f26ea14 100644
--- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
+++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c
@@ -53,7 +53,7 @@ void test_fexit_stress(void)
&trace_opts);
if (!ASSERT_GE(fexit_fd[i], 0, "fexit load"))
goto out;
- link_fd[i] = bpf_raw_tracepoint_open(NULL, fexit_fd[i]);
+ link_fd[i] = bpf_link_create(fexit_fd[i], 0, BPF_TRACE_FEXIT, NULL);
if (!ASSERT_GE(link_fd[i], 0, "fexit attach"))
goto out;
}
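The hunk above switches fexit attachment from the legacy raw_tracepoint_open path to bpf_link_create(). A small sketch of that call, under the assumption that the program was loaded with expected_attach_type BPF_TRACE_FEXIT and its target BTF ID already resolved (attach_fexit is a made-up helper name):

#include <bpf/bpf.h>

/* returns a link fd (>= 0) on success, a negative error otherwise */
static int attach_fexit(int prog_fd)
{
	/* target_fd is 0: the attach target was fixed at program load time */
	return bpf_link_create(prog_fd, 0, BPF_TRACE_FEXIT, NULL);
}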
diff --git a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
index e1de5f80c3b2..0354f9b82c65 100644
--- a/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
+++ b/tools/testing/selftests/bpf/prog_tests/helper_restricted.c
@@ -6,11 +6,10 @@
void test_helper_restricted(void)
{
int prog_i = 0, prog_cnt;
- int duration = 0;
do {
struct test_helper_restricted *test;
- int maybeOK;
+ int err;
test = test_helper_restricted__open();
if (!ASSERT_OK_PTR(test, "open"))
@@ -21,12 +20,11 @@ void test_helper_restricted(void)
for (int j = 0; j < prog_cnt; ++j) {
struct bpf_program *prog = *test->skeleton->progs[j].prog;
- maybeOK = bpf_program__set_autoload(prog, prog_i == j);
- ASSERT_OK(maybeOK, "set autoload");
+ bpf_program__set_autoload(prog, true);
}
- maybeOK = test_helper_restricted__load(test);
- CHECK(!maybeOK, test->skeleton->progs[prog_i].name, "helper isn't restricted");
+ err = test_helper_restricted__load(test);
+ ASSERT_ERR(err, "load_should_fail");
test_helper_restricted__destroy(test);
} while (++prog_i < prog_cnt);
diff --git a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
index e9916f2817ec..cad664546912 100644
--- a/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
+++ b/tools/testing/selftests/bpf/prog_tests/linked_funcs.c
@@ -14,6 +14,12 @@ void test_linked_funcs(void)
if (!ASSERT_OK_PTR(skel, "skel_open"))
return;
+ /* handler1 and handler2 are marked as SEC("?raw_tp/sys_enter") and
+ * are set to not autoload by default
+ */
+ bpf_program__set_autoload(skel->progs.handler1, true);
+ bpf_program__set_autoload(skel->progs.handler2, true);
+
skel->rodata->my_tid = syscall(SYS_gettid);
skel->bss->syscall_id = SYS_getpgid;
diff --git a/tools/testing/selftests/bpf/prog_tests/log_fixup.c b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
new file mode 100644
index 000000000000..be3a956cb3a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/log_fixup.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <bpf/btf.h>
+
+#include "test_log_fixup.skel.h"
+
+enum trunc_type {
+ TRUNC_NONE,
+ TRUNC_PARTIAL,
+ TRUNC_FULL,
+};
+
+static void bad_core_relo(size_t log_buf_size, enum trunc_type trunc_type)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bad_relo, true);
+ memset(log_buf, 0, sizeof(log_buf));
+ bpf_program__set_log_buf(skel->progs.bad_relo, log_buf, log_buf_size ?: sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ ASSERT_HAS_SUBSTR(log_buf,
+ "0: <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation <byte_sz> ",
+ "log_buf_part1");
+
+ switch (trunc_type) {
+ case TRUNC_NONE:
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
+ "log_buf_part2");
+ ASSERT_HAS_SUBSTR(log_buf,
+ "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n",
+ "log_buf_end");
+ break;
+ case TRUNC_PARTIAL:
+ /* we should get full libbpf message patch */
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field (0:1 @ offset 4)\n",
+ "log_buf_part2");
+ /* we shouldn't get full end of BPF verifier log */
+ ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
+ "log_buf_end");
+ break;
+ case TRUNC_FULL:
+ /* we shouldn't get second part of libbpf message patch */
+ ASSERT_NULL(strstr(log_buf, "struct task_struct___bad.fake_field (0:1 @ offset 4)\n"),
+ "log_buf_part2");
+ /* we shouldn't get full end of BPF verifier log */
+ ASSERT_NULL(strstr(log_buf, "max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0\n"),
+ "log_buf_end");
+ break;
+ }
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+static void bad_core_relo_subprog(void)
+{
+ char log_buf[8 * 1024];
+ struct test_log_fixup* skel;
+ int err;
+
+ skel = test_log_fixup__open();
+ if (!ASSERT_OK_PTR(skel, "skel_open"))
+ return;
+
+ bpf_program__set_autoload(skel->progs.bad_relo_subprog, true);
+ bpf_program__set_log_buf(skel->progs.bad_relo_subprog, log_buf, sizeof(log_buf));
+
+ err = test_log_fixup__load(skel);
+ if (!ASSERT_ERR(err, "load_fail"))
+ goto cleanup;
+
+ /* there should be no prog loading log because we specified per-prog log buf */
+ ASSERT_HAS_SUBSTR(log_buf,
+ ": <invalid CO-RE relocation>\n"
+ "failed to resolve CO-RE relocation <byte_off> ",
+ "log_buf");
+ ASSERT_HAS_SUBSTR(log_buf,
+ "struct task_struct___bad.fake_field_subprog (0:2 @ offset 8)\n",
+ "log_buf");
+
+ if (env.verbosity > VERBOSE_NONE)
+ printf("LOG: \n=================\n%s=================\n", log_buf);
+
+cleanup:
+ test_log_fixup__destroy(skel);
+}
+
+void test_log_fixup(void)
+{
+ if (test__start_subtest("bad_core_relo_trunc_none"))
+ bad_core_relo(0, TRUNC_NONE /* full buf */);
+ if (test__start_subtest("bad_core_relo_trunc_partial"))
+ bad_core_relo(300, TRUNC_PARTIAL /* truncate original log a bit */);
+ if (test__start_subtest("bad_core_relo_trunc_full"))
+ bad_core_relo(250, TRUNC_FULL /* truncate also libbpf's message patch */);
+ if (test__start_subtest("bad_core_relo_subprog"))
+ bad_core_relo_subprog();
+}
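The test above relies on routing each program's verifier log into a caller-supplied buffer so truncation behavior can be checked. A minimal sketch of that setup using the real bpf_program__set_log_buf()/bpf_program__set_log_level() APIs; the helper name and buffer size are illustrative:

#include <bpf/libbpf.h>

static char prog_log[8 * 1024];

static void route_prog_log(struct bpf_program *prog)
{
	/* per-program buffer: verifier output lands here instead of the
	 * object-level log, so the caller can inspect or truncate it */
	bpf_program__set_log_buf(prog, prog_log, sizeof(prog_log));
	bpf_program__set_log_level(prog, 1);
}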
diff --git a/tools/testing/selftests/bpf/prog_tests/map_kptr.c b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
new file mode 100644
index 000000000000..9e2fbda64a65
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/map_kptr.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+
+#include "map_kptr.skel.h"
+
+void test_map_kptr(void)
+{
+ struct map_kptr *skel;
+ int key = 0, ret;
+ char buf[24];
+
+ skel = map_kptr__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "map_kptr__open_and_load"))
+ return;
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
+ ASSERT_OK(ret, "array_map update");
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.array_map), &key, buf, 0);
+ ASSERT_OK(ret, "array_map update2");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_map), &key, buf, 0);
+ ASSERT_OK(ret, "hash_map update");
+ ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_map), &key);
+ ASSERT_OK(ret, "hash_map delete");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key, buf, 0);
+ ASSERT_OK(ret, "hash_malloc_map update");
+ ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.hash_malloc_map), &key);
+ ASSERT_OK(ret, "hash_malloc_map delete");
+
+ ret = bpf_map_update_elem(bpf_map__fd(skel->maps.lru_hash_map), &key, buf, 0);
+ ASSERT_OK(ret, "lru_hash_map update");
+ ret = bpf_map_delete_elem(bpf_map__fd(skel->maps.lru_hash_map), &key);
+ ASSERT_OK(ret, "lru_hash_map delete");
+
+ map_kptr__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
new file mode 100644
index 000000000000..14f2796076e0
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/prog_tests_framework.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+#include "test_progs.h"
+#include "testing_helpers.h"
+
+static void clear_test_state(struct test_state *state)
+{
+ state->error_cnt = 0;
+ state->sub_succ_cnt = 0;
+ state->skip_cnt = 0;
+}
+
+void test_prog_tests_framework(void)
+{
+ struct test_state *state = env.test_state;
+
+	/* in all the ASSERT calls below we need to return on the first
+	 * error because we clear the test state after each dummy subtest
+	 */
+
+ /* test we properly count skipped tests with subtests */
+ if (test__start_subtest("test_good_subtest"))
+ test__end_subtest();
+ if (!ASSERT_EQ(state->skip_cnt, 0, "skip_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->error_cnt, 0, "error_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 1, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+
+ if (test__start_subtest("test_skip_subtest")) {
+ test__skip();
+ test__end_subtest();
+ }
+ if (test__start_subtest("test_skip_subtest")) {
+ test__skip();
+ test__end_subtest();
+ }
+ if (!ASSERT_EQ(state->skip_cnt, 2, "skip_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 3, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+
+ if (test__start_subtest("test_fail_subtest")) {
+ test__fail();
+ test__end_subtest();
+ }
+ if (!ASSERT_EQ(state->error_cnt, 1, "error_cnt_check"))
+ return;
+ if (!ASSERT_EQ(state->subtest_num, 4, "subtest_num_check"))
+ return;
+ clear_test_state(state);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
index 873323fb18ba..739d2ea6ca55 100644
--- a/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
+++ b/tools/testing/selftests/bpf/prog_tests/reference_tracking.c
@@ -1,21 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
-static void toggle_object_autoload_progs(const struct bpf_object *obj,
- const char *name_load)
-{
- struct bpf_program *prog;
-
- bpf_object__for_each_program(prog, obj) {
- const char *name = bpf_program__name(prog);
-
- if (!strcmp(name_load, name))
- bpf_program__set_autoload(prog, true);
- else
- bpf_program__set_autoload(prog, false);
- }
-}
-
void test_reference_tracking(void)
{
const char *file = "test_sk_lookup_kern.o";
@@ -39,6 +24,7 @@ void test_reference_tracking(void)
goto cleanup;
bpf_object__for_each_program(prog, obj_iter) {
+ struct bpf_program *p;
const char *name;
name = bpf_program__name(prog);
@@ -49,7 +35,12 @@ void test_reference_tracking(void)
if (!ASSERT_OK_PTR(obj, "obj_open_file"))
goto cleanup;
- toggle_object_autoload_progs(obj, name);
+	/* no program is loaded by default, so just set autoload
+	 * to true for the single prog under test
+	 */
+ p = bpf_object__find_program_by_name(obj, name);
+ bpf_program__set_autoload(p, true);
+
/* Expect verifier failure if test name has 'err' */
if (strncmp(name, "err_", sizeof("err_") - 1) == 0) {
libbpf_print_fn_t old_print_fn;
diff --git a/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
new file mode 100644
index 000000000000..d7f83c0a40a5
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/skb_load_bytes.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "skb_load_bytes.skel.h"
+
+void test_skb_load_bytes(void)
+{
+ struct skb_load_bytes *skel;
+ int err, prog_fd, test_result;
+ struct __sk_buff skb = { 0 };
+
+ LIBBPF_OPTS(bpf_test_run_opts, tattr,
+ .data_in = &pkt_v4,
+ .data_size_in = sizeof(pkt_v4),
+ .ctx_in = &skb,
+ .ctx_size_in = sizeof(skb),
+ );
+
+ skel = skb_load_bytes__open_and_load();
+ if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
+ return;
+
+ prog_fd = bpf_program__fd(skel->progs.skb_process);
+ if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+ goto out;
+
+ skel->bss->load_offset = (uint32_t)(-1);
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ test_result = skel->bss->test_result;
+ if (!ASSERT_EQ(test_result, -EFAULT, "offset -1"))
+ goto out;
+
+ skel->bss->load_offset = (uint32_t)10;
+ err = bpf_prog_test_run_opts(prog_fd, &tattr);
+ if (!ASSERT_OK(err, "bpf_prog_test_run_opts"))
+ goto out;
+ test_result = skel->bss->test_result;
+ if (!ASSERT_EQ(test_result, 0, "offset 10"))
+ goto out;
+
+out:
+ skb_load_bytes__destroy(skel);
+}
diff --git a/tools/testing/selftests/bpf/prog_tests/snprintf.c b/tools/testing/selftests/bpf/prog_tests/snprintf.c
index 394ebfc3bbf3..4be6fdb78c6a 100644
--- a/tools/testing/selftests/bpf/prog_tests/snprintf.c
+++ b/tools/testing/selftests/bpf/prog_tests/snprintf.c
@@ -83,8 +83,6 @@ cleanup:
test_snprintf__destroy(skel);
}
-#define min(a, b) ((a) < (b) ? (a) : (b))
-
/* Loads an eBPF object calling bpf_snprintf with up to 10 characters of fmt */
static int load_single_snprintf(char *fmt)
{
@@ -95,7 +93,7 @@ static int load_single_snprintf(char *fmt)
if (!skel)
return -EINVAL;
- memcpy(skel->rodata->fmt, fmt, min(strlen(fmt) + 1, 10));
+ memcpy(skel->rodata->fmt, fmt, MIN(strlen(fmt) + 1, 10));
ret = test_snprintf_single__load(skel);
test_snprintf_single__destroy(skel);
diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
index 7ad66a247c02..958dae769c52 100644
--- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
+++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c
@@ -949,7 +949,6 @@ fail:
return -1;
}
-#define MAX(a, b) ((a) > (b) ? (a) : (b))
enum {
SRC_TO_TARGET = 0,
TARGET_TO_SRC = 1,
diff --git a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
index b57a3009465f..7ddd6615b7e7 100644
--- a/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
+++ b/tools/testing/selftests/bpf/prog_tests/test_strncmp.c
@@ -44,16 +44,12 @@ static void strncmp_full_str_cmp(struct strncmp_test *skel, const char *name,
static void test_strncmp_ret(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err, got;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
bpf_program__set_autoload(skel->progs.do_strncmp, true);
err = strncmp_test__load(skel);
@@ -91,18 +87,13 @@ out:
static void test_strncmp_bad_not_const_str_size(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_not_const_str_size, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_not_const_str_size");
@@ -113,18 +104,13 @@ static void test_strncmp_bad_not_const_str_size(void)
static void test_strncmp_bad_writable_target(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_writable_target, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_writable_target");
@@ -135,18 +121,13 @@ static void test_strncmp_bad_writable_target(void)
static void test_strncmp_bad_not_null_term_target(void)
{
struct strncmp_test *skel;
- struct bpf_program *prog;
int err;
skel = strncmp_test__open();
if (!ASSERT_OK_PTR(skel, "strncmp_test open"))
return;
- bpf_object__for_each_program(prog, skel->obj)
- bpf_program__set_autoload(prog, false);
-
- bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target,
- true);
+ bpf_program__set_autoload(skel->progs.strncmp_bad_not_null_term_target, true);
err = strncmp_test__load(skel);
ASSERT_ERR(err, "strncmp_test load bad_not_null_term_target");
diff --git a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
index d6003dc8cc99..35b87c7ba5be 100644
--- a/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
+++ b/tools/testing/selftests/bpf/prog_tests/uprobe_autoattach.c
@@ -34,7 +34,6 @@ void test_uprobe_autoattach(void)
/* trigger & validate shared library u[ret]probes attached by name */
mem = malloc(malloc_sz);
- free(mem);
ASSERT_EQ(skel->bss->uprobe_byname_parm1, trigger_val, "check_uprobe_byname_parm1");
ASSERT_EQ(skel->bss->uprobe_byname_ran, 1, "check_uprobe_byname_ran");
@@ -44,6 +43,8 @@ void test_uprobe_autoattach(void)
ASSERT_EQ(skel->bss->uprobe_byname2_ran, 3, "check_uprobe_byname2_ran");
ASSERT_EQ(skel->bss->uretprobe_byname2_rc, mem, "check_uretprobe_byname2_rc");
ASSERT_EQ(skel->bss->uretprobe_byname2_ran, 4, "check_uretprobe_byname2_ran");
+
+ free(mem);
cleanup:
test_uprobe_autoattach__destroy(skel);
}
diff --git a/tools/testing/selftests/bpf/progs/exhandler_kern.c b/tools/testing/selftests/bpf/progs/exhandler_kern.c
index f5ca142abf8f..dd9b30a0f0fc 100644
--- a/tools/testing/selftests/bpf/progs/exhandler_kern.c
+++ b/tools/testing/selftests/bpf/progs/exhandler_kern.c
@@ -7,6 +7,8 @@
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
+#define barrier_var(var) asm volatile("" : "=r"(var) : "0"(var))
+
char _license[] SEC("license") = "GPL";
unsigned int exception_triggered;
@@ -37,7 +39,16 @@ int BPF_PROG(trace_task_newtask, struct task_struct *task, u64 clone_flags)
*/
work = task->task_works;
func = work->func;
- if (!work && !func)
- exception_triggered++;
+	/* Currently the verifier will fail on the `btf_ptr |= btf_ptr` instruction.
+	 * To work around the issue, use barrier_var() and rewrite as below to
+ * prevent compiler from generating verifier-unfriendly code.
+ */
+ barrier_var(work);
+ if (work)
+ return 0;
+ barrier_var(func);
+ if (func)
+ return 0;
+ exception_triggered++;
return 0;
}
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs1.c b/tools/testing/selftests/bpf/progs/linked_funcs1.c
index 963b393c37e8..b05571bc67d5 100644
--- a/tools/testing/selftests/bpf/progs/linked_funcs1.c
+++ b/tools/testing/selftests/bpf/progs/linked_funcs1.c
@@ -61,12 +61,17 @@ extern int set_output_val2(int x);
/* here we'll force set_output_ctx2() to be __hidden in the final obj file */
__hidden extern void set_output_ctx2(__u64 *ctx);
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int BPF_PROG(handler1, struct pt_regs *regs, long id)
{
+ static volatile int whatever;
+
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
return 0;
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+
set_output_val2(1000);
set_output_ctx2(ctx); /* ctx definition is hidden in BPF_PROG macro */
diff --git a/tools/testing/selftests/bpf/progs/linked_funcs2.c b/tools/testing/selftests/bpf/progs/linked_funcs2.c
index db195872f4eb..ee7e3848ee4f 100644
--- a/tools/testing/selftests/bpf/progs/linked_funcs2.c
+++ b/tools/testing/selftests/bpf/progs/linked_funcs2.c
@@ -61,12 +61,17 @@ extern int set_output_val1(int x);
/* here we'll force set_output_ctx1() to be __hidden in the final obj file */
__hidden extern void set_output_ctx1(__u64 *ctx);
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int BPF_PROG(handler2, struct pt_regs *regs, long id)
{
+ static volatile int whatever;
+
if (my_tid != (u32)bpf_get_current_pid_tgid() || id != syscall_id)
return 0;
+ /* make sure we have CO-RE relocations in main program */
+ whatever = bpf_core_type_size(struct task_struct);
+
set_output_val1(2000);
set_output_ctx1(ctx); /* ctx definition is hidden in BPF_PROG macro */
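Both handlers are now in '?'-prefixed sections, i.e. they stay non-loadable until userspace opts them in. A sketch of the corresponding userspace pattern (mirroring the prog_tests/linked_funcs.c change above), assuming the generated skeleton is struct linked_funcs with the usual __open()/__load()/__destroy() helpers:

#include <bpf/libbpf.h>
#include "linked_funcs.skel.h"

static int load_with_handlers(void)
{
	struct linked_funcs *skel;
	int err;

	skel = linked_funcs__open();
	if (!skel)
		return -1;
	/* SEC("?...") programs default to autoload == false */
	bpf_program__set_autoload(skel->progs.handler1, true);
	bpf_program__set_autoload(skel->progs.handler2, true);
	err = linked_funcs__load(skel);
	linked_funcs__destroy(skel);
	return err;
}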
diff --git a/tools/testing/selftests/bpf/progs/map_kptr.c b/tools/testing/selftests/bpf/progs/map_kptr.c
new file mode 100644
index 000000000000..1b0e0409eaa5
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/map_kptr.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_helpers.h>
+
+struct map_value {
+ struct prog_test_ref_kfunc __kptr *unref_ptr;
+ struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
+};
+
+struct array_map {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} array_map SEC(".maps");
+
+struct hash_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} hash_map SEC(".maps");
+
+struct hash_malloc_map {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+ __uint(map_flags, BPF_F_NO_PREALLOC);
+} hash_malloc_map SEC(".maps");
+
+struct lru_hash_map {
+ __uint(type, BPF_MAP_TYPE_LRU_HASH);
+ __type(key, int);
+ __type(value, struct map_value);
+ __uint(max_entries, 1);
+} lru_hash_map SEC(".maps");
+
+#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name) \
+ struct { \
+ __uint(type, map_type); \
+ __uint(max_entries, 1); \
+ __uint(key_size, sizeof(int)); \
+ __uint(value_size, sizeof(int)); \
+ __array(values, struct inner_map_type); \
+ } name SEC(".maps") = { \
+ .values = { [0] = &inner_map_type }, \
+ }
+
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
+DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
+
+extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
+extern struct prog_test_ref_kfunc *
+bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
+extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
+
+static void test_kptr_unref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->unref_ptr;
+ /* store untrusted_ptr_or_null_ */
+ v->unref_ptr = p;
+ if (!p)
+ return;
+ if (p->a + p->b > 100)
+ return;
+ /* store untrusted_ptr_ */
+ v->unref_ptr = p;
+ /* store NULL */
+ v->unref_ptr = NULL;
+}
+
+static void test_kptr_ref(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = v->ref_ptr;
+ /* store ptr_or_null_ */
+ v->unref_ptr = p;
+ if (!p)
+ return;
+ if (p->a + p->b > 100)
+ return;
+ /* store NULL */
+ p = bpf_kptr_xchg(&v->ref_ptr, NULL);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ /* store ptr_ */
+ v->unref_ptr = p;
+ bpf_kfunc_call_test_release(p);
+
+ p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
+ if (!p)
+ return;
+ /* store ptr_ */
+ p = bpf_kptr_xchg(&v->ref_ptr, p);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ bpf_kfunc_call_test_release(p);
+}
+
+static void test_kptr_get(struct map_value *v)
+{
+ struct prog_test_ref_kfunc *p;
+
+ p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
+ if (!p)
+ return;
+ if (p->a + p->b > 100) {
+ bpf_kfunc_call_test_release(p);
+ return;
+ }
+ bpf_kfunc_call_test_release(p);
+}
+
+static void test_kptr(struct map_value *v)
+{
+ test_kptr_unref(v);
+ test_kptr_ref(v);
+ test_kptr_get(v);
+}
+
+SEC("tc")
+int test_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int i, key = 0;
+
+#define TEST(map) \
+ v = bpf_map_lookup_elem(&map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_map);
+ TEST(hash_map);
+ TEST(hash_malloc_map);
+ TEST(lru_hash_map);
+
+#undef TEST
+ return 0;
+}
+
+SEC("tc")
+int test_map_in_map_kptr(struct __sk_buff *ctx)
+{
+ struct map_value *v;
+ int i, key = 0;
+ void *map;
+
+#define TEST(map_in_map) \
+ map = bpf_map_lookup_elem(&map_in_map, &key); \
+ if (!map) \
+ return 0; \
+ v = bpf_map_lookup_elem(map, &key); \
+ if (!v) \
+ return 0; \
+ test_kptr(v)
+
+ TEST(array_of_array_maps);
+ TEST(array_of_hash_maps);
+ TEST(array_of_hash_malloc_maps);
+ TEST(array_of_lru_hash_maps);
+ TEST(hash_of_array_maps);
+ TEST(hash_of_hash_maps);
+ TEST(hash_of_hash_malloc_maps);
+ TEST(hash_of_lru_hash_maps);
+
+#undef TEST
+ return 0;
+}
+
+char _license[] SEC("license") = "GPL";
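The core rule exercised by test_kptr_ref() above is that bpf_kptr_xchg() transfers ownership both ways: the program gives up its reference to the pointer it stores and becomes responsible for whatever pointer comes back out. A condensed restatement of that rule, not part of the patch, relying on the declarations already present in this file (struct map_value, bpf_kfunc_call_test_release):

/* swap our reference into the map and drop whatever was stored before */
static void store_kptr(struct map_value *v, struct prog_test_ref_kfunc *p)
{
	struct prog_test_ref_kfunc *old;

	old = bpf_kptr_xchg(&v->ref_ptr, p);
	if (old)
		/* the previous reference is now ours, so release it */
		bpf_kfunc_call_test_release(old);
}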
diff --git a/tools/testing/selftests/bpf/progs/pyperf.h b/tools/testing/selftests/bpf/progs/pyperf.h
index 1ed28882daf3..5d3dc4d66d47 100644
--- a/tools/testing/selftests/bpf/progs/pyperf.h
+++ b/tools/testing/selftests/bpf/progs/pyperf.h
@@ -299,7 +299,11 @@ int __on_event(struct bpf_raw_tracepoint_args *ctx)
#ifdef NO_UNROLL
#pragma clang loop unroll(disable)
#else
+#ifdef UNROLL_COUNT
+#pragma clang loop unroll_count(UNROLL_COUNT)
+#else
#pragma clang loop unroll(full)
+#endif
#endif /* NO_UNROLL */
/* Unwind python stack */
for (int i = 0; i < STACK_MAX_LEN; ++i) {
diff --git a/tools/testing/selftests/bpf/progs/pyperf600.c b/tools/testing/selftests/bpf/progs/pyperf600.c
index cb49b89e37cd..ce1aa5189cc4 100644
--- a/tools/testing/selftests/bpf/progs/pyperf600.c
+++ b/tools/testing/selftests/bpf/progs/pyperf600.c
@@ -1,9 +1,12 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2019 Facebook
#define STACK_MAX_LEN 600
-/* clang will not unroll the loop 600 times.
- * Instead it will unroll it to the amount it deemed
- * appropriate, but the loop will still execute 600 times.
- * Total program size is around 90k insns
+/* Fully unrolling 600 iterations would push the total
+ * program size close to 298k insns, which may drive a
+ * BPF_JMP insn offset out of 16-bit integer range.
+ * So limit the unroll count to 150: total program size
+ * is then around 80k insns, but the loop still
+ * executes 600 times.
*/
+#define UNROLL_COUNT 150
#include "pyperf.h"
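For context, the unroll_count pragma added to pyperf.h partially unrolls the loop instead of fully expanding it. A standalone illustration of the same pragma in ordinary C compiled with clang (not part of the selftest):

/* the loop still iterates 600 times, but clang expands the body in
 * chunks of 150, keeping the emitted code much smaller than a full unroll */
#define N 600

int sum(const int *a)
{
	int s = 0;

#pragma clang loop unroll_count(150)
	for (int i = 0; i < N; i++)
		s += a[i];
	return s;
}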
diff --git a/tools/testing/selftests/bpf/progs/skb_load_bytes.c b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
new file mode 100644
index 000000000000..e4252fd973be
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/skb_load_bytes.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+
+char _license[] SEC("license") = "GPL";
+
+__u32 load_offset = 0;
+int test_result = 0;
+
+SEC("tc")
+int skb_process(struct __sk_buff *skb)
+{
+ char buf[16];
+
+ test_result = bpf_skb_load_bytes(skb, load_offset, buf, 10);
+
+ return 0;
+}
diff --git a/tools/testing/selftests/bpf/progs/strncmp_test.c b/tools/testing/selftests/bpf/progs/strncmp_test.c
index 900d930d48a8..769668feed48 100644
--- a/tools/testing/selftests/bpf/progs/strncmp_test.c
+++ b/tools/testing/selftests/bpf/progs/strncmp_test.c
@@ -19,7 +19,7 @@ unsigned int no_const_str_size = STRNCMP_STR_SZ;
char _license[] SEC("license") = "GPL";
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int do_strncmp(void *ctx)
{
if ((bpf_get_current_pid_tgid() >> 32) != target_pid)
@@ -29,7 +29,7 @@ int do_strncmp(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_not_const_str_size(void *ctx)
{
/* The value of string size is not const, so will fail */
@@ -37,7 +37,7 @@ int strncmp_bad_not_const_str_size(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_writable_target(void *ctx)
{
/* Compared target is not read-only, so will fail */
@@ -45,7 +45,7 @@ int strncmp_bad_writable_target(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int strncmp_bad_not_null_term_target(void *ctx)
{
/* Compared target is not null-terminated, so will fail */
diff --git a/tools/testing/selftests/bpf/progs/test_helper_restricted.c b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
index 68d64c365f90..20ef9d433b97 100644
--- a/tools/testing/selftests/bpf/progs/test_helper_restricted.c
+++ b/tools/testing/selftests/bpf/progs/test_helper_restricted.c
@@ -56,7 +56,7 @@ static void spin_lock_work(void)
}
}
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int raw_tp_timer(void *ctx)
{
timer_work();
@@ -64,7 +64,7 @@ int raw_tp_timer(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int tp_timer(void *ctx)
{
timer_work();
@@ -72,7 +72,7 @@ int tp_timer(void *ctx)
return 0;
}
-SEC("kprobe/sys_nanosleep")
+SEC("?kprobe/sys_nanosleep")
int kprobe_timer(void *ctx)
{
timer_work();
@@ -80,7 +80,7 @@ int kprobe_timer(void *ctx)
return 0;
}
-SEC("perf_event")
+SEC("?perf_event")
int perf_event_timer(void *ctx)
{
timer_work();
@@ -88,7 +88,7 @@ int perf_event_timer(void *ctx)
return 0;
}
-SEC("raw_tp/sys_enter")
+SEC("?raw_tp/sys_enter")
int raw_tp_spin_lock(void *ctx)
{
spin_lock_work();
@@ -96,7 +96,7 @@ int raw_tp_spin_lock(void *ctx)
return 0;
}
-SEC("tp/syscalls/sys_enter_nanosleep")
+SEC("?tp/syscalls/sys_enter_nanosleep")
int tp_spin_lock(void *ctx)
{
spin_lock_work();
@@ -104,7 +104,7 @@ int tp_spin_lock(void *ctx)
return 0;
}
-SEC("kprobe/sys_nanosleep")
+SEC("?kprobe/sys_nanosleep")
int kprobe_spin_lock(void *ctx)
{
spin_lock_work();
@@ -112,7 +112,7 @@ int kprobe_spin_lock(void *ctx)
return 0;
}
-SEC("perf_event")
+SEC("?perf_event")
int perf_event_spin_lock(void *ctx)
{
spin_lock_work();
diff --git a/tools/testing/selftests/bpf/progs/test_log_fixup.c b/tools/testing/selftests/bpf/progs/test_log_fixup.c
new file mode 100644
index 000000000000..a78980d897b3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/test_log_fixup.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
+
+#include <linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_core_read.h>
+
+struct task_struct___bad {
+ int pid;
+ int fake_field;
+ void *fake_field_subprog;
+} __attribute__((preserve_access_index));
+
+SEC("?raw_tp/sys_enter")
+int bad_relo(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bpf_core_field_size(t->fake_field);
+}
+
+static __noinline int bad_subprog(void)
+{
+ static struct task_struct___bad *t;
+
+ /* ugliness below is a field offset relocation */
+ return (void *)&t->fake_field_subprog - (void *)t;
+}
+
+SEC("?raw_tp/sys_enter")
+int bad_relo_subprog(const void *ctx)
+{
+ static struct task_struct___bad *t;
+
+ return bad_subprog() + bpf_core_field_size(t->pid);
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
index 40f161480a2f..b502e5c92e33 100644
--- a/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
+++ b/tools/testing/selftests/bpf/progs/test_sk_lookup_kern.c
@@ -52,7 +52,7 @@ static struct bpf_sock_tuple *get_tuple(void *data, __u64 nh_off,
return result;
}
-SEC("tc")
+SEC("?tc")
int sk_lookup_success(struct __sk_buff *skb)
{
void *data_end = (void *)(long)skb->data_end;
@@ -78,7 +78,7 @@ int sk_lookup_success(struct __sk_buff *skb)
return sk ? TC_ACT_OK : TC_ACT_UNSPEC;
}
-SEC("tc")
+SEC("?tc")
int sk_lookup_success_simple(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -90,7 +90,7 @@ int sk_lookup_success_simple(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_use_after_free(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -105,7 +105,7 @@ int err_use_after_free(struct __sk_buff *skb)
return family;
}
-SEC("tc")
+SEC("?tc")
int err_modify_sk_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -120,7 +120,7 @@ int err_modify_sk_pointer(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -134,7 +134,7 @@ int err_modify_sk_or_null_pointer(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_no_release(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -143,7 +143,7 @@ int err_no_release(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_release_twice(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -155,7 +155,7 @@ int err_release_twice(struct __sk_buff *skb)
return 0;
}
-SEC("tc")
+SEC("?tc")
int err_release_unchecked(struct __sk_buff *skb)
{
struct bpf_sock_tuple tuple = {};
@@ -172,7 +172,7 @@ void lookup_no_release(struct __sk_buff *skb)
bpf_sk_lookup_tcp(skb, &tuple, sizeof(tuple), BPF_F_CURRENT_NETNS, 0);
}
-SEC("tc")
+SEC("?tc")
int err_no_release_subcall(struct __sk_buff *skb)
{
lookup_no_release(skb);
diff --git a/tools/testing/selftests/bpf/test_cgroup_storage.c b/tools/testing/selftests/bpf/test_cgroup_storage.c
index 2ffa08198d1c..0861ea60dcdd 100644
--- a/tools/testing/selftests/bpf/test_cgroup_storage.c
+++ b/tools/testing/selftests/bpf/test_cgroup_storage.c
@@ -6,7 +6,6 @@
#include <stdlib.h>
#include <sys/sysinfo.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
@@ -52,6 +51,9 @@ int main(int argc, char **argv)
goto err;
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
map_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL, sizeof(key),
sizeof(value), 0, NULL);
if (map_fd < 0) {
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index c299d3452695..7886265846a0 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -15,7 +15,6 @@
#include "cgroup_helpers.h"
#include "testing_helpers.h"
-#include "bpf_rlimit.h"
#define DEV_CGROUP_PROG "./dev_cgroup.o"
@@ -28,6 +27,9 @@ int main(int argc, char **argv)
int prog_fd, cgroup_fd;
__u32 prog_cnt;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (bpf_prog_test_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
&obj, &prog_fd)) {
printf("Failed to load DEV_CGROUP program\n");
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
index aa294612e0a7..c028d621c744 100644
--- a/tools/testing/selftests/bpf/test_lpm_map.c
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -26,7 +26,6 @@
#include <bpf/bpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
struct tlpm_node {
struct tlpm_node *next;
@@ -409,16 +408,13 @@ static void test_lpm_ipaddr(void)
/* Test some lookups that should not match any entry */
inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
- assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
- assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv4, key_ipv4, &value) == -ENOENT);
inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
- assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd_ipv6, key_ipv6, &value) == -ENOENT);
close(map_fd_ipv4);
close(map_fd_ipv6);
@@ -475,18 +471,15 @@ static void test_lpm_delete(void)
/* remove non-existent node */
key->prefixlen = 32;
inet_pton(AF_INET, "10.0.0.1", key->data);
- assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
key->prefixlen = 30; // unused prefix so far
inet_pton(AF_INET, "192.255.0.0", key->data);
- assert(bpf_map_delete_elem(map_fd, key) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
key->prefixlen = 16; // same prefix as the root node
inet_pton(AF_INET, "192.255.0.0", key->data);
- assert(bpf_map_delete_elem(map_fd, key) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_delete_elem(map_fd, key) == -ENOENT);
/* assert initial lookup */
key->prefixlen = 32;
@@ -531,8 +524,7 @@ static void test_lpm_delete(void)
key->prefixlen = 32;
inet_pton(AF_INET, "192.168.128.1", key->data);
- assert(bpf_map_lookup_elem(map_fd, key, &value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, key, &value) == -ENOENT);
close(map_fd);
}
@@ -553,8 +545,7 @@ static void test_lpm_get_next_key(void)
assert(map_fd >= 0);
/* empty tree. get_next_key should return ENOENT */
- assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, NULL, key_p) == -ENOENT);
/* get and verify the first key, get the second one should fail. */
key_p->prefixlen = 16;
@@ -566,8 +557,7 @@ static void test_lpm_get_next_key(void)
assert(key_p->prefixlen == 16 && key_p->data[0] == 192 &&
key_p->data[1] == 168);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* no exact matching key should get the first one in post order. */
key_p->prefixlen = 8;
@@ -591,8 +581,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total three) */
key_p->prefixlen = 24;
@@ -615,8 +604,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total four) */
key_p->prefixlen = 24;
@@ -644,8 +632,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* Add one more element (total five) */
key_p->prefixlen = 28;
@@ -679,8 +666,7 @@ static void test_lpm_get_next_key(void)
next_key_p->data[1] == 168);
memcpy(key_p, next_key_p, key_size);
- assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_get_next_key(map_fd, key_p, next_key_p) == -ENOENT);
/* no exact matching key should return the first one in post order */
key_p->prefixlen = 22;
@@ -791,6 +777,9 @@ int main(void)
/* we want predictable, pseudo random tests */
srand(0xf00ba1);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
test_lpm_basic();
test_lpm_order();
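The asserts converted above depend on libbpf strict mode: with LIBBPF_STRICT_ALL (which includes LIBBPF_STRICT_DIRECT_ERRS) set at startup, the low-level map APIs return -errno directly instead of -1 with errno set. A tiny sketch of that convention (expect_absent is a made-up helper):

#include <errno.h>
#include <bpf/bpf.h>

/* returns 0 if the key is absent, which is what these tests expect */
static int expect_absent(int map_fd, const void *key, void *value)
{
	return bpf_map_lookup_elem(map_fd, key, value) == -ENOENT ? 0 : -1;
}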
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index 563bbe18c172..4d0650cfb5cd 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -18,7 +18,6 @@
#include <bpf/libbpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
#include "../../../include/linux/filter.h"
#define LOCAL_FREE_TARGET (128)
@@ -176,24 +175,20 @@ static void test_lru_sanity0(int map_type, int map_flags)
BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -1 &&
- errno == EINVAL);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, -1) == -EINVAL);
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
@@ -201,8 +196,7 @@ static void test_lru_sanity0(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and mark the ref bit to
* stop LRU from removing key=1
@@ -218,8 +212,7 @@ static void test_lru_sanity0(int map_type, int map_flags)
/* key=2 has been removed from the LRU */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* lookup elem key=1 and delete it, then check it doesn't exist */
key = 1;
@@ -382,8 +375,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
end_key = 1 + batch_size;
value[0] = 4321;
for (key = 1; key < end_key; key++) {
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(!bpf_map_update_elem(lru_map_fd, &key, value,
BPF_NOEXIST));
assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value));
@@ -563,8 +555,7 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value));
/* Cannot find the last key because it was removed by LRU */
- assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -ENOENT);
}
/* Test map with only one element */
@@ -712,21 +703,18 @@ static void test_lru_sanity7(int map_type, int map_flags)
BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
@@ -734,8 +722,7 @@ static void test_lru_sanity7(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and mark the ref bit to
* stop LRU from removing key=1
@@ -758,8 +745,7 @@ static void test_lru_sanity7(int map_type, int map_flags)
/* key=2 has been removed from the LRU */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -806,21 +792,18 @@ static void test_lru_sanity8(int map_type, int map_flags)
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
/* BPF_NOEXIST means: add new element if it doesn't exist */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1
- /* key=1 already exists */
- && errno == EEXIST);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -EEXIST);
+ /* key=1 already exists */
/* insert key=2 element */
/* check that key=2 is not found */
key = 2;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* BPF_EXIST means: update existing element */
- assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 &&
- /* key=2 is not there */
- errno == ENOENT);
+ assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -ENOENT);
+ /* key=2 is not there */
assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST));
assert(!bpf_map_update_elem(expected_map_fd, &key, value,
@@ -830,8 +813,7 @@ static void test_lru_sanity8(int map_type, int map_flags)
/* check that key=3 is not found */
key = 3;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
/* check that key=1 can be found and do _not_ mark ref bit.
* this will be evicted on next update.
@@ -854,8 +836,7 @@ static void test_lru_sanity8(int map_type, int map_flags)
/* key=1 has been removed from the LRU */
key = 1;
- assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 &&
- errno == ENOENT);
+ assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -ENOENT);
assert(map_equal(lru_map_fd, expected_map_fd));
@@ -878,6 +859,9 @@ int main(int argc, char **argv)
assert(nr_cpus != -1);
printf("nr_cpus:%d\n\n", nr_cpus);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
for (f = 0; f < ARRAY_SIZE(map_flags); f++) {
unsigned int tgt_free = (map_flags[f] & BPF_F_NO_COMMON_LRU) ?
PERCPU_FREE_TARGET : LOCAL_FREE_TARGET;
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 0a4b45d7b515..c536d1d29d57 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -3,6 +3,7 @@
*/
#define _GNU_SOURCE
#include "test_progs.h"
+#include "testing_helpers.h"
#include "cgroup_helpers.h"
#include <argp.h>
#include <pthread.h>
@@ -50,19 +51,8 @@ struct prog_test_def {
int test_num;
void (*run_test)(void);
void (*run_serial_test)(void);
- bool force_log;
- int error_cnt;
- int skip_cnt;
- int sub_succ_cnt;
bool should_run;
- bool tested;
bool need_cgroup_cleanup;
-
- char *subtest_name;
- int subtest_num;
-
- /* store counts before subtest started */
- int old_error_cnt;
};
/* Override C runtime library's usleep() implementation to ensure nanosleep()
@@ -84,12 +74,13 @@ static bool should_run(struct test_selector *sel, int num, const char *name)
int i;
for (i = 0; i < sel->blacklist.cnt; i++) {
- if (glob_match(name, sel->blacklist.strs[i]))
+ if (glob_match(name, sel->blacklist.tests[i].name) &&
+ !sel->blacklist.tests[i].subtest_cnt)
return false;
}
for (i = 0; i < sel->whitelist.cnt; i++) {
- if (glob_match(name, sel->whitelist.strs[i]))
+ if (glob_match(name, sel->whitelist.tests[i].name))
return true;
}
@@ -99,32 +90,69 @@ static bool should_run(struct test_selector *sel, int num, const char *name)
return num < sel->num_set_len && sel->num_set[num];
}
-static void dump_test_log(const struct prog_test_def *test, bool failed)
+static bool should_run_subtest(struct test_selector *sel,
+ struct test_selector *subtest_sel,
+ int subtest_num,
+ const char *test_name,
+ const char *subtest_name)
{
- if (stdout == env.stdout)
- return;
+ int i, j;
- /* worker always holds log */
- if (env.worker_id != -1)
- return;
+ for (i = 0; i < sel->blacklist.cnt; i++) {
+ if (glob_match(test_name, sel->blacklist.tests[i].name)) {
+ if (!sel->blacklist.tests[i].subtest_cnt)
+ return false;
+
+ for (j = 0; j < sel->blacklist.tests[i].subtest_cnt; j++) {
+ if (glob_match(subtest_name,
+ sel->blacklist.tests[i].subtests[j]))
+ return false;
+ }
+ }
+ }
- fflush(stdout); /* exports env.log_buf & env.log_cnt */
+ for (i = 0; i < sel->whitelist.cnt; i++) {
+ if (glob_match(test_name, sel->whitelist.tests[i].name)) {
+ if (!sel->whitelist.tests[i].subtest_cnt)
+ return true;
- if (env.verbosity > VERBOSE_NONE || test->force_log || failed) {
- if (env.log_cnt) {
- env.log_buf[env.log_cnt] = '\0';
- fprintf(env.stdout, "%s", env.log_buf);
- if (env.log_buf[env.log_cnt - 1] != '\n')
- fprintf(env.stdout, "\n");
+ for (j = 0; j < sel->whitelist.tests[i].subtest_cnt; j++) {
+ if (glob_match(subtest_name,
+ sel->whitelist.tests[i].subtests[j]))
+ return true;
+ }
}
}
+
+ if (!sel->whitelist.cnt && !subtest_sel->num_set)
+ return true;
+
+ return subtest_num < subtest_sel->num_set_len && subtest_sel->num_set[subtest_num];
}
-static void skip_account(void)
+static void dump_test_log(const struct prog_test_def *test,
+ const struct test_state *test_state,
+ bool force_failed)
{
- if (env.test->skip_cnt) {
- env.skip_cnt++;
- env.test->skip_cnt = 0;
+ bool failed = test_state->error_cnt > 0 || force_failed;
+
+ /* worker always holds log */
+ if (env.worker_id != -1)
+ return;
+
+ fflush(stdout); /* exports test_state->log_buf & test_state->log_cnt */
+
+ fprintf(env.stdout, "#%-3d %s:%s\n",
+ test->test_num, test->test_name,
+ failed ? "FAIL" : (test_state->skip_cnt ? "SKIP" : "OK"));
+
+ if (env.verbosity > VERBOSE_NONE || test_state->force_log || failed) {
+ if (test_state->log_cnt) {
+ test_state->log_buf[test_state->log_cnt] = '\0';
+ fprintf(env.stdout, "%s", test_state->log_buf);
+ if (test_state->log_buf[test_state->log_cnt - 1] != '\n')
+ fprintf(env.stdout, "\n");
+ }
}
}
@@ -135,7 +163,6 @@ static void stdio_restore(void);
*/
static void reset_affinity(void)
{
-
cpu_set_t cpuset;
int i, err;
@@ -178,68 +205,78 @@ static void restore_netns(void)
void test__end_subtest(void)
{
struct prog_test_def *test = env.test;
- int sub_error_cnt = test->error_cnt - test->old_error_cnt;
-
- dump_test_log(test, sub_error_cnt);
+ struct test_state *state = env.test_state;
+ int sub_error_cnt = state->error_cnt - state->old_error_cnt;
fprintf(stdout, "#%d/%d %s/%s:%s\n",
- test->test_num, test->subtest_num, test->test_name, test->subtest_name,
- sub_error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
+ test->test_num, state->subtest_num, test->test_name, state->subtest_name,
+ sub_error_cnt ? "FAIL" : (state->subtest_skip_cnt ? "SKIP" : "OK"));
- if (sub_error_cnt)
- test->error_cnt++;
- else if (test->skip_cnt == 0)
- test->sub_succ_cnt++;
- skip_account();
+ if (sub_error_cnt == 0) {
+ if (state->subtest_skip_cnt == 0) {
+ state->sub_succ_cnt++;
+ } else {
+ state->subtest_skip_cnt = 0;
+ state->skip_cnt++;
+ }
+ }
- free(test->subtest_name);
- test->subtest_name = NULL;
+ free(state->subtest_name);
+ state->subtest_name = NULL;
}
-bool test__start_subtest(const char *name)
+bool test__start_subtest(const char *subtest_name)
{
struct prog_test_def *test = env.test;
+ struct test_state *state = env.test_state;
- if (test->subtest_name)
+ if (state->subtest_name)
test__end_subtest();
- test->subtest_num++;
+ state->subtest_num++;
- if (!name || !name[0]) {
+ if (!subtest_name || !subtest_name[0]) {
fprintf(env.stderr,
"Subtest #%d didn't provide sub-test name!\n",
- test->subtest_num);
+ state->subtest_num);
return false;
}
- if (!should_run(&env.subtest_selector, test->subtest_num, name))
+ if (!should_run_subtest(&env.test_selector,
+ &env.subtest_selector,
+ state->subtest_num,
+ test->test_name,
+ subtest_name))
return false;
- test->subtest_name = strdup(name);
- if (!test->subtest_name) {
+ state->subtest_name = strdup(subtest_name);
+ if (!state->subtest_name) {
fprintf(env.stderr,
"Subtest #%d: failed to copy subtest name!\n",
- test->subtest_num);
+ state->subtest_num);
return false;
}
- env.test->old_error_cnt = env.test->error_cnt;
+ state->old_error_cnt = state->error_cnt;
return true;
}
void test__force_log(void)
{
- env.test->force_log = true;
+ env.test_state->force_log = true;
}
void test__skip(void)
{
- env.test->skip_cnt++;
+ if (env.test_state->subtest_name)
+ env.test_state->subtest_skip_cnt++;
+ else
+ env.test_state->skip_cnt++;
}
void test__fail(void)
{
- env.test->error_cnt++;
+ env.test_state->error_cnt++;
}
int test__join_cgroup(const char *path)
@@ -472,8 +509,11 @@ static struct prog_test_def prog_test_defs[] = {
#include <prog_tests/tests.h>
#undef DEFINE_TEST
};
+
static const int prog_test_cnt = ARRAY_SIZE(prog_test_defs);
+static struct test_state test_states[ARRAY_SIZE(prog_test_defs)];
+
const char *argp_program_version = "test_progs 0.1";
const char *argp_program_bug_address = "<bpf@vger.kernel.org>";
static const char argp_program_doc[] = "BPF selftests test runner";
@@ -527,63 +567,29 @@ static int libbpf_print_fn(enum libbpf_print_level level,
return 0;
}
-static void free_str_set(const struct str_set *set)
+static void free_test_filter_set(const struct test_filter_set *set)
{
- int i;
+ int i, j;
if (!set)
return;
- for (i = 0; i < set->cnt; i++)
- free((void *)set->strs[i]);
- free(set->strs);
-}
-
-static int parse_str_list(const char *s, struct str_set *set, bool is_glob_pattern)
-{
- char *input, *state = NULL, *next, **tmp, **strs = NULL;
- int i, cnt = 0;
+ for (i = 0; i < set->cnt; i++) {
+ free((void *)set->tests[i].name);
+ for (j = 0; j < set->tests[i].subtest_cnt; j++)
+ free((void *)set->tests[i].subtests[j]);
- input = strdup(s);
- if (!input)
- return -ENOMEM;
-
- while ((next = strtok_r(state ? NULL : input, ",", &state))) {
- tmp = realloc(strs, sizeof(*strs) * (cnt + 1));
- if (!tmp)
- goto err;
- strs = tmp;
-
- if (is_glob_pattern) {
- strs[cnt] = strdup(next);
- if (!strs[cnt])
- goto err;
- } else {
- strs[cnt] = malloc(strlen(next) + 2 + 1);
- if (!strs[cnt])
- goto err;
- sprintf(strs[cnt], "*%s*", next);
- }
-
- cnt++;
+ free((void *)set->tests[i].subtests);
}
- tmp = realloc(set->strs, sizeof(*strs) * (cnt + set->cnt));
- if (!tmp)
- goto err;
- memcpy(tmp + set->cnt, strs, sizeof(*strs) * cnt);
- set->strs = (const char **)tmp;
- set->cnt += cnt;
+ free((void *)set->tests);
+}
- free(input);
- free(strs);
- return 0;
-err:
- for (i = 0; i < cnt; i++)
- free(strs[i]);
- free(strs);
- free(input);
- return -ENOMEM;
+static void free_test_selector(struct test_selector *test_selector)
+{
+ free_test_filter_set(&test_selector->blacklist);
+ free_test_filter_set(&test_selector->whitelist);
+ free(test_selector->num_set);
}
extern int extra_prog_load_log_flags;
@@ -615,33 +621,17 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
}
case ARG_TEST_NAME_GLOB_ALLOWLIST:
case ARG_TEST_NAME: {
- char *subtest_str = strchr(arg, '/');
-
- if (subtest_str) {
- *subtest_str = '\0';
- if (parse_str_list(subtest_str + 1,
- &env->subtest_selector.whitelist,
- key == ARG_TEST_NAME_GLOB_ALLOWLIST))
- return -ENOMEM;
- }
- if (parse_str_list(arg, &env->test_selector.whitelist,
- key == ARG_TEST_NAME_GLOB_ALLOWLIST))
+ if (parse_test_list(arg,
+ &env->test_selector.whitelist,
+ key == ARG_TEST_NAME_GLOB_ALLOWLIST))
return -ENOMEM;
break;
}
case ARG_TEST_NAME_GLOB_DENYLIST:
case ARG_TEST_NAME_BLACKLIST: {
- char *subtest_str = strchr(arg, '/');
-
- if (subtest_str) {
- *subtest_str = '\0';
- if (parse_str_list(subtest_str + 1,
- &env->subtest_selector.blacklist,
- key == ARG_TEST_NAME_GLOB_DENYLIST))
- return -ENOMEM;
- }
- if (parse_str_list(arg, &env->test_selector.blacklist,
- key == ARG_TEST_NAME_GLOB_DENYLIST))
+ if (parse_test_list(arg,
+ &env->test_selector.blacklist,
+ key == ARG_TEST_NAME_GLOB_DENYLIST))
return -ENOMEM;
break;
}
@@ -706,7 +696,7 @@ static error_t parse_arg(int key, char *arg, struct argp_state *state)
return 0;
}
-static void stdio_hijack(void)
+static void stdio_hijack(char **log_buf, size_t *log_cnt)
{
#ifdef __GLIBC__
env.stdout = stdout;
@@ -720,7 +710,7 @@ static void stdio_hijack(void)
/* stdout and stderr -> buffer */
fflush(stdout);
- stdout = open_memstream(&env.log_buf, &env.log_cnt);
+ stdout = open_memstream(log_buf, log_cnt);
if (!stdout) {
stdout = env.stdout;
perror("open_memstream");
@@ -823,7 +813,7 @@ void crash_handler(int signum)
sz = backtrace(bt, ARRAY_SIZE(bt));
if (env.test)
- dump_test_log(env.test, true);
+ dump_test_log(env.test, env.test_state, true);
if (env.stdout)
stdio_restore();
if (env.worker_id != -1)
@@ -845,17 +835,6 @@ static int current_test_idx;
static pthread_mutex_t current_test_lock;
static pthread_mutex_t stdout_output_lock;
-struct test_result {
- int error_cnt;
- int skip_cnt;
- int sub_succ_cnt;
-
- size_t log_cnt;
- char *log_buf;
-};
-
-static struct test_result test_results[ARRAY_SIZE(prog_test_defs)];
-
static inline const char *str_msg(const struct msg *msg, char *buf)
{
switch (msg->type) {
@@ -909,8 +888,12 @@ static int recv_message(int sock, struct msg *msg)
static void run_one_test(int test_num)
{
struct prog_test_def *test = &prog_test_defs[test_num];
+ struct test_state *state = &test_states[test_num];
env.test = test;
+ env.test_state = state;
+
+ stdio_hijack(&state->log_buf, &state->log_cnt);
if (test->run_test)
test->run_test();
@@ -918,17 +901,19 @@ static void run_one_test(int test_num)
test->run_serial_test();
/* ensure last sub-test is finalized properly */
- if (test->subtest_name)
+ if (state->subtest_name)
test__end_subtest();
- test->tested = true;
+ state->tested = true;
- dump_test_log(test, test->error_cnt);
+ dump_test_log(test, state, false);
reset_affinity();
restore_netns();
if (test->need_cgroup_cleanup)
cleanup_cgroup_environment();
+
+ stdio_restore();
}
struct dispatch_data {
@@ -947,7 +932,7 @@ static void *dispatch_thread(void *ctx)
while (true) {
int test_to_run = -1;
struct prog_test_def *test;
- struct test_result *result;
+ struct test_state *state;
/* grab a test */
{
@@ -994,16 +979,15 @@ static void *dispatch_thread(void *ctx)
if (test_to_run != msg_test_done.test_done.test_num)
goto error;
- test->tested = true;
- result = &test_results[test_to_run];
-
- result->error_cnt = msg_test_done.test_done.error_cnt;
- result->skip_cnt = msg_test_done.test_done.skip_cnt;
- result->sub_succ_cnt = msg_test_done.test_done.sub_succ_cnt;
+ state = &test_states[test_to_run];
+ state->tested = true;
+ state->error_cnt = msg_test_done.test_done.error_cnt;
+ state->skip_cnt = msg_test_done.test_done.skip_cnt;
+ state->sub_succ_cnt = msg_test_done.test_done.sub_succ_cnt;
/* collect all logs */
if (msg_test_done.test_done.have_log) {
- log_fp = open_memstream(&result->log_buf, &result->log_cnt);
+ log_fp = open_memstream(&state->log_buf, &state->log_cnt);
if (!log_fp)
goto error;
@@ -1022,25 +1006,11 @@ static void *dispatch_thread(void *ctx)
fclose(log_fp);
log_fp = NULL;
}
- /* output log */
- {
- pthread_mutex_lock(&stdout_output_lock);
-
- if (result->log_cnt) {
- result->log_buf[result->log_cnt] = '\0';
- fprintf(stdout, "%s", result->log_buf);
- if (result->log_buf[result->log_cnt - 1] != '\n')
- fprintf(stdout, "\n");
- }
-
- fprintf(stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- result->error_cnt ? "FAIL" : (result->skip_cnt ? "SKIP" : "OK"));
-
- pthread_mutex_unlock(&stdout_output_lock);
- }
-
} /* wait for test done */
+
+ pthread_mutex_lock(&stdout_output_lock);
+ dump_test_log(test, state, false);
+ pthread_mutex_unlock(&stdout_output_lock);
} /* while (true) */
error:
if (env.debug)
@@ -1062,38 +1032,50 @@ done:
return NULL;
}
-static void print_all_error_logs(void)
+static void calculate_summary_and_print_errors(struct test_env *env)
{
int i;
+ int succ_cnt = 0, fail_cnt = 0, sub_succ_cnt = 0, skip_cnt = 0;
+
+ for (i = 0; i < prog_test_cnt; i++) {
+ struct test_state *state = &test_states[i];
+
+ if (!state->tested)
+ continue;
+
+ sub_succ_cnt += state->sub_succ_cnt;
+ skip_cnt += state->skip_cnt;
+
+ if (state->error_cnt)
+ fail_cnt++;
+ else
+ succ_cnt++;
+ }
- if (env.fail_cnt)
- fprintf(stdout, "\nAll error logs:\n");
+ if (fail_cnt)
+ printf("\nAll error logs:\n");
/* print error logs again */
for (i = 0; i < prog_test_cnt; i++) {
- struct prog_test_def *test;
- struct test_result *result;
-
- test = &prog_test_defs[i];
- result = &test_results[i];
+ struct prog_test_def *test = &prog_test_defs[i];
+ struct test_state *state = &test_states[i];
- if (!test->tested || !result->error_cnt)
+ if (!state->tested || !state->error_cnt)
continue;
- fprintf(stdout, "\n#%d %s:%s\n",
- test->test_num, test->test_name,
- result->error_cnt ? "FAIL" : (result->skip_cnt ? "SKIP" : "OK"));
-
- if (result->log_cnt) {
- result->log_buf[result->log_cnt] = '\0';
- fprintf(stdout, "%s", result->log_buf);
- if (result->log_buf[result->log_cnt - 1] != '\n')
- fprintf(stdout, "\n");
- }
+ dump_test_log(test, state, true);
}
+
+ printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
+ succ_cnt, sub_succ_cnt, skip_cnt, fail_cnt);
+
+ env->succ_cnt = succ_cnt;
+ env->sub_succ_cnt = sub_succ_cnt;
+ env->fail_cnt = fail_cnt;
+ env->skip_cnt = skip_cnt;
}
-static int server_main(void)
+static void server_main(void)
{
pthread_t *dispatcher_threads;
struct dispatch_data *data;
@@ -1149,60 +1131,18 @@ static int server_main(void)
for (int i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
- struct test_result *result = &test_results[i];
if (!test->should_run || !test->run_serial_test)
continue;
- stdio_hijack();
-
run_one_test(i);
-
- stdio_restore();
- if (env.log_buf) {
- result->log_cnt = env.log_cnt;
- result->log_buf = strdup(env.log_buf);
-
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
- }
- restore_netns();
-
- fprintf(stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
-
- result->error_cnt = test->error_cnt;
- result->skip_cnt = test->skip_cnt;
- result->sub_succ_cnt = test->sub_succ_cnt;
}
/* generate summary */
fflush(stderr);
fflush(stdout);
- for (i = 0; i < prog_test_cnt; i++) {
- struct prog_test_def *current_test;
- struct test_result *result;
-
- current_test = &prog_test_defs[i];
- result = &test_results[i];
-
- if (!current_test->tested)
- continue;
-
- env.succ_cnt += result->error_cnt ? 0 : 1;
- env.skip_cnt += result->skip_cnt;
- if (result->error_cnt)
- env.fail_cnt++;
- env.sub_succ_cnt += result->sub_succ_cnt;
- }
-
- print_all_error_logs();
-
- fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
- env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+ calculate_summary_and_print_errors(&env);
/* reap all workers */
for (i = 0; i < env.workers; i++) {
@@ -1212,8 +1152,6 @@ static int server_main(void)
if (pid != env.worker_pids[i])
perror("Unable to reap worker");
}
-
- return 0;
}
static int worker_main(int sock)
@@ -1234,35 +1172,29 @@ static int worker_main(int sock)
env.worker_id);
goto out;
case MSG_DO_TEST: {
- int test_to_run;
- struct prog_test_def *test;
+ int test_to_run = msg.do_test.test_num;
+ struct prog_test_def *test = &prog_test_defs[test_to_run];
+ struct test_state *state = &test_states[test_to_run];
struct msg msg_done;
- test_to_run = msg.do_test.test_num;
- test = &prog_test_defs[test_to_run];
-
if (env.debug)
fprintf(stderr, "[%d]: #%d:%s running.\n",
env.worker_id,
test_to_run + 1,
test->test_name);
- stdio_hijack();
-
run_one_test(test_to_run);
- stdio_restore();
-
memset(&msg_done, 0, sizeof(msg_done));
msg_done.type = MSG_TEST_DONE;
msg_done.test_done.test_num = test_to_run;
- msg_done.test_done.error_cnt = test->error_cnt;
- msg_done.test_done.skip_cnt = test->skip_cnt;
- msg_done.test_done.sub_succ_cnt = test->sub_succ_cnt;
+ msg_done.test_done.error_cnt = state->error_cnt;
+ msg_done.test_done.skip_cnt = state->skip_cnt;
+ msg_done.test_done.sub_succ_cnt = state->sub_succ_cnt;
msg_done.test_done.have_log = false;
- if (env.verbosity > VERBOSE_NONE || test->force_log || test->error_cnt) {
- if (env.log_cnt)
+ if (env.verbosity > VERBOSE_NONE || state->force_log || state->error_cnt) {
+ if (state->log_cnt)
msg_done.test_done.have_log = true;
}
if (send_message(sock, &msg_done) < 0) {
@@ -1275,8 +1207,8 @@ static int worker_main(int sock)
char *src;
size_t slen;
- src = env.log_buf;
- slen = env.log_cnt;
+ src = state->log_buf;
+ slen = state->log_cnt;
while (slen) {
struct msg msg_log;
char *dest;
@@ -1296,10 +1228,10 @@ static int worker_main(int sock)
assert(send_message(sock, &msg_log) >= 0);
}
}
- if (env.log_buf) {
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
+ if (state->log_buf) {
+ free(state->log_buf);
+ state->log_buf = NULL;
+ state->log_cnt = 0;
}
if (env.debug)
fprintf(stderr, "[%d]: #%d:%s done.\n",
@@ -1430,7 +1362,6 @@ int main(int argc, char **argv)
for (i = 0; i < prog_test_cnt; i++) {
struct prog_test_def *test = &prog_test_defs[i];
- struct test_result *result;
if (!test->should_run)
continue;
@@ -1446,34 +1377,7 @@ int main(int argc, char **argv)
continue;
}
- stdio_hijack();
-
run_one_test(i);
-
- stdio_restore();
-
- fprintf(env.stdout, "#%d %s:%s\n",
- test->test_num, test->test_name,
- test->error_cnt ? "FAIL" : (test->skip_cnt ? "SKIP" : "OK"));
-
- result = &test_results[i];
- result->error_cnt = test->error_cnt;
- if (env.log_buf) {
- result->log_buf = strdup(env.log_buf);
- result->log_cnt = env.log_cnt;
-
- free(env.log_buf);
- env.log_buf = NULL;
- env.log_cnt = 0;
- }
-
- if (test->error_cnt)
- env.fail_cnt++;
- else
- env.succ_cnt++;
-
- skip_account();
- env.sub_succ_cnt += test->sub_succ_cnt;
}
if (env.get_test_cnt) {
@@ -1484,21 +1388,14 @@ int main(int argc, char **argv)
if (env.list_test_names)
goto out;
- print_all_error_logs();
-
- fprintf(stdout, "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n",
- env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt);
+ calculate_summary_and_print_errors(&env);
close(env.saved_netns_fd);
out:
if (!env.list_test_names && env.has_testmod)
unload_bpf_testmod();
- free_str_set(&env.test_selector.blacklist);
- free_str_set(&env.test_selector.whitelist);
- free(env.test_selector.num_set);
- free_str_set(&env.subtest_selector.blacklist);
- free_str_set(&env.subtest_selector.whitelist);
- free(env.subtest_selector.num_set);
+
+ free_test_selector(&env.test_selector);
if (env.succ_cnt + env.fail_cnt + env.skip_cnt == 0)
return EXIT_NO_TEST;
diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h
index eec4c7385b14..d3fee3b98888 100644
--- a/tools/testing/selftests/bpf/test_progs.h
+++ b/tools/testing/selftests/bpf/test_progs.h
@@ -25,6 +25,7 @@ typedef __u16 __sum16;
#include <sys/wait.h>
#include <sys/types.h>
#include <sys/time.h>
+#include <sys/param.h>
#include <fcntl.h>
#include <pthread.h>
#include <linux/bpf.h>
@@ -37,7 +38,6 @@ typedef __u16 __sum16;
#include <bpf/bpf_endian.h>
#include "trace_helpers.h"
#include "testing_helpers.h"
-#include "flow_dissector_load.h"
enum verbosity {
VERBOSE_NONE,
@@ -46,18 +46,43 @@ enum verbosity {
VERBOSE_SUPER,
};
-struct str_set {
- const char **strs;
+struct test_filter {
+ char *name;
+ char **subtests;
+ int subtest_cnt;
+};
+
+struct test_filter_set {
+ struct test_filter *tests;
int cnt;
};
struct test_selector {
- struct str_set whitelist;
- struct str_set blacklist;
+ struct test_filter_set whitelist;
+ struct test_filter_set blacklist;
bool *num_set;
int num_set_len;
};
+struct test_state {
+ bool tested;
+ bool force_log;
+
+ int error_cnt;
+ int skip_cnt;
+ int subtest_skip_cnt;
+ int sub_succ_cnt;
+
+ char *subtest_name;
+ int subtest_num;
+
+ /* store counts before subtest started */
+ int old_error_cnt;
+
+ size_t log_cnt;
+ char *log_buf;
+};
+
struct test_env {
struct test_selector test_selector;
struct test_selector subtest_selector;
@@ -70,12 +95,11 @@ struct test_env {
bool get_test_cnt;
bool list_test_names;
- struct prog_test_def *test; /* current running tests */
+ struct prog_test_def *test; /* current running test */
+ struct test_state *test_state; /* current running test result */
FILE *stdout;
FILE *stderr;
- char *log_buf;
- size_t log_cnt;
int nr_cpus;
int succ_cnt; /* successful tests */
@@ -120,11 +144,12 @@ struct msg {
extern struct test_env env;
-extern void test__force_log();
-extern bool test__start_subtest(const char *name);
-extern void test__skip(void);
-extern void test__fail(void);
-extern int test__join_cgroup(const char *path);
+void test__force_log(void);
+bool test__start_subtest(const char *name);
+void test__end_subtest(void);
+void test__skip(void);
+void test__fail(void);
+int test__join_cgroup(const char *path);
#define PRINT_FAIL(format...) \
({ \
@@ -267,6 +292,17 @@ extern int test__join_cgroup(const char *path);
___ok; \
})
+#define ASSERT_HAS_SUBSTR(str, substr, name) ({ \
+ static int duration = 0; \
+ const char *___str = str; \
+ const char *___substr = substr; \
+ bool ___ok = strstr(___str, ___substr) != NULL; \
+ CHECK(!___ok, (name), \
+ "unexpected %s: '%s' is not a substring of '%s'\n", \
+ (name), ___substr, ___str); \
+ ___ok; \
+})
+
#define ASSERT_OK(res, name) ({ \
static int duration = 0; \
long long ___res = (res); \
diff --git a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
index 4a64306728ab..3256de30f563 100644
--- a/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
+++ b/tools/testing/selftests/bpf/test_skb_cgroup_id_user.c
@@ -15,7 +15,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
#define CGROUP_PATH "/skb_cgroup_test"
@@ -160,6 +159,9 @@ int main(int argc, char **argv)
exit(EXIT_FAILURE);
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
cgfd = cgroup_setup_and_join(CGROUP_PATH);
if (cgfd < 0)
goto err;
diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c
index fe10f8134278..810c3740b2cc 100644
--- a/tools/testing/selftests/bpf/test_sock.c
+++ b/tools/testing/selftests/bpf/test_sock.c
@@ -14,7 +14,6 @@
#include "cgroup_helpers.h"
#include <bpf/bpf_endian.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#define CG_PATH "/foo"
@@ -493,7 +492,7 @@ static int run_test_case(int cgfd, const struct sock_test *test)
goto err;
}
- if (attach_sock_prog(cgfd, progfd, test->attach_type) == -1) {
+ if (attach_sock_prog(cgfd, progfd, test->attach_type) < 0) {
if (test->result == ATTACH_REJECT)
goto out;
else
@@ -541,6 +540,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c
index f3d5d7ac6505..458564fcfc82 100644
--- a/tools/testing/selftests/bpf/test_sock_addr.c
+++ b/tools/testing/selftests/bpf/test_sock_addr.c
@@ -19,7 +19,6 @@
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#ifndef ENOTSUPP
@@ -1418,6 +1417,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index dfb4f5c0fcb9..0fbaccdc8861 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -18,7 +18,6 @@
#include <sched.h>
#include <sys/time.h>
-#include <sys/resource.h>
#include <sys/types.h>
#include <sys/sendfile.h>
@@ -37,7 +36,6 @@
#include <bpf/libbpf.h>
#include "bpf_util.h"
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
int running;
@@ -2017,6 +2015,9 @@ int main(int argc, char **argv)
cg_created = 1;
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (test == SELFTESTS) {
err = test_selftest(cg_fd, &options);
goto out;
diff --git a/tools/testing/selftests/bpf/test_sysctl.c b/tools/testing/selftests/bpf/test_sysctl.c
index 4f6cf833b522..57620e7c9048 100644
--- a/tools/testing/selftests/bpf/test_sysctl.c
+++ b/tools/testing/selftests/bpf/test_sysctl.c
@@ -14,7 +14,6 @@
#include <bpf/libbpf.h>
#include <bpf/bpf_endian.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
#include "testing_helpers.h"
@@ -1561,7 +1560,7 @@ static int run_test_case(int cgfd, struct sysctl_test *test)
goto err;
}
- if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) == -1) {
+ if (bpf_prog_attach(progfd, cgfd, atype, BPF_F_ALLOW_OVERRIDE) < 0) {
if (test->result == ATTACH_REJECT)
goto out;
else
@@ -1618,6 +1617,9 @@ int main(int argc, char **argv)
if (cgfd < 0)
goto err;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
if (run_tests(cgfd))
goto err;
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
index 0851c42ee31c..5546b05a0486 100644
--- a/tools/testing/selftests/bpf/test_tag.c
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -20,7 +20,6 @@
#include <bpf/bpf.h>
#include "../../../include/linux/filter.h"
-#include "bpf_rlimit.h"
#include "testing_helpers.h"
static struct bpf_insn prog[BPF_MAXINSNS];
@@ -189,6 +188,9 @@ int main(void)
uint32_t tests = 0;
int i, fd_map;
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, NULL, sizeof(int),
sizeof(int), 1, &opts);
assert(fd_map > 0);
diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
index e7775d3bbe08..5c8ef062f760 100644
--- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
+++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c
@@ -15,7 +15,6 @@
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
-#include "bpf_rlimit.h"
#include "cgroup_helpers.h"
static int start_server(const struct sockaddr *addr, socklen_t len, bool dual)
@@ -235,6 +234,9 @@ int main(int argc, char **argv)
exit(1);
}
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
results = get_map_fd_by_prog_id(atoi(argv[1]), &xdp);
if (results < 0) {
log_err("Can't get map");
diff --git a/tools/testing/selftests/bpf/test_tcpnotify_user.c b/tools/testing/selftests/bpf/test_tcpnotify_user.c
index 4c5114765b23..8284db8b0f13 100644
--- a/tools/testing/selftests/bpf/test_tcpnotify_user.c
+++ b/tools/testing/selftests/bpf/test_tcpnotify_user.c
@@ -19,7 +19,6 @@
#include <linux/perf_event.h>
#include <linux/err.h>
-#include "bpf_rlimit.h"
#include "bpf_util.h"
#include "cgroup_helpers.h"
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index a2cd236c32eb..372579c9f45e 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -53,7 +53,7 @@
#define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8
-#define MAX_NR_MAPS 22
+#define MAX_NR_MAPS 23
#define MAX_TEST_RUNS 8
#define POINTER_VALUE 0xcafe4all
#define TEST_DATA_LEN 64
@@ -101,6 +101,7 @@ struct bpf_test {
int fixup_map_reuseport_array[MAX_FIXUPS];
int fixup_map_ringbuf[MAX_FIXUPS];
int fixup_map_timer[MAX_FIXUPS];
+ int fixup_map_kptr[MAX_FIXUPS];
struct kfunc_btf_id_pair fixup_kfunc_btf_id[MAX_FIXUPS];
/* Expected verifier log output for result REJECT or VERBOSE_ACCEPT.
* Can be a tab-separated sequence of expected strings. An empty string
@@ -621,8 +622,15 @@ static int create_cgroup_storage(bool percpu)
* struct timer {
* struct bpf_timer t;
* };
+ * struct btf_ptr {
+ * struct prog_test_ref_kfunc __kptr *ptr;
+ * struct prog_test_ref_kfunc __kptr_ref *ptr;
+ * struct prog_test_member __kptr_ref *ptr;
+ * }
*/
-static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t";
+static const char btf_str_sec[] = "\0bpf_spin_lock\0val\0cnt\0l\0bpf_timer\0timer\0t"
+ "\0btf_ptr\0prog_test_ref_kfunc\0ptr\0kptr\0kptr_ref"
+ "\0prog_test_member";
static __u32 btf_raw_types[] = {
/* int */
BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
@@ -638,6 +646,22 @@ static __u32 btf_raw_types[] = {
/* struct timer */ /* [5] */
BTF_TYPE_ENC(35, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 1), 16),
BTF_MEMBER_ENC(41, 4, 0), /* struct bpf_timer t; */
+ /* struct prog_test_ref_kfunc */ /* [6] */
+ BTF_STRUCT_ENC(51, 0, 0),
+ BTF_STRUCT_ENC(89, 0, 0), /* [7] */
+ /* type tag "kptr" */
+ BTF_TYPE_TAG_ENC(75, 6), /* [8] */
+ /* type tag "kptr_ref" */
+ BTF_TYPE_TAG_ENC(80, 6), /* [9] */
+ BTF_TYPE_TAG_ENC(80, 7), /* [10] */
+ BTF_PTR_ENC(8), /* [11] */
+ BTF_PTR_ENC(9), /* [12] */
+ BTF_PTR_ENC(10), /* [13] */
+ /* struct btf_ptr */ /* [14] */
+ BTF_STRUCT_ENC(43, 3, 24),
+ BTF_MEMBER_ENC(71, 11, 0), /* struct prog_test_ref_kfunc __kptr *ptr; */
+ BTF_MEMBER_ENC(71, 12, 64), /* struct prog_test_ref_kfunc __kptr_ref *ptr; */
+ BTF_MEMBER_ENC(71, 13, 128), /* struct prog_test_member __kptr_ref *ptr; */
};
static int load_btf(void)
@@ -727,6 +751,25 @@ static int create_map_timer(void)
return fd;
}
+static int create_map_kptr(void)
+{
+ LIBBPF_OPTS(bpf_map_create_opts, opts,
+ .btf_key_type_id = 1,
+ .btf_value_type_id = 14,
+ );
+ int fd, btf_fd;
+
+ btf_fd = load_btf();
+ if (btf_fd < 0)
+ return -1;
+
+ opts.btf_fd = btf_fd;
+ fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "test_map", 4, 24, 1, &opts);
+ if (fd < 0)
+ printf("Failed to create map with btf_id pointer\n");
+ return fd;
+}
+
static char bpf_vlog[UINT_MAX >> 8];
static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
@@ -754,6 +797,7 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_map_reuseport_array = test->fixup_map_reuseport_array;
int *fixup_map_ringbuf = test->fixup_map_ringbuf;
int *fixup_map_timer = test->fixup_map_timer;
+ int *fixup_map_kptr = test->fixup_map_kptr;
struct kfunc_btf_id_pair *fixup_kfunc_btf_id = test->fixup_kfunc_btf_id;
if (test->fill_helper) {
@@ -947,6 +991,13 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
fixup_map_timer++;
} while (*fixup_map_timer);
}
+ if (*fixup_map_kptr) {
+ map_fds[22] = create_map_kptr();
+ do {
+ prog[*fixup_map_kptr].imm = map_fds[22];
+ fixup_map_kptr++;
+ } while (*fixup_map_kptr);
+ }
/* Patch in kfunc BTF IDs */
if (fixup_kfunc_btf_id->kfunc) {
diff --git a/tools/testing/selftests/bpf/test_verifier_log.c b/tools/testing/selftests/bpf/test_verifier_log.c
index 8d6918c3b4a2..70feda97cee5 100644
--- a/tools/testing/selftests/bpf/test_verifier_log.c
+++ b/tools/testing/selftests/bpf/test_verifier_log.c
@@ -11,8 +11,6 @@
#include <bpf/bpf.h>
-#include "bpf_rlimit.h"
-
#define LOG_SIZE (1 << 20)
#define err(str...) printf("ERROR: " str)
@@ -141,6 +139,9 @@ int main(int argc, char **argv)
memset(log, 1, LOG_SIZE);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
+
/* Test incorrect attr */
printf("Test log_level 0...\n");
test_log_bad(log, LOG_SIZE, 0);
diff --git a/tools/testing/selftests/bpf/testing_helpers.c b/tools/testing/selftests/bpf/testing_helpers.c
index 87867f7a78c3..9695318e8132 100644
--- a/tools/testing/selftests/bpf/testing_helpers.c
+++ b/tools/testing/selftests/bpf/testing_helpers.c
@@ -6,6 +6,7 @@
#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
+#include "test_progs.h"
#include "testing_helpers.h"
int parse_num_list(const char *s, bool **num_set, int *num_set_len)
@@ -69,6 +70,94 @@ int parse_num_list(const char *s, bool **num_set, int *num_set_len)
return 0;
}
+int parse_test_list(const char *s,
+ struct test_filter_set *set,
+ bool is_glob_pattern)
+{
+ char *input, *state = NULL, *next;
+ struct test_filter *tmp, *tests = NULL;
+ int i, j, cnt = 0;
+
+ input = strdup(s);
+ if (!input)
+ return -ENOMEM;
+
+ while ((next = strtok_r(state ? NULL : input, ",", &state))) {
+ char *subtest_str = strchr(next, '/');
+ char *pattern = NULL;
+ int glob_chars = 0;
+
+ tmp = realloc(tests, sizeof(*tests) * (cnt + 1));
+ if (!tmp)
+ goto err;
+ tests = tmp;
+
+ tests[cnt].subtest_cnt = 0;
+ tests[cnt].subtests = NULL;
+
+ if (is_glob_pattern) {
+ pattern = "%s";
+ } else {
+ pattern = "*%s*";
+ glob_chars = 2;
+ }
+
+ if (subtest_str) {
+ char **tmp_subtests = NULL;
+ int subtest_cnt = tests[cnt].subtest_cnt;
+
+ *subtest_str = '\0';
+ subtest_str += 1;
+ tmp_subtests = realloc(tests[cnt].subtests,
+ sizeof(*tmp_subtests) *
+ (subtest_cnt + 1));
+ if (!tmp_subtests)
+ goto err;
+ tests[cnt].subtests = tmp_subtests;
+
+ tests[cnt].subtests[subtest_cnt] =
+ malloc(strlen(subtest_str) + glob_chars + 1);
+ if (!tests[cnt].subtests[subtest_cnt])
+ goto err;
+ sprintf(tests[cnt].subtests[subtest_cnt],
+ pattern,
+ subtest_str);
+
+ tests[cnt].subtest_cnt++;
+ }
+
+ tests[cnt].name = malloc(strlen(next) + glob_chars + 1);
+ if (!tests[cnt].name)
+ goto err;
+ sprintf(tests[cnt].name, pattern, next);
+
+ cnt++;
+ }
+
+ tmp = realloc(set->tests, sizeof(*tests) * (cnt + set->cnt));
+ if (!tmp)
+ goto err;
+
+ memcpy(tmp + set->cnt, tests, sizeof(*tests) * cnt);
+ set->tests = tmp;
+ set->cnt += cnt;
+
+ free(tests);
+ free(input);
+ return 0;
+
+err:
+ for (i = 0; i < cnt; i++) {
+ for (j = 0; j < tests[i].subtest_cnt; j++)
+ free(tests[i].subtests[j]);
+
+ free(tests[i].name);
+ }
+ free(tests);
+ free(input);
+ return -ENOMEM;
+}
+
__u32 link_info_prog_id(const struct bpf_link *link, struct bpf_link_info *info)
{
__u32 info_len = sizeof(*info);
diff --git a/tools/testing/selftests/bpf/testing_helpers.h b/tools/testing/selftests/bpf/testing_helpers.h
index f46ebc476ee8..6ec00bf79cb5 100644
--- a/tools/testing/selftests/bpf/testing_helpers.h
+++ b/tools/testing/selftests/bpf/testing_helpers.h
@@ -12,3 +12,11 @@ int bpf_test_load_program(enum bpf_prog_type type, const struct bpf_insn *insns,
size_t insns_cnt, const char *license,
__u32 kern_version, char *log_buf,
size_t log_buf_sz);
+
+/*
+ * The function below is exported for testing by the prog_tests framework
+ */
+struct test_filter_set;
+int parse_test_list(const char *s,
+ struct test_filter_set *test_set,
+ bool is_glob_pattern);
diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c
index 2e03decb11b6..743ed34c1238 100644
--- a/tools/testing/selftests/bpf/verifier/calls.c
+++ b/tools/testing/selftests/bpf/verifier/calls.c
@@ -139,6 +139,26 @@
},
},
{
+ "calls: invalid kfunc call: don't match first member type when passed to release kfunc",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .result = REJECT,
+ .errstr = "kernel function bpf_kfunc_call_memb1_release args#0 expected pointer",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_memb_acquire", 1 },
+ { "bpf_kfunc_call_memb1_release", 5 },
+ },
+},
+{
"calls: invalid kfunc call: PTR_TO_BTF_ID with negative offset",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
diff --git a/tools/testing/selftests/bpf/verifier/map_kptr.c b/tools/testing/selftests/bpf/verifier/map_kptr.c
new file mode 100644
index 000000000000..9113834640e6
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/map_kptr.c
@@ -0,0 +1,469 @@
+/* Common tests */
+{
+ "map_kptr: BPF_ST imm != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "BPF_ST imm must be 0 when storing to kptr at off=0",
+},
+{
+ "map_kptr: size != bpf_size_to_bytes(BPF_DW)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access size must be BPF_DW",
+},
+{
+ "map_kptr: map_value non-const var_off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access cannot have variable offset",
+},
+{
+ "map_kptr: bpf_kptr_xchg non-const var_off",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_2, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_3),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 doesn't have constant offset. kptr has to be at the constant offset",
+},
+{
+ "map_kptr: unaligned boundary load/store",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 7),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr access misaligned expected=0 off=7",
+},
+{
+ "map_kptr: reject var_off != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
+ BPF_JMP_IMM(BPF_JLE, BPF_REG_2, 4, 1),
+ BPF_EXIT_INSN(),
+ BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "variable untrusted_ptr_ access var_off=(0x0; 0x7) disallowed",
+},
+/* Tests for unreferenced PTR_TO_BTF_ID */
+{
+ "map_kptr: unref: reject btf_struct_ids_match == false",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R1 type=untrusted_ptr_prog_test_ref_kfunc expected=ptr_prog_test",
+},
+{
+ "map_kptr: unref: loaded pointer marked as untrusted",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R0 invalid mem access 'untrusted_ptr_or_null_'",
+},
+{
+ "map_kptr: unref: correct in kernel type size",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 24),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "access beyond struct prog_test_ref_kfunc at off 24 size 8",
+},
+{
+ "map_kptr: unref: inherit PTR_UNTRUSTED on struct walk",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 16),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 type=untrusted_ptr_ expected=percpu_ptr_",
+},
+{
+ "map_kptr: unref: no reference state created",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = ACCEPT,
+},
+{
+ "map_kptr: unref: bpf_kptr_xchg rejected",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "off=0 kptr isn't referenced kptr",
+},
+{
+ "map_kptr: unref: bpf_kfunc_call_test_kptr_get rejected",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "arg#0 no referenced kptr at map value offset=0",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_kptr_get", 13 },
+ }
+},
+/* Tests for referenced PTR_TO_BTF_ID */
+{
+ "map_kptr: ref: loaded pointer marked as untrusted",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 8),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_this_cpu_ptr),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "R1 type=untrusted_ptr_or_null_ expected=percpu_ptr_",
+},
+{
+ "map_kptr: ref: reject off != 0",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "invalid kptr access, R2 type=ptr_prog_test_ref_kfunc expected=ptr_prog_test_member",
+},
+{
+ "map_kptr: ref: reference state created and released on xchg",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_kptr_xchg),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "Unreleased reference id=5 alloc_insn=20",
+ .fixup_kfunc_btf_id = {
+ { "bpf_kfunc_call_test_acquire", 15 },
+ }
+},
+{
+ "map_kptr: ref: reject STX",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 8),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "store to referenced kptr disallowed",
+},
+{
+ "map_kptr: ref: reject ST",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_ST_MEM(BPF_DW, BPF_REG_0, 8, 0),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "store to referenced kptr disallowed",
+},
+{
+ "map_kptr: reject helper access to kptr",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_LD_MAP_FD(BPF_REG_6, 0),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_delete_elem),
+ BPF_EXIT_INSN(),
+ },
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .fixup_map_kptr = { 1 },
+ .result = REJECT,
+ .errstr = "kptr cannot be accessed indirectly by helper",
+},
diff --git a/tools/testing/selftests/bpf/verifier/ref_tracking.c b/tools/testing/selftests/bpf/verifier/ref_tracking.c
index fbd682520e47..57a83d763ec1 100644
--- a/tools/testing/selftests/bpf/verifier/ref_tracking.c
+++ b/tools/testing/selftests/bpf/verifier/ref_tracking.c
@@ -796,7 +796,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
diff --git a/tools/testing/selftests/bpf/verifier/sock.c b/tools/testing/selftests/bpf/verifier/sock.c
index 86b24cad27a7..d11d0b28be41 100644
--- a/tools/testing/selftests/bpf/verifier/sock.c
+++ b/tools/testing/selftests/bpf/verifier/sock.c
@@ -417,7 +417,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"bpf_sk_release(bpf_sk_fullsock(skb->sk))",
@@ -436,7 +436,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"bpf_sk_release(bpf_tcp_sock(skb->sk))",
@@ -455,7 +455,7 @@
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
- .errstr = "reference has not been acquired before",
+ .errstr = "R1 must be referenced when passed to release function",
},
{
"sk_storage_get(map, skb->sk, NULL, 0): value == NULL",
diff --git a/tools/testing/selftests/bpf/xdp_redirect_multi.c b/tools/testing/selftests/bpf/xdp_redirect_multi.c
index aaedbf4955c3..c03b3a75991f 100644
--- a/tools/testing/selftests/bpf/xdp_redirect_multi.c
+++ b/tools/testing/selftests/bpf/xdp_redirect_multi.c
@@ -10,7 +10,6 @@
#include <net/if.h>
#include <unistd.h>
#include <libgen.h>
-#include <sys/resource.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
diff --git a/tools/testing/selftests/bpf/xdping.c b/tools/testing/selftests/bpf/xdping.c
index c567856fd1bc..5b6f977870f8 100644
--- a/tools/testing/selftests/bpf/xdping.c
+++ b/tools/testing/selftests/bpf/xdping.c
@@ -12,7 +12,6 @@
#include <string.h>
#include <unistd.h>
#include <libgen.h>
-#include <sys/resource.h>
#include <net/if.h>
#include <sys/types.h>
#include <sys/socket.h>
@@ -89,7 +88,6 @@ int main(int argc, char **argv)
{
__u32 mode_flags = XDP_FLAGS_DRV_MODE | XDP_FLAGS_SKB_MODE;
struct addrinfo *a, hints = { .ai_family = AF_INET };
- struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
__u16 count = XDPING_DEFAULT_COUNT;
struct pinginfo pinginfo = { 0 };
const char *optstr = "c:I:NsS";
@@ -167,10 +165,8 @@ int main(int argc, char **argv)
freeaddrinfo(a);
}
- if (setrlimit(RLIMIT_MEMLOCK, &r)) {
- perror("setrlimit(RLIMIT_MEMLOCK)");
- return 1;
- }
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
diff --git a/tools/testing/selftests/bpf/xdpxceiver.c b/tools/testing/selftests/bpf/xdpxceiver.c
index 5f8296d29e77..cfcb031323c5 100644
--- a/tools/testing/selftests/bpf/xdpxceiver.c
+++ b/tools/testing/selftests/bpf/xdpxceiver.c
@@ -90,7 +90,6 @@
#include <string.h>
#include <stddef.h>
#include <sys/mman.h>
-#include <sys/resource.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <time.h>
@@ -1448,14 +1447,13 @@ static void ifobject_delete(struct ifobject *ifobj)
int main(int argc, char **argv)
{
- struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY };
struct pkt_stream *pkt_stream_default;
struct ifobject *ifobj_tx, *ifobj_rx;
struct test_spec test;
u32 i, j;
- if (setrlimit(RLIMIT_MEMLOCK, &_rlim))
- exit_with_error(errno);
+ /* Use libbpf 1.0 API mode */
+ libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
ifobj_tx = ifobject_create();
if (!ifobj_tx)